Columns: max_stars_count (int64, 301 to 224k) · text (string, length 6 to 1.05M) · token_count (int64, 3 to 727k). Each row below lists the three cells in that order: the sample's max_stars_count, the raw text of the source file, and its token_count.
5,250
/* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.flowable.cmmn.model;

/**
 * The reactivation listener is a very specific user event listener available on a historic case instance in order to reactivate it.
 *
 * @author <NAME>
 */
public class ReactivateEventListener extends UserEventListener {

    /**
     * The optional default reactivation rule to be considered if a plan item does not specify an explicit one. If this one is not
     * provided either, such a plan item will be ignored for reactivation.
     */
    protected ReactivationRule defaultReactivationRule;

    /**
     * If an available condition is set for the reactivate event listener as part of the model, it ends up here, because the default
     * available condition of the generic event listener is predefined for a reactivate event listener, making it unavailable as long
     * as the case is active.
     */
    protected String reactivationAvailableConditionExpression;

    public ReactivationRule getDefaultReactivationRule() {
        return defaultReactivationRule;
    }

    public void setDefaultReactivationRule(ReactivationRule defaultReactivationRule) {
        this.defaultReactivationRule = defaultReactivationRule;
    }

    public String getReactivationAvailableConditionExpression() {
        return reactivationAvailableConditionExpression;
    }

    public void setReactivationAvailableConditionExpression(String reactivationAvailableConditionExpression) {
        this.reactivationAvailableConditionExpression = reactivationAvailableConditionExpression;
    }
}
555
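A minimal, hypothetical usage sketch for the model class above; it assumes ReactivationRule has a no-arg constructor and that the available condition takes an expression string such as "${caseCompleted}", neither of which is shown in this sample:

// Hypothetical wiring of the reactivation listener on a CMMN model object.
// ReactivationRule's no-arg constructor and the expression syntax are assumptions,
// not taken from the sample above.
public class ReactivateEventListenerExample {
    public static ReactivateEventListener configure() {
        ReactivateEventListener listener = new ReactivateEventListener();
        listener.setDefaultReactivationRule(new ReactivationRule());
        listener.setReactivationAvailableConditionExpression("${caseCompleted}");
        return listener;
    }
}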
435
<filename>europython-2019/videos/adriana-dorneles-what-about-recommendation-engines.json
{
  "copyright_text": "Creative Commons Attribution license (reuse allowed)",
  "description": "| How recommendation engines are taking part in our daily routine and\n how companies as Netflix and Amazon implement it?\n| This talk aims to show the elements that compound a recommendation\n engine to people who have never been in touch with the matter or want\n to know a bit more. At the end of this session, you might be able to\n reproduce your own recommendation system and also know where to find\n more about it.\n\n| Talk structure:\n| 1. What is and why use a recommendation engine?\n| 2. Recommendation engine importance\n| 3. Steps of a recommendation\n| 4. Recommendation algorithms\n| 5. Basic Statistics for distance and correlation\n| 6. Example",
  "duration": 1793,
  "language": "eng",
  "recorded": "2019-07-10",
  "related_urls": [
    {
      "label": "Conference schedule",
      "url": "https://ep2019.europython.eu/schedule/"
    }
  ],
  "speakers": [
    "<NAME>"
  ],
  "tags": [
    "Algorithms",
    "Big Data",
    "Business",
    "Data Science",
    "Python 3"
  ],
  "thumbnail_url": "https://i.ytimg.com/vi/1F2wEi6fkAM/maxresdefault.jpg",
  "title": "What about recommendation engines?",
  "videos": [
    {
      "type": "youtube",
      "url": "https://www.youtube.com/watch?v=1F2wEi6fkAM"
    }
  ]
}
488
32,544
package com.baeldung.maths;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.MathContext;
import java.math.RoundingMode;
import java.util.Random;

import org.junit.jupiter.api.Test;

public class BigDecimalDemoUnitTest {

    @Test
    public void whenBigDecimalCreated_thenValueMatches() {
        BigDecimal bdFromString = new BigDecimal("0.1");
        BigDecimal bdFromCharArray = new BigDecimal(new char[] { '3', '.', '1', '6', '1', '5' });
        BigDecimal bdlFromInt = new BigDecimal(42);
        BigDecimal bdFromLong = new BigDecimal(123412345678901L);
        BigInteger bigInteger = BigInteger.probablePrime(100, new Random());
        BigDecimal bdFromBigInteger = new BigDecimal(bigInteger);

        assertEquals("0.1", bdFromString.toString());
        assertEquals("3.1615", bdFromCharArray.toString());
        assertEquals("42", bdlFromInt.toString());
        assertEquals("123412345678901", bdFromLong.toString());
        assertEquals(bigInteger.toString(), bdFromBigInteger.toString());
    }

    @Test
    public void whenBigDecimalCreatedFromDouble_thenValueMayNotMatch() {
        BigDecimal bdFromDouble = new BigDecimal(0.1d);
        assertNotEquals("0.1", bdFromDouble.toString());
    }

    @Test
    public void whenBigDecimalCreatedUsingValueOf_thenValueMatches() {
        BigDecimal bdFromLong1 = BigDecimal.valueOf(123412345678901L);
        BigDecimal bdFromLong2 = BigDecimal.valueOf(123412345678901L, 2);
        BigDecimal bdFromDouble = BigDecimal.valueOf(0.1d);

        assertEquals("123412345678901", bdFromLong1.toString());
        assertEquals("1234123456789.01", bdFromLong2.toString());
        assertEquals("0.1", bdFromDouble.toString());
    }

    @Test
    public void whenEqualsCalled_thenSizeAndScaleMatched() {
        BigDecimal bd1 = new BigDecimal("1.0");
        BigDecimal bd2 = new BigDecimal("1.00");

        assertFalse(bd1.equals(bd2));
    }

    @Test
    public void whenComparingBigDecimals_thenExpectedResult() {
        BigDecimal bd1 = new BigDecimal("1.0");
        BigDecimal bd2 = new BigDecimal("1.00");
        BigDecimal bd3 = new BigDecimal("2.0");

        assertTrue(bd1.compareTo(bd3) < 0);
        assertTrue(bd3.compareTo(bd1) > 0);
        assertTrue(bd1.compareTo(bd2) == 0);
        assertTrue(bd1.compareTo(bd3) <= 0);
        assertTrue(bd1.compareTo(bd2) >= 0);
        assertTrue(bd1.compareTo(bd3) != 0);
    }

    @Test
    public void whenPerformingArithmetic_thenExpectedResult() {
        BigDecimal bd1 = new BigDecimal("4.0");
        BigDecimal bd2 = new BigDecimal("2.0");

        BigDecimal sum = bd1.add(bd2);
        BigDecimal difference = bd1.subtract(bd2);
        BigDecimal quotient = bd1.divide(bd2);
        BigDecimal product = bd1.multiply(bd2);

        assertTrue(sum.compareTo(new BigDecimal("6.0")) == 0);
        assertTrue(difference.compareTo(new BigDecimal("2.0")) == 0);
        assertTrue(quotient.compareTo(new BigDecimal("2.0")) == 0);
        assertTrue(product.compareTo(new BigDecimal("8.0")) == 0);
    }

    @Test
    public void whenGettingAttributes_thenExpectedResult() {
        BigDecimal bd = new BigDecimal("-12345.6789");

        assertEquals(9, bd.precision());
        assertEquals(4, bd.scale());
        assertEquals(-1, bd.signum());
    }

    @Test
    public void whenRoundingDecimal_thenExpectedResult() {
        BigDecimal bd = new BigDecimal("2.5");
        // Round to 1 digit using HALF_EVEN
        BigDecimal rounded = bd.round(new MathContext(1, RoundingMode.HALF_EVEN));

        assertEquals("2", rounded.toString());
    }

    @Test
    public void givenPurchaseTxn_whenCalculatingTotalAmount_thenExpectedResult() {
        BigDecimal quantity = new BigDecimal("4.5");
        BigDecimal unitPrice = new BigDecimal("2.69");
        BigDecimal discountRate = new BigDecimal("0.10");
        BigDecimal taxRate = new BigDecimal("0.0725");

        BigDecimal amountToBePaid = BigDecimalDemo.calculateTotalAmount(quantity, unitPrice, discountRate, taxRate);

        assertEquals("11.68", amountToBePaid.toString());
    }
}
1,879
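The last test above calls BigDecimalDemo.calculateTotalAmount, a helper class that is not part of this sample. A minimal sketch of what such a helper could look like, written here only so the asserted value of 11.68 is reproducible (the actual Baeldung class may differ), is:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class BigDecimalDemo {

    // Total = (quantity * unitPrice) minus the discount, plus tax, rounded to cents.
    public static BigDecimal calculateTotalAmount(BigDecimal quantity, BigDecimal unitPrice,
            BigDecimal discountRate, BigDecimal taxRate) {
        BigDecimal amount = quantity.multiply(unitPrice);                              // 4.5 * 2.69 = 12.105
        BigDecimal discountedAmount = amount.subtract(amount.multiply(discountRate));  // 12.105 - 1.2105 = 10.8945
        BigDecimal tax = discountedAmount.multiply(taxRate);                           // 10.8945 * 0.0725 ≈ 0.7899
        return discountedAmount.add(tax).setScale(2, RoundingMode.HALF_EVEN);          // ≈ 11.684 -> 11.68
    }
}

With the test's inputs, 4.5 * 2.69 = 12.105; subtracting the 10% discount gives 10.8945, and adding 7.25% tax gives about 11.684, which rounds to 11.68.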
31,928
from localstack.services.cloudformation.service_models import GenericBaseModel
from localstack.utils.aws import aws_stack
from localstack.utils.common import select_attributes


class Route53RecordSet(GenericBaseModel):
    @staticmethod
    def cloudformation_type():
        return "AWS::Route53::RecordSet"

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get("Name")  # Ref attribute is the domain name itself

    def fetch_state(self, stack_name, resources):
        route53 = aws_stack.connect_to_service("route53")
        props = self.props
        result = route53.list_resource_record_sets(HostedZoneId=props["HostedZoneId"])[
            "ResourceRecordSets"
        ]
        result = [r for r in result if r["Name"] == props["Name"] and r["Type"] == props["Type"]]
        return (result or [None])[0]

    @staticmethod
    def get_deploy_templates():
        def param_change_batch(params, **kwargs):
            attr_names = [
                "Name",
                "Type",
                "SetIdentifier",
                "Weight",
                "Region",
                "GeoLocation",
                "Failover",
                "MultiValueAnswer",
                "TTL",
                "ResourceRecords",
                "AliasTarget",
                "HealthCheckId",
            ]
            attrs = select_attributes(params, attr_names)
            alias_target = attrs.get("AliasTarget", {})
            alias_target["EvaluateTargetHealth"] = alias_target.get("EvaluateTargetHealth", False)
            return {
                "Comment": params.get("Comment", ""),
                "Changes": [{"Action": "CREATE", "ResourceRecordSet": attrs}],
            }

        return {
            "create": {
                "function": "change_resource_record_sets",
                "parameters": {
                    "HostedZoneId": "HostedZoneId",
                    "ChangeBatch": param_change_batch,
                },
            }
        }
984
10,608
{"default": {"description": "PUBHEALTH is a comprehensive dataset for explainable automated fact-checking of\npublic health claims. Each instance in the PUBHEALTH dataset has an associated\nveracity label (true, false, unproven, mixture). Furthermore each instance in the\ndataset has an explanation text field. The explanation is a justification for which\nthe claim has been assigned a particular veracity label.\n\nThe dataset was created to explore fact-checking of difficult to verify claims i.e.,\nthose which require expertise from outside of the journalistics domain, in this case\nbiomedical and public health expertise.\n\nIt was also created in response to the lack of fact-checking datasets which provide\ngold standard natural language explanations for verdicts/labels.\n\nNOTE: There are missing labels in the dataset and we have replaced them with -1.\n", "citation": "@inproceedings{kotonya-toni-2020-explainable,\n title = \"Explainable Automated Fact-Checking for Public Health Claims\",\n author = \"<NAME> and <NAME>\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods\n in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.623\",\n pages = \"7740--7754\",\n}\n", "homepage": "https://github.com/neemakot/Health-Fact-Checking/blob/master/data/DATASHEET.md", "license": "", "features": {"claim_id": {"dtype": "string", "id": null, "_type": "Value"}, "claim": {"dtype": "string", "id": null, "_type": "Value"}, "date_published": {"dtype": "string", "id": null, "_type": "Value"}, "explanation": {"dtype": "string", "id": null, "_type": "Value"}, "fact_checkers": {"dtype": "string", "id": null, "_type": "Value"}, "main_text": {"dtype": "string", "id": null, "_type": "Value"}, "sources": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 4, "names": ["false", "mixture", "true", "unproven"], "names_file": null, "id": null, "_type": "ClassLabel"}, "subjects": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "health_fact", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 53985377, "num_examples": 9832, "dataset_name": "health_fact"}, "test": {"name": "test", "num_bytes": 6825221, "num_examples": 1235, "dataset_name": "health_fact"}, "validation": {"name": "validation", "num_bytes": 6653044, "num_examples": 1225, "dataset_name": "health_fact"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1eTtRs5cUlBP5dXsx-FTAlmXuB6JQi2qj": {"num_bytes": 24892660, "checksum": "3f0a5541f4a60c09a138a896621402893ce4b3a37060363d9257010c2c27fc3a"}}, "download_size": 24892660, "post_processing_size": null, "dataset_size": 67463642, "size_in_bytes": 92356302}}
1,014
1,232
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME>

# 437. Path Sum III

# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution(object):
    def pathSum(self, root, sum):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int
        """
        # Edge Case:
        if not root:
            return 0

        # Process
        def dfs(root, sum):
            count = 0
            if not root:
                return 0
            if root.val == sum:
                count += 1
            count += dfs(root.left, sum - root.val)
            count += dfs(root.right, sum - root.val)
            return count

        # Recursion
        return dfs(root, sum) + self.pathSum(root.left, sum) + self.pathSum(root.right, sum)
465
325
#include <il2c_private.h> #include "../Platform/debugbreak.h" /////////////////////////////////////////////////////// // Another special runtime helper functions #if defined(IL2C_USE_LINE_INFORMATION) void il2c_break__(const char* pFile, int line) #else void il2c_break__(void) #endif { debug_break(); } /////////////////////////////////////////////////////// // Low level debugger formatted writer #if defined(IL2C_USE_RUNTIME_DEBUG_LOG) typedef struct IL2C_RUNTIME_DEBUG_LOG_FORMAT_STATE_DECL { va_list va; uint16_t argumentCount; uint16_t length; wchar_t* pBuffer; } IL2C_RUNTIME_DEBUG_LOG_FORMAT_STATE; static int8_t il2c_runtime_debug_log_format_writer_step1__( const wchar_t* pTokenFrom, uint32_t tokenLength, void* pState) { IL2C_RUNTIME_DEBUG_LOG_FORMAT_STATE* p = pState; p->length = (uint16_t)(p->length + tokenLength); return IL2C_STRING_FORMAT_SUCCEEDED; } static int8_t il2c_runtime_debug_log_format_argument_writer_step1__( uint16_t argumentIndex, const wchar_t* pFormatFrom, uint32_t formatLength, void* pState) { IL2C_RUNTIME_DEBUG_LOG_FORMAT_STATE* p = pState; wchar_t buffer[24]; if (il2c_unlikely__((argumentIndex != (p->argumentCount++)) || (formatLength != 1))) { return IL2C_STRING_FORMAT_INVALID; } switch (*pFormatFrom) { case L'd': { int32_t value = va_arg(p->va, int32_t); il2c_i32tow(value, buffer, 10); } break; case L'u': { uint32_t value = va_arg(p->va, uint32_t); il2c_u32tow(value, buffer, 10); } break; case L'x': { uint32_t value = va_arg(p->va, uint32_t); il2c_u32tow(value, buffer, 16); } break; case L'D': { int64_t value = va_arg(p->va, int64_t); il2c_i64tow(value, buffer, 10); } break; case L'U': { uint64_t value = va_arg(p->va, uint64_t); il2c_u64tow(value, buffer, 10); } break; case L'X': { uint64_t value = va_arg(p->va, uint64_t); il2c_u64tow(value, buffer, 16); } break; case L'p': { uintptr_t value = va_arg(p->va, uintptr_t); il2c_u64tow(value, buffer, 16); } break; case L's': { const char* pStr = va_arg(p->va, const char*); p->length = (uint16_t)(strlen(pStr) + p->length); } return IL2C_STRING_FORMAT_SUCCEEDED; case L'S': { const wchar_t* pStr = va_arg(p->va, const wchar_t*); p->length = (uint16_t)(il2c_wcslen(pStr) + p->length); } return IL2C_STRING_FORMAT_SUCCEEDED; default: return IL2C_STRING_FORMAT_INVALID; } p->length = (uint16_t)(il2c_wcslen(buffer) + p->length); return IL2C_STRING_FORMAT_SUCCEEDED; } static int8_t il2c_runtime_debug_log_format_writer_step2__( const wchar_t* pTokenFrom, uint32_t tokenLength, void* pState) { IL2C_RUNTIME_DEBUG_LOG_FORMAT_STATE* p = pState; memcpy(p->pBuffer, pTokenFrom, tokenLength * sizeof(wchar_t)); p->pBuffer += tokenLength; return 0; } static int8_t il2c_runtime_debug_log_format_argument_writer_step2__( uint16_t argumentIndex, const wchar_t* pFormatFrom, uint32_t formatLength, void* pState) { IL2C_RUNTIME_DEBUG_LOG_FORMAT_STATE* p = pState; wchar_t buffer[24]; uint16_t length; switch (*pFormatFrom) { case L'd': { int32_t value = va_arg(p->va, int32_t); il2c_i32tow(value, buffer, 10); } break; case L'u': { uint32_t value = va_arg(p->va, uint32_t); il2c_u32tow(value, buffer, 10); } break; case L'x': { uint32_t value = va_arg(p->va, uint32_t); il2c_u32tow(value, buffer, 16); } break; case L'D': { int64_t value = va_arg(p->va, int64_t); il2c_i64tow(value, buffer, 10); } break; case L'U': { uint64_t value = va_arg(p->va, uint64_t); il2c_u64tow(value, buffer, 10); } break; case L'X': { uint64_t value = va_arg(p->va, uint64_t); il2c_u64tow(value, buffer, 16); } break; case L'p': { uintptr_t value = va_arg(p->va, uintptr_t); 
il2c_u64tow(value, buffer, 16); } break; case L's': { const char* pStr = va_arg(p->va, const char*); while (*pStr != '\0') { *p->pBuffer++ = (wchar_t)*pStr++; } } return IL2C_STRING_FORMAT_SUCCEEDED; case L'S': { const wchar_t* pStr = va_arg(p->va, const wchar_t*); while (*pStr != L'\0') { *p->pBuffer++ = *pStr++; } } return IL2C_STRING_FORMAT_SUCCEEDED; default: return IL2C_STRING_FORMAT_INVALID; } length = (uint16_t)il2c_wcslen(buffer); memcpy(p->pBuffer, buffer, length * sizeof(wchar_t)); p->pBuffer += length; return IL2C_STRING_FORMAT_SUCCEEDED; } void il2c_runtime_debug_log_format(const wchar_t* format, ...) { IL2C_RUNTIME_DEBUG_LOG_FORMAT_STATE state = { 0 }; va_start(state.va, format); int8_t result = il2c_format_string__( format, il2c_runtime_debug_log_format_writer_step1__, il2c_runtime_debug_log_format_argument_writer_step1__, &state); if (il2c_likely__(result == IL2C_STRING_FORMAT_SUCCEEDED)) { il2c_mcalloc(wchar_t, pBuffer, (state.length + 3U) * sizeof(wchar_t)); state.pBuffer = pBuffer; va_end(state.va); va_start(state.va, format); result = il2c_format_string__( format, il2c_runtime_debug_log_format_writer_step2__, il2c_runtime_debug_log_format_argument_writer_step2__, &state); if (il2c_likely__(result == IL2C_STRING_FORMAT_SUCCEEDED)) { *state.pBuffer++ = L'\r'; *state.pBuffer++ = L'\n'; *state.pBuffer = L'\0'; il2c_runtime_debug_log(pBuffer); } il2c_mcfree(pBuffer); } va_end(state.va); } #endif
3,442
2,206
<filename>runtime-parent/runtime-core/src/main/java/com/speedment/runtime/core/internal/util/InternalMergeUtil.java
/*
 *
 * Copyright (c) 2006-2020, Speedment, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); You may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.speedment.runtime.core.internal.util;

import com.speedment.runtime.core.exception.SpeedmentException;
import com.speedment.runtime.core.manager.Manager;
import com.speedment.runtime.field.predicate.SpeedmentPredicate;
import com.speedment.runtime.field.trait.HasComparableOperators;

import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

import static java.util.Collections.emptySet;
import static java.util.Collections.singletonList;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toCollection;
import static java.util.stream.Collectors.toSet;

public final class InternalMergeUtil {

    private static final int CHUNK_SIZE = 100;

    private InternalMergeUtil() {}

    public static <T> Set<T> merge(Manager<T> manager, Set<T> entities) {
        return chunks(entities)
            .stream()
            .map(subSet -> mergeHelper(manager, subSet))
            .reduce((a, b) -> { a.addAll(b); return a; })
            .orElse(emptySet());
    }

    private static <T, V extends Comparable<V>> Set<T> mergeHelper(Manager<T> manager, Set<T> entities) {
        requireNonNull(manager);
        requireNonNull(entities);

        @SuppressWarnings("unchecked")
        final List<HasComparableOperators<T, V>> pks = manager.primaryKeyFields()
            .map(pk -> (HasComparableOperators<T, V>) pk)
            .collect(Collectors.toList());

        if (pks.size() != 1) {
            throw new UnsupportedOperationException(
                "Merge operations are only supported for entities with exactly one primary key."
                    + " Operation failed because there are " + pks.size() + " primary keys for "
                    + manager.getEntityClass().getSimpleName() + "."
            );
            // Multiple PKs are hard to support because "in" predicates cannot be composed
            // correctly by .and(). E.g. in (1, 2) and in (3, 4) is not the same as
            // (1 and 3) or (2 and 4)
        }
        final HasComparableOperators<T, V> co = pks
            .iterator()
            .next();

        @SuppressWarnings("unchecked")
        final SpeedmentPredicate<T> pkPredicate = co.in(entities.stream()
            .map(pkExtractor(co))
            .collect(toSet()));

        final Set<V> existingPks = manager.stream()
            .filter(pkPredicate)
            .map(pkExtractor(co))
            .collect(toSet());

        final Set<T> existing = entities.stream()
            .filter(e -> existingPks.contains(pkExtractor(co).apply(e)))
            .collect(toCollection(LinkedHashSet::new)); // Retain order

        final Set<T> missing = entities.stream()
            .filter(e -> !existingPks.contains(pkExtractor(co).apply(e)))
            .collect(toCollection(LinkedHashSet::new)); // Retain order

        System.out.println("entities = " + entities);
        System.out.println("existing = " + existing);
        System.out.println("missing = " + missing);

        final List<SpeedmentException> exceptions = new ArrayList<>();
        final Set<T> result = new HashSet<>();
        for (T entity : existing) {
            try {
                final T updated = manager.update(entity);
                result.add(updated);
            } catch (SpeedmentException ex) {
                exceptions.add(ex);
            }
        }
        for (T entity : missing) {
            try {
                final T persisted = manager.persist(entity);
                result.add(persisted);
            } catch (SpeedmentException ex) {
                exceptions.add(ex);
            }
        }
        if (!exceptions.isEmpty()) {
            throw new SpeedmentException("Unable to merge because " + exceptions.size()
                + " operation(s) failed.", exceptions.iterator().next());
        }
        return result;
    }

    @SuppressWarnings("unchecked")
    private static <T, V extends Comparable<V>> Function<T, V> pkExtractor(HasComparableOperators<T, V> co) {
        return entity -> (V) co.getter().apply(entity);
    }

    private static <T> List<Set<T>> chunks(Collection<T> keys) {
        if (keys.size() <= CHUNK_SIZE) {
            return singletonList(new HashSet<>(keys));
        }
        final List<Set<T>> result = new ArrayList<>(keys.size() / CHUNK_SIZE);
        Set<T> current = new HashSet<>(CHUNK_SIZE);
        for (T k : keys) {
            current.add(k);
            if (current.size() >= CHUNK_SIZE) {
                result.add(current);
                current = new HashSet<>();
            }
        }
        if (!current.isEmpty()) {
            result.add(current);
        }
        return result;
    }
}
2,281
1,089
package org.zalando.logbook;

import lombok.AllArgsConstructor;
import org.zalando.fauxpas.ThrowingConsumer;

import java.io.IOException;
import java.util.Collection;

@AllArgsConstructor
public final class CompositeSink implements Sink {

    private final Collection<Sink> sinks;

    @Override
    public boolean isActive() {
        return sinks.stream().anyMatch(Sink::isActive);
    }

    @Override
    public void write(final Precorrelation precorrelation, final HttpRequest request) {
        each(sink -> sink.write(precorrelation, request));
    }

    @Override
    public void write(final Correlation correlation, final HttpRequest request, final HttpResponse response) {
        each(sink -> sink.write(correlation, request, response));
    }

    @Override
    public void writeBoth(final Correlation correlation, final HttpRequest request, final HttpResponse response) {
        each(sink -> sink.writeBoth(correlation, request, response));
    }

    private void each(final ThrowingConsumer<Sink, IOException> consumer) {
        sinks.stream().filter(Sink::isActive).forEach(consumer);
    }
}
367
3,102
<gh_stars>1000+
// RUN: %clang_cc1 -triple x86_64-linux -std=c++98 %s -O3 -disable-llvm-passes -pedantic-errors -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux -std=c++11 %s -O3 -disable-llvm-passes -pedantic-errors -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux -std=c++14 %s -O3 -disable-llvm-passes -pedantic-errors -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux -std=c++1z %s -O3 -disable-llvm-passes -pedantic-errors -emit-llvm -o - | FileCheck %s

// dr158: yes

// CHECK-LABEL: define {{.*}} @_Z1f
const int *f(const int * const *p, int **q) {
  // CHECK: load i32**, {{.*}}, !tbaa ![[INTPTR_TBAA:[^,]*]]
  const int *x = *p;
  // CHECK: store i32* null, {{.*}}, !tbaa ![[INTPTR_TBAA]]
  *q = 0;
  return x;
}

struct A {};

// CHECK-LABEL: define {{.*}} @_Z1g
const int *(A::*const *g(const int *(A::* const **p)[3], int *(A::***q)[3]))[3] {
  // CHECK: load i64**, {{.*}}, !tbaa ![[MEMPTR_TBAA:[^,]*]]
  const int *(A::*const *x)[3] = *p;
  // CHECK: store i64* null, {{.*}}, !tbaa ![[MEMPTR_TBAA]]
  *q = 0;
  return x;
}
525
4,526
#define SOL_CHECK_ARGUMENTS 1
#include <sol.hpp>

#include "assert.hpp"

class vector {
public:
    double data[3];

    vector() : data{ 0, 0, 0 } {}

    double& operator[](int i) {
        return data[i];
    }

    static double my_index(vector& v, int i) {
        return v[i];
    }

    static void my_new_index(vector& v, int i, double x) {
        v[i] = x;
    }
};

int main() {
    sol::state lua;
    lua.open_libraries(sol::lib::base);

    lua.new_usertype<vector>("vector",
        sol::constructors<sol::types<>>(),
        sol::meta_function::index, &vector::my_index,
        sol::meta_function::new_index, &vector::my_new_index);

    lua.script("v = vector.new()\n"
        "print(v[1])\n"
        "v[2] = 3\n"
        "print(v[2])\n");

    vector& v = lua["v"];
    c_assert(v[0] == 0.0);
    c_assert(v[1] == 0.0);
    c_assert(v[2] == 3.0);

    return 0;
}
361
2,122
package com.sishuok.es.sys.organization.entity;

/**
 * Organization type
 * <p>User: <NAME>
 * <p>Date: 13-4-5 1:53 PM
 * <p>Version: 1.0
 */
public enum OrganizationType {

    bloc("集团"),             // corporate group
    branch_office("分公司"),  // branch office
    department("部门"),       // department
    group("部门小组");        // department team

    private final String info;

    private OrganizationType(String info) {
        this.info = info;
    }

    public String getInfo() {
        return info;
    }
}
197
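For context, a small usage sketch assuming only the enum above (the class name and the EnumMap-based label map are illustrative, not taken from the sample), showing how the display labels might be consumed, e.g. to populate a select box:

import java.util.EnumMap;
import java.util.Map;

public class OrganizationTypeLabels {
    public static void main(String[] args) {
        // Map each organization type to its human-readable (Chinese) label.
        Map<OrganizationType, String> labels = new EnumMap<>(OrganizationType.class);
        for (OrganizationType type : OrganizationType.values()) {
            labels.put(type, type.getInfo());
        }
        System.out.println(labels.get(OrganizationType.department)); // prints "部门"
    }
}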
1,463
/*
 +----------------------------------------------------------------------+
 | Zan                                                                  |
 +----------------------------------------------------------------------+
 | Copyright (c) 2016-2017 <NAME> <https://github.com/youzan/zan>       |
 +----------------------------------------------------------------------+
 | This source file is subject to version 2.0 of the Apache license,    |
 | that is bundled with this package in the file LICENSE, and is        |
 | available through the world-wide-web at the following url:           |
 | http://www.apache.org/licenses/LICENSE-2.0.html                      |
 | If you did not receive a copy of the Apache2.0 license and are unable|
 | to obtain it through the world-wide-web, please send a note to       |
 | <EMAIL> so we can mail you a copy immediately.                       |
 +----------------------------------------------------------------------+
 | <NAME> <<EMAIL>>                                                      |
 +----------------------------------------------------------------------+
 */

#ifndef _ZAN_ZANFACTORY_H_
#define _ZAN_ZANFACTORY_H_

#include "swoole.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * -----------------------------------Factory--------------------------------------------
 */
enum zanDispatchMode
{
    ZAN_DISPATCH_ROUND  = 1,  // round-robin mode
    ZAN_DISPATCH_FDMOD  = 2,  // fixed mode: assign a worker by the connection's file descriptor
    ZAN_DISPATCH_QUEUE  = 3,  // preemptive mode
    ZAN_DISPATCH_IPMOD  = 4,  // modulo of the client IP, assigned to a fixed worker
    //ZAN_DISPATCH_UIDMOD = 5,  // assignment by UID
};

typedef struct
{
    long target_worker_id;
    swEventData data;
} swDispatchData;

typedef struct _swSendData
{
    swDataHead info;

    /**
     * for big package
     */
    uint32_t length;

    char *data;
} swSendData;

typedef struct _zanFactory
{
    int (*start)(struct _zanFactory *);
    int (*shutdown)(struct _zanFactory *);
    int (*dispatch)(struct _zanFactory *, swDispatchData *);
    int (*finish)(struct _zanFactory *, swSendData *);
    int (*notify)(struct _zanFactory *, swDataHead *);
    int (*end)(struct _zanFactory *, int session_id);
} zanFactory;

int zanFactory_create(zanFactory *factory);

/*----------------------------Thread Pool-------------------------------*/
enum swThread_type
{
    SW_THREAD_MASTER     = 1,
    SW_THREAD_REACTOR    = 2,
    SW_THREAD_WORKER     = 3,
    SW_THREAD_UDP        = 4,
    SW_THREAD_UNIX_DGRAM = 5,
    SW_THREAD_HEARTBEAT  = 6,
};

typedef struct _swThread swThread;
typedef struct _swThreadPool swThreadPool;

typedef struct _swThreadParam
{
    void *object;
    int pti;
} swThreadParam;

struct _swThreadPool
{
    zanCond cond;

    swThread *threads;
    swThreadParam *params;

    void *ptr1;
    void *ptr2;

#ifdef SW_THREADPOOL_USE_CHANNEL
    swChannel *chan;
#else
    swRingQueue queue;
#endif

    int thread_num;
    int shutdown;
    sw_atomic_t task_num;

    void (*onStart)(struct _swThreadPool *pool, int id);
    void (*onStop)(struct _swThreadPool *pool, int id);
    int (*onTask)(struct _swThreadPool *pool, void *task, int task_len);
};

struct _swThread
{
    pthread_t tid;
    int id;
    swThreadPool *pool;
};

int swThreadPool_create(swThreadPool *pool, int max_num);
int swThreadPool_dispatch(swThreadPool *pool, void *task, int task_len);
int swThreadPool_run(swThreadPool *pool);
int swThreadPool_free(swThreadPool *pool);

#ifdef __cplusplus
}
#endif

#endif //_ZAN_ZANFACTORY_H_
1,430
852
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms

from RecoMuon.TrackingTools.MuonServiceProxy_cff import *
from RecoMuon.TrackingTools.MuonUpdatorAtVertex_cff import *

muonSeedTrack = cms.EDAnalyzer("MuonSeedTrack",
    MuonUpdatorAtVertex,
    MuonServiceProxy,
    MuonSeed = cms.InputTag("MuonSeed")
)
137
1,530
<gh_stars>1000+ /* * This file is part of ClassGraph. * * Author: <NAME> * * Hosted at: https://github.com/classgraph/classgraph * * -- * * The MIT License (MIT) * * Copyright (c) 2019 <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without * limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO * EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE * OR OTHER DEALINGS IN THE SOFTWARE. */ package nonapi.io.github.classgraph.fastzipfilereader; import java.io.IOException; import java.util.Calendar; import java.util.TimeZone; import nonapi.io.github.classgraph.fileslice.Slice; import nonapi.io.github.classgraph.fileslice.reader.RandomAccessReader; import nonapi.io.github.classgraph.utils.VersionFinder; /** A zip entry within a {@link LogicalZipFile}. */ public class FastZipEntry implements Comparable<FastZipEntry> { /** The parent logical zipfile. */ final LogicalZipFile parentLogicalZipFile; /** The offset of the entry's local header, as an offset relative to the parent logical zipfile. */ private final long locHeaderPos; /** The zip entry path. */ public final String entryName; /** True if the zip entry is deflated; false if the zip entry is stored. */ final boolean isDeflated; /** The compressed size of the zip entry, in bytes. */ public final long compressedSize; /** The uncompressed size of the zip entry, in bytes. */ public final long uncompressedSize; /** The last modified millis since the epoch, or 0L if it is unknown */ private long lastModifiedTimeMillis; /** The last modified time in MSDOS format, if {@link FastZipEntry#lastModifiedTimeMillis} is 0L. */ private final int lastModifiedTimeMSDOS; /** The last modified date in MSDOS format, if {@link FastZipEntry#lastModifiedTimeMillis} is 0L. */ private final int lastModifiedDateMSDOS; /** The file attributes for this resource, or 0 if unknown. */ public final int fileAttributes; /** The {@link Slice} for the zip entry's raw data (which can be either stored or deflated). */ private Slice slice; /** * The version code (&gt;= 9), or 8 for the base layer or a non-versioned jar (whether JDK 7 or 8 compatible). */ final int version; /** * The unversioned entry name (i.e. entryName with "META_INF/versions/{versionInt}/" stripped) */ public final String entryNameUnversioned; // ------------------------------------------------------------------------------------------------------------- /** * Constructor. * * @param parentLogicalZipFile * The parent logical zipfile containing this entry. * @param locHeaderPos * The offset of the LOC header for this entry within the parent logical zipfile. * @param entryName * The name of the entry. 
* @param isDeflated * True if the entry is deflated; false if the entry is stored. * @param compressedSize * The compressed size of the entry. * @param uncompressedSize * The uncompressed size of the entry. * @param lastModifiedTimeMillis * The last modified date/time in millis since the epoch, or 0L if unknown (in which case, the MSDOS * time and date fields will be provided). * @param lastModifiedTimeMSDOS * The last modified date, in MSDOS format, if lastModifiedMillis is 0L. * @param lastModifiedDateMSDOS * The last modified date, in MSDOS format, if lastModifiedMillis is 0L. * @param fileAttributes * The POSIX file attribute bits from the zip entry. */ FastZipEntry(final LogicalZipFile parentLogicalZipFile, final long locHeaderPos, final String entryName, final boolean isDeflated, final long compressedSize, final long uncompressedSize, final long lastModifiedTimeMillis, final int lastModifiedTimeMSDOS, final int lastModifiedDateMSDOS, final int fileAttributes) { this.parentLogicalZipFile = parentLogicalZipFile; this.locHeaderPos = locHeaderPos; this.entryName = entryName; this.isDeflated = isDeflated; this.compressedSize = compressedSize; this.uncompressedSize = !isDeflated && uncompressedSize < 0 ? compressedSize : uncompressedSize; this.lastModifiedTimeMillis = lastModifiedTimeMillis; this.lastModifiedTimeMSDOS = lastModifiedTimeMSDOS; this.lastModifiedDateMSDOS = lastModifiedDateMSDOS; this.fileAttributes = fileAttributes; // Get multi-release jar version number, and strip any version prefix int entryVersion = 8; String entryNameWithoutVersionPrefix = entryName; if (entryName.startsWith(LogicalZipFile.MULTI_RELEASE_PATH_PREFIX) && entryName.length() > LogicalZipFile.MULTI_RELEASE_PATH_PREFIX.length() + 1) { // This is a multi-release jar path final int nextSlashIdx = entryName.indexOf('/', LogicalZipFile.MULTI_RELEASE_PATH_PREFIX.length()); if (nextSlashIdx > 0) { // Get path after version number, i.e. strip "META-INF/versions/{versionInt}/" prefix final String versionStr = entryName.substring(LogicalZipFile.MULTI_RELEASE_PATH_PREFIX.length(), nextSlashIdx); // For multi-release jars, the version number has to be an int >= 9 // Integer.parseInt() is slow, so this is a custom implementation (this is called many times // for large classpaths, and Integer.parseInt() was a bit of a bottleneck, surprisingly) int versionInt = 0; if (versionStr.length() < 6 && !versionStr.isEmpty()) { for (int i = 0; i < versionStr.length(); i++) { final char c = versionStr.charAt(i); if (c < '0' || c > '9') { versionInt = 0; break; } if (versionInt == 0) { versionInt = c - '0'; } else { versionInt = versionInt * 10 + c - '0'; } } } if (versionInt != 0) { entryVersion = versionInt; } // Set version to 8 for out-of-range version numbers or invalid paths if (entryVersion < 9 || entryVersion > VersionFinder.JAVA_MAJOR_VERSION) { entryVersion = 8; } if (entryVersion > 8) { // Strip version path prefix entryNameWithoutVersionPrefix = entryName.substring(nextSlashIdx + 1); // For META-INF/versions/{versionInt}/META-INF/*, don't strip version prefix: // "The intention is that the META-INF directory cannot be versioned." 
// http://mail.openjdk.java.net/pipermail/jigsaw-dev/2018-October/013954.html if (entryNameWithoutVersionPrefix.startsWith(LogicalZipFile.META_INF_PATH_PREFIX)) { entryVersion = 8; entryNameWithoutVersionPrefix = entryName; } } } } this.version = entryVersion; this.entryNameUnversioned = entryNameWithoutVersionPrefix; } // ------------------------------------------------------------------------------------------------------------- /** * Lazily get zip entry slice -- this is deferred until zip entry data needs to be read, in order to avoid * randomly seeking within zipfile for every entry as the central directory is read. * * @return the offset within the physical zip file of the entry's start offset. * @throws IOException * If an I/O exception occurs. */ public Slice getSlice() throws IOException { if (slice == null) { final RandomAccessReader randomAccessReader = parentLogicalZipFile.slice.randomAccessReader(); // Check header magic if (randomAccessReader.readInt(locHeaderPos) != 0x04034b50) { throw new IOException("Zip entry has bad LOC header: " + entryName); } final long dataStartPos = locHeaderPos + 30 + randomAccessReader.readShort(locHeaderPos + 26) + randomAccessReader.readShort(locHeaderPos + 28); if (dataStartPos > parentLogicalZipFile.slice.sliceLength) { throw new IOException("Unexpected EOF when trying to read zip entry data: " + entryName); } // Create a new Slice that wraps just the data of the zip entry, and mark whether it is deflated slice = parentLogicalZipFile.slice.slice(dataStartPos, compressedSize, isDeflated, uncompressedSize); } return slice; } // ------------------------------------------------------------------------------------------------------------- /** * Get the path to this zip entry, using "!/" as a separator between the parent logical zipfile and the entry * name. * * @return the path of the entry */ public String getPath() { return parentLogicalZipFile.getPath() + "!/" + entryName; } /** * Get the last modified time in Epoch millis, or 0L if unknown. * * @return the last modified time in Epoch millis. */ public long getLastModifiedTimeMillis() { // If lastModifiedTimeMillis is zero, but there is an MSDOS date and time available if (lastModifiedTimeMillis == 0L && (lastModifiedDateMSDOS != 0 || lastModifiedTimeMSDOS != 0)) { // Convert from MS-DOS Date & Time Format to Epoch millis final int lastModifiedSecond = (lastModifiedTimeMSDOS & 0b11111) * 2; final int lastModifiedMinute = lastModifiedTimeMSDOS >> 5 & 0b111111; final int lastModifiedHour = lastModifiedTimeMSDOS >> 11; final int lastModifiedDay = lastModifiedDateMSDOS & 0b11111; final int lastModifiedMonth = (lastModifiedDateMSDOS >> 5 & 0b111) - 1; final int lastModifiedYear = (lastModifiedDateMSDOS >> 9) + 1980; final Calendar lastModifiedCalendar = Calendar.getInstance(TimeZone.getTimeZone("UTC")); lastModifiedCalendar.set(lastModifiedYear, lastModifiedMonth, lastModifiedDay, lastModifiedHour, lastModifiedMinute, lastModifiedSecond); lastModifiedCalendar.set(Calendar.MILLISECOND, 0); // Cache converted time by overwriting the zero lastModifiedTimeMillis field lastModifiedTimeMillis = lastModifiedCalendar.getTimeInMillis(); } // Return the last modified time, or 0L if it is totally unknown. return lastModifiedTimeMillis; } /** * Sort in decreasing order of version number, then lexicographically increasing order of unversioned entry * path. 
* * @param o * the object to compare to * @return the result of comparison */ @Override public int compareTo(final FastZipEntry o) { final int diff0 = o.version - this.version; if (diff0 != 0) { return diff0; } final int diff1 = entryNameUnversioned.compareTo(o.entryNameUnversioned); if (diff1 != 0) { return diff1; } final int diff2 = entryName.compareTo(o.entryName); if (diff2 != 0) { return diff2; } // In case of multiple entries with the same entry name, return them in consecutive order of location, // so that the earliest entry overrides later entries (this is an arbitrary decision for consistency) final long diff3 = locHeaderPos - o.locHeaderPos; return diff3 < 0L ? -1 : diff3 > 0L ? 1 : 0; } /* (non-Javadoc) * @see java.lang.Object#hashCode() */ @Override public int hashCode() { return parentLogicalZipFile.hashCode() ^ version ^ entryName.hashCode() ^ (int) locHeaderPos; } /* (non-Javadoc) * @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(final Object obj) { if (this == obj) { return true; } else if (!(obj instanceof FastZipEntry)) { return false; } final FastZipEntry other = (FastZipEntry) obj; return this.parentLogicalZipFile.equals(other.parentLogicalZipFile) && this.compareTo(other) == 0; } /* (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { return "jar:file:" + getPath(); } }
5,393
723
<reponame>Acidburn0zzz/LiveReload
#import <Foundation/Foundation.h>

extern NSString *const LRRuntimeManagerErrorDomain;

enum {
    LRRuntimeManagerErrorValidationFailed = 1,
};
63
935
#pragma once #include <cstddef> #include <cstdint> #include <utility> #include <string> namespace limits { constexpr size_t maxCmdSlots = 32; constexpr size_t maxPorts = 32; } struct receivedFis { uint8_t dmaFis[0x1C]; uint8_t _reservedA[4]; uint8_t pioFis[0x14]; uint8_t _reservedB[12]; uint8_t d2hFis[0x14]; uint8_t _reservedC[4]; uint8_t sdbFis[8]; uint8_t unkFis[0x40]; uint8_t _reservedD[0x60]; }; static_assert(sizeof(receivedFis) == 256); struct commandHeader { uint8_t configBytes[2]; uint16_t prdtLength; uint32_t prdByteCount; uint32_t ctBase; uint32_t ctBaseUpper; uint32_t _reserved[4]; }; static_assert(sizeof(commandHeader) == 32); struct commandList { commandHeader slots[32]; }; static_assert(sizeof(commandList) == 32 * 32); struct prdtEntry { uint32_t dataBase; uint32_t dataBaseUpper; uint32_t _reserved; uint32_t info; }; static_assert(sizeof(prdtEntry) == 16); struct fisH2D { uint8_t fisType; uint8_t info; uint8_t command; uint8_t features; uint8_t lba0; uint8_t lba1; uint8_t lba2; uint8_t devHead; uint8_t lba3; uint8_t lba4; uint8_t lba5; uint8_t featuresUpper; uint16_t sectorCount; uint8_t _reservedA; uint8_t control; uint32_t _reservedB; }; static_assert(sizeof(fisH2D) == 20); struct commandTable { fisH2D commandFis; uint8_t commandFisPad[0x40 - 20]; uint8_t atapiCommand[0x10]; uint8_t _reserved[0x30]; // Allows us to read 64kb into a buffer (16 * 512), plus one to deal with unaligned buffers. static constexpr std::size_t prdtEntries = 16 + 1; prdtEntry prdts[prdtEntries]; }; static_assert(sizeof(commandTable) == 128 + 16 * commandTable::prdtEntries); struct identifyDevice { uint16_t _junkA[27]; uint16_t model[20]; uint16_t _junkB[36]; uint16_t capabilities; uint16_t _junkC[16]; uint64_t maxLBA48; uint16_t _junkD[2]; uint16_t sectorSizeInfo; uint16_t _junkE[9]; uint16_t logicalSectorSize; uint16_t _junkF[139]; std::string getModel() const { char modelNative[41]; memcpy(modelNative, model, 40); modelNative[40] = 0; // Model name is returned as big endian, swap each two byte pair to fix that for (int i = 0; i < 40; i += 2) { std::swap(modelNative[i], modelNative[i + 1]); } std::string out{modelNative}; // Chop off the spaces at the end auto cutPos = out.find_last_not_of(' '); if (cutPos != std::string::npos) { out.resize(cutPos + 1); } return out; } // Returns logical and physical sector sizes std::pair<size_t, size_t> getSectorSize() const { if (sectorSizeInfo & (1 << 14) && !(sectorSizeInfo & (1 << 15))) { auto logical = 512; if (sectorSizeInfo & (1 << 12)) { // Logical sector size is greater than 512 bytes logical = logicalSectorSize; assert(logical > 512); } auto physical = (1 << (sectorSizeInfo & 0xF)) * logical; assert(physical <= 4096); return { logical, physical }; } else { // Word is invalid, just assume 512 / 512 return { 512, 512 }; } } bool supportsLba48() const { return capabilities & (1 << 10); } }; static_assert(sizeof(identifyDevice) == 512);
1,315
480
# # Collective Knowledge: compiler flags crowdtuning (crowdsource autotuning via spare computers such as mobile devices) # # See CK LICENSE.txt for licensing details # See CK COPYRIGHT.txt for copyright details # # Developer: <NAME>, <EMAIL>, http://fursin.net # cfg={} # Will be updated by CK (meta description of this module) work={} # Will be updated by CK (temporal data) ck=None # Will be updated by CK (initialized CK kernel) # Local settings compiler_choices='#choices#compiler_flags#' line='================================================================' fsummary='summary.json' fclassification='classification.json' fgraph='tmp-reactions-graph.json' ############################################################################## # Initialize module def init(i): """ Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ return {'return':0} ############################################################################## # show results def html_viewer(i): """ Input: { TBD (interactive_report) - if 'yes' output keys for interactive report (minimal) - if 'yes', minimal html } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 html (style) - styles - useful for plotting JavaScript-based graphs (predicted_opt) - if MILEPOST prediction is used } """ import os global cfg, work orig_module_uid=work['self_module_uid'] features=i.get('features',{}) xfeatures={} if len(features)>0: for k in features: xfeatures[k[2:]]=features[k] mcfg=i.get('module_cfg',{}) if len(mcfg)>0: cfg=mcfg mwork=i.get('module_work',{}) if len(mwork)>0: work=mwork ir=i.get('interactive_report','') mn=i.get('minimal','') st='' if i.get('force_url','')=='': url0=i['url_base'] else: url0=i['force_url'] ap=i.get('all_params',{}) ruoa=i.get('repo_uoa','') eruoa=i.get('experiment_repo_uoa','') # for interactive articles if eruoa!='': ruoa=eruoa muoa=work['self_module_uoa'] muid=work['self_module_uid'] duoa=i.get('data_uoa','') ik=cfg['improvements_keys'] # Load program optimization entry rx=ck.access({'action':'load', 'module_uoa':cfg['module_deps']['module'], 'data_uoa':cfg['module_deps']['program.optimization']}) if rx['return']>0: return rx urld=rx['dict'].get('url_discuss','') # Load Entry r=ck.access({'action':'load', 'repo_uoa':ruoa, 'module_uoa':muoa, 'data_uoa':duoa}) if r['return']>0: return {'return':0, 'html':'<b>CK error:</b> '+r['error']+'!'} p=r['path'] d=r['dict'] duid=r['data_uid'] # Load program module to get desc keys r=ck.access({'action':'load', 'module_uoa':cfg['module_deps']['module'], 'data_uoa':cfg['replay_desc']['module_uoa']}) if r['return']>0: return r pdesc=r.get('desc',{}) xxkey=cfg['replay_desc'].get('desc_key','') if xxkey!='': pdesc=pdesc.get(xxkey,{}) h='<center>\n' h+='\n\n<script language="JavaScript">function copyToClipboard (text) {window.prompt ("Copy to clipboard: Ctrl+C, Enter", text);}</script>\n\n' h+='<H2>Distinct solutions after online classification ('+cfg['desc']+')</H2>\n' h+='</center>\n' h+='<p>\n' cid=muid+':'+duid h+='<table border="0" cellpadding="4" cellspacing="0">\n' x=muid if muoa!=muid: x+=' ('+muoa+')' h+='<tr><td><b>Scenario UID</b></td><td>'+x+'</td></tr>\n' h+='<tr><td><b>Data UID</b></td><td>'+duid+'</td></tr>\n' h+='<tr><td><td></td></tr>\n' url5=ck.cfg.get('wiki_data_web','') if url5!='' or urld!='': if url5!='': x='<a href="'+url5+muid+'_'+duid+'">GitHub wiki</a>' if urld!='': if x!='': x+=', ' x+='<a href="'+urld+'">Google group</a>' h+='<tr><td><b>Discuss 
(optimizations to improve compilers,<br><a href="https://arxiv.org/abs/1506.06256">semantic/data set/hardware features<br>to improve predictions</a>, etc):</b></td><td>'+x+'</td></tr>\n' h+='<tr><td><td></td></tr>\n' urlx=url0+'action=get&cid='+cfg['module_deps']['program.optimization']+':'+duid+'&scenario_module_uoa='+muid+'&out=json' urls=url0+'action=pull&common_action=yes&cid='+muid+':'+duid+'&filename=summary.json' urlc=url0+'action=pull&common_action=yes&cid='+muid+':'+duid+'&filename=classification.json' x='' if urls!='': x+='[ <a href="'+urls+'">All solutions in JSON</a> ]' if urlc!='': if x!='': x+=', ' x+='[ <a href="'+urlc+'">Solutions\' classification in JSON</a> ]' if x!='': h+='<tr><td><b>Download:</b></td><td>'+x+'</td></tr>\n' h+='<tr><td><b>Reproduce all (with reactions):</b></td><td><i>ck replay '+cid+'</i></td></tr>\n' h+='<tr><td><td></td></tr>\n' pr=cfg.get('prune_results',[]) mm=d.get('meta',{}) em=d.get('extra_meta',{}) obj=mm.get('objective','') for k in pr: qd=k.get('desc','') qi=k.get('id','') qr=k.get('ref_uid','') qm=k.get('ref_module_uoa','') x=mm.get(qi,'') if x!='' and qm!='' and qr!='': xuid=mm.get(qr,'') if xuid!='': x='<a href="'+url0+'wcid='+qm+':'+xuid+'">'+x+'</a>' h+='<tr><td><b>'+qd+'</b></td><td>'+x+'</td></tr>\n' h+='<tr><td><b>Objective</b></td><td>'+obj+'</td></tr>\n' h+='<tr><td></td><td></td></tr>\n' kk=0 for kx in range(0, len(ik)): k=ik[kx] k1=k.replace('$#obj#$',obj) ik[kx]=k1 if pdesc.get(k1,{}).get('desc','')!='': k1=pdesc[k1]['desc'] kk+=1 h+='<tr><td><b>Improvement key IK'+str(kk)+'</b></td><td>'+k1+'</td></tr>\n' ik0=ik[0] # first key to sort h+='</table>\n' h+='<p>\n' h+='<center>\n' bgraph={"0":[], "1":[]} # graph with highest improvements # Load summary sols=[] psum=os.path.join(p, fsummary) if os.path.isfile(psum): rx=ck.load_json_file({'json_file':psum}) if rx['return']>0: return rx sols=rx['dict'] # Load classification file classification={} pcl=os.path.join(p, fclassification) if os.path.isfile(pcl): rx=ck.load_json_file({'json_file':pcl}) if rx['return']>0: return rx classification=rx['dict'] # If features, update similarity and find min/max predicted_opt='' if len(features)>0: dist_min=None dist_max=None for q in sols: em=q.get('extra_meta',{}) suid=q['solution_uid'] dv=em.get('flat',{}).get('##characteristics#compile#joined_compiler_flags#min','') cls=classification.get(suid,{}) if len(cls)>0: wl_best=len(cls.get('best',[])) wl_worst=len(cls.get('worst',[])) wl_best_prog_uoa='' wl_best_cmd_key='' if wl_best>0: wl_best_prog_uoa=cls['best'][0].get('workload',{}).get('program_uoa','') # for now only for the 1st program # howelever later should group all programs with best opt! 
wl_best_cmd_key=cls['best'][0].get('workload',{}).get('cmd_key','') if wl_best_prog_uoa!='' and wl_best_cmd_key!='': # Try to load program static features ra=ck.access({'action':'load', 'module_uoa':cfg['module_deps']['program.static.features'], 'data_uoa':wl_best_prog_uoa}) if ra['return']==0: dfeat=ra['dict'].get('features',{}).get('program_static_milepost_features',{}) # Load program to get hot kernel (for features) ra=ck.access({'action':'load', 'module_uoa':cfg['module_deps']['program'], 'data_uoa':wl_best_prog_uoa}) if ra['return']==0: func=ra['dict'].get('run_cmds',{}).get(wl_best_cmd_key,{}).get('hot_functions',[]) if len(func)>0: dft=dfeat.get(func[0]['name'],{}) # # Calculate similarity r=ck.access({'action':'calculate_similarity', 'module_uoa':cfg['module_deps']['program.static.features'], 'features1':xfeatures, 'features2':dft}) if r['return']==0: dist=r['distance'] if dist!=None: cls['distance']=dist if dist_max==None or dist>dist_max: dist_max=dist if dist_min==None or dist<dist_min: dist_min=dist h+='<p>\n' h+='$#graph#$\n' h+='<p>\n' rrr={'return':0} # Preparing output (useful for interactive graphs) # List solutions if len(sols)==0: h+='<h2>No distinct solutions found!</h2>\n' else: # Check host URL prefix and default module/action h+='<table class="ck_table" border="0">\n' h+=' <tr style="background-color:#cfcfff;">\n' h+=' <td colspan="1"></td>\n' if len(features)>0: h+=' <td colspan="1" style="background-color:#bfffbf;"></td>\n' h+=' <td colspan="1" style="background-color:#bfbfff;"></td>\n' h+=' <td colspan="'+str(len(ik))+'" align="center"><b>Improvements (<4% variation)</b></td>\n' h+=' <td colspan="2" align="center" style="background-color:#bfbfff;"></td>\n' h+=' <td colspan="4"></td>\n' h+=' <td colspan="4" align="center" style="background-color:#bfbfff;"><b>Distinct workload for highest improvement</b></td>\n' h+=' <td colspan="4"></td>\n' h+=' <td colspan="1" align="center" style="background-color:#bfbfff;"></td>\n' h+=' </tr>\n' h+=' <tr style="background-color:#cfcfff;">\n' h+=' <td><b>\n' h+=' #\n' h+=' </b></td>\n' if len(features)>0: h+=' <td style="background-color:#bfffbf;"><b>\n' h+=' <a href="http://ctuning.org/wiki/index.php/CTools:MilepostGCC:StaticFeatures:MILEPOST_V2.1">MILEPOST features and NN distance (red - most close)</a>\n' h+=' </b></td>\n' h+=' <td style="background-color:#bfbfff;"><b>\n' h+=' Solution UID\n' h+=' </b></td>\n' for k in range(0, len(ik)): h+=' <td align="right"><b>\n' h+=' IK'+str(k+1)+'\n' h+=' </b></td>\n' h+=' <td align="center" style="background-color:#bfbfff;"><b>\n' h+=' New distinct optimization choices\n' h+=' </b></td>\n' h+=' <td align="center" style="background-color:#bfbfff;" align="right"><b>\n' h+=' Ref\n' h+=' </b></td>\n' h+=' <td align="center"><b>\n' h+=' Best species\n' h+=' </b></td>\n' h+=' <td align="center"><b>\n' h+=' Worst species\n' h+=' </b></td>\n' h+=' <td align="center"><b>\n' h+=' Touched\n' h+=' </b></td>\n' h+=' <td align="center"><b>\n' h+=' Iters\n' h+=' </b></td>\n' h+=' <td style="background-color:#bfbfff;"><b>\n' h+=' Program\n' h+=' </b></td>\n' h+=' <td style="background-color:#bfbfff;"><b>\n' h+=' CMD\n' h+=' </b></td>\n' h+=' <td style="background-color:#bfbfff;"><b>\n' h+=' Dataset\n' h+=' </b></td>\n' h+=' <td style="background-color:#bfbfff;"><b>\n' h+=' Dataset file\n' h+=' </b></td>\n' h+=' <td align="right"><b>\n' h+=' CPU freq (MHz)\n' h+=' </b></td>\n' h+=' <td align="right"><b>\n' h+=' Cores\n' h+=' </b></td>\n' h+=' <td><b>\n' h+=' Platform\n' h+=' </b></td>\n' h+=' <td><b>\n' 
h+=' OS\n' h+=' </b></td>\n' h+=' <td align="center" style="background-color:#bfbfff;">\n' h+=' <b>Replay</b>\n' h+=' </td>\n' h+=' </tr>\n' # List num=0 iq=-1 iq1=0 res={} sres=[] ires=0 em={} cls={} tbl=[] while iq1<len(sols): # already sorted by most "interesting" solutions (such as highest speedups) xtbl={} xreuse_shared_solution='' if iq!=iq1: num+=1 iq+=1 q=sols[iq] em=q.get('extra_meta',{}) suid=q['solution_uid'] cls=classification.get(suid,{}) xcls=cls.get('highest_improvements_workload',{}) program_uoa=xcls.get('program_uoa','') cmd=xcls.get('cmd_key','') dataset_uoa=xcls.get('dataset_uoa','') dataset_file=xcls.get('dataset_file','') wl_best=len(cls.get('best',[])) wl_worst=len(cls.get('worst',[])) dist=cls.get('distance',None) xreuse_shared_solution=cfg['module_deps']['program.optimization']+':'+duid+'-'+muid+'-'+suid url_wl=url0+'action=get_workloads&cid='+cfg['module_deps']['program.optimization']+':'+duid+'&scenario_module_uoa='+muid+'&solution_uid='+suid+'&out=json' url_wl_best=url_wl+'&key=best' url_wl_worst=url_wl+'&key=worst' res={} ref_res={} sres=[] ires=0 # Try to load all solutions p1=os.path.join(p, suid) try: dirList=os.listdir(p1) except Exception as e: None else: for fn in dirList: if fn.startswith('ckp-') and fn.endswith('.flat.json'): uid=fn[4:-10] px=os.path.join(p1, fn) rx=ck.load_json_file({'json_file':px}) if rx['return']>0: return rx d1=rx['dict'] px=os.path.join(p1,'ckp-'+uid+'.features_flat.json') if rx['return']>0: return rx d2=rx['dict'] x={'flat':d1, 'features_flat':d2} px=os.path.join(p1, 'ckp-'+uid+'.features.json') rx=ck.load_json_file({'json_file':px}) if rx['return']>0: return rx dx=rx['dict'] if dx.get('permanent','')=='yes': ref_res==x else: res[uid]=x rr=list(res.keys()) sres=sorted(rr, key=lambda v: (float(res[v].get('flat',{}).get(ik0,0.0))), reverse=True) rr={} if ires<len(sres): rr=res.get(sres[ires],{}) ires+=1 iterations=q.get('iterations',1) touched=q.get('touched',1) choices=q['choices'] ref_sol=q.get('ref_choices',{}) ref_sol_order=q.get('ref_choices_order',[]) target_os=choices.get('target_os','') speedup='' cmd1='' cmd2='' xtbl['solution_num']=num ss='S'+str(num) h+=' <tr>\n' h+=' <td valign="top" style="background-color:#efefff;">\n' if ires<2: h+=' '+ss+'\n' h+=' </td>\n' flags=rr.get('flat',{}).get('##characteristics#compile#joined_compiler_flags#min','') if len(features)>0: if dist!=None and dist_min!=None and dist_max!=None: if dist==dist_min: predicted_opt=flags xdist="%.3f" % dist col='FFFFFF' if dist<=1: col1=int(55+((dist-dist_min)/(1-dist_min))*200) col2=hex(col1)[2:] if (col1<16): col2='0'+col2 col='FF'+col2+col2 else: col1=int(55+(1-((dist-1)/(dist_max-1)))*200) col2=hex(col1)[2:] if (col1<16): col2='0'+col2 col=col2+col2+'FF' h+=' <td valign="top" align="right" style="background-color: #'+col+'">'+xdist+'\n' else: h+=' <td>\n' h+=' </td>\n' h+=' <td valign="top">\n' if ires<2 and urlx!='': h+=' <a href="'+urlx+'&solution_uid='+suid+'">'+suid+'</a>\n' h+=' </td>\n' xtbl['solution_uid']=suid for k in range(0, len(ik)): h+=' <td valign="top" align="right" style="background-color:#efefff;">\n' # dv=rr.get('flat',{}).get(ik[k],'') dv='' dvw='' points=q.get('points',[]) iresx=ires-1 # if iresx<len(points): # dv=points[iresx].get('improvements_best',{}).get(ik[k],'') # dvw=points[iresx].get('improvements_worst',{}).get(ik[k],'') # Add to graph (first dimension and first solution) # if k==0 and ires==1: dv=cls.get('highest_improvements',{}).get(ik[k],None) dvw=cls.get('highest_degradations',{}).get(ik[k],None) 
xtbl['highest_improvement_ik'+str(k+1)]=dv xtbl['highest_degradation_ik'+str(k+1)]=dvw if k==0: bgraph['0'].append([ss,dv]) bgraph['1'].append([ss,dvw]) y='' if type(dv)==int or type(dv)==ck.type_long: y=str(dv) else: try: y=('%.2f' % dv) except Exception as e: y=dv pass if dv!='': if dv>1.0: y='<span style="color:#bf0000">'+y+'</span>' elif dv!=0: y='<span style="color:#0000bf">'+y+'</span>' h+=str(y)+'\n' h+=' </td>\n' xtbl['best_flags']=flags h+=' <td valign="top">\n' h+=' '+flags+'\n' h+=' </td>\n' h+=' <td valign="top" align="center">\n' if ires<2: # Ideally should add pipeline description somewhere # to properly recreate flags. However since it is most of the time -Ox # we don't need to make it complex at the moment ry=rebuild_cmd({'choices':ref_sol, 'choices_order':ref_sol_order, 'choices_desc':{}}) if ry['return']>0: return ry ref=ry['cmd'] h+=' '+ref+'\n' xtbl['ref_flags']=ref h+=' \n' h+=' </td>\n' h+=' <td valign="top" align="center" style="background-color:#efefff;">\n' if ires<2: h+=' <a href="'+url_wl_best+'">'+str(wl_best)+'</a>\n' xtbl['best_species']=wl_best h+=' </td>\n' h+=' <td valign="top" align="center" style="background-color:#efefff;">\n' if ires<2: h+=' <a href="'+url_wl_worst+'">'+str(wl_worst)+'</a>\n' xtbl['worst_species']=wl_worst h+=' </td>\n' h+=' <td valign="top" align="center" style="background-color:#efefff;">\n' if ires<2: h+=' '+str(touched)+'\n' h+=' </td>\n' h+=' <td valign="top" align="center" style="background-color:#efefff;">\n' if ires<2: h+=' '+str(iterations)+'\n' h+=' </td>\n' h+=' <td valign="top">\n' if ires<2: h+=' <a href="'+url0+'wcid=program:'+program_uoa+'">'+program_uoa+'</a>\n' h+=' </td>\n' h+=' <td valign="top">\n' if ires<2: h+=' '+cmd+'\n' h+=' </td>\n' h+=' <td valign="top">\n' if ires<2: h+=' <a href="'+url0+'wcid=dataset:'+dataset_uoa+'">'+dataset_uoa+'</a>\n' h+=' </td>\n' h+=' <td valign="top">\n' if ires<2: h+=' <a href="'+url0+'action=pull&common_func=yes&cid=dataset:'+dataset_uoa+'&filename='+dataset_file+'">'+dataset_file+'</a>\n' h+=' </td>\n' # h+=' <td valign="top" align="right">\n' # if ires<2: # h+=' '+str(em.get('kernel_repetitions',-1))+'\n' # h+=' </td>\n' h+=' <td valign="top" align="right" style="background-color:#efefff;">\n' if ires<2: x='' qq=em.get('cpu_cur_freq',[]) for q in qq: xq=qq[q] if x!='': x+=', ' x+=str(xq) h+=' '+x+'\n' h+=' </td>\n' h+=' <td valign="top" align="right" style="background-color:#efefff;">\n' if ires<2: qq=em.get('cpu_num_proc',1) h+=' '+str(qq)+'\n' h+=' </td>\n' h+=' <td valign="top" style="background-color:#efefff;">\n' if ires<2: h+=' '+str(em.get('platform_name',''))+'\n' h+=' </td>\n' h+=' <td valign="top" style="background-color:#efefff;">\n' if ires<2: h+=' '+str(em.get('os_name',''))+'\n' h+=' </td>\n' x='ck replay '+cid+' --solution_uid='+suid y=ck.cfg.get('add_extra_to_replay','') if y!='': x+=' '+y xtbl['replay']=x h+=' <td valign="top" align="center"><input type="button" class="ck_small_button" style="height:40px;" onClick="copyToClipboard(\''+x+'\');" value="Copy CMD\nto clipboard"><br>\n' h+=' <input type="button" class="ck_small_button" style="height:40px;" onClick="copyToClipboard(\'--shared_solution_cid='+xreuse_shared_solution+'\');" value="Copy CID\nto clipboard"></td>\n' h+=' </tr>\n' else: iq1+=1 if len(xtbl)>0: tbl.append(xtbl) if ir=='yes': rrr['table']=tbl h+='</table>\n' if predicted_opt!='': h+='<br><br><b>Using machine learning to predict optimizations:</b><br><br><i>'+predicted_opt+'</i><br><br>\n' h+='<br><a href="http://arxiv.org/abs/1506.06256"><img 
src="'+url0+'action=pull&common_action=yes&cid='+cfg['module_deps']['module']+':'+orig_module_uid+'&filename=images/image-workflow1.png"></a>\n' h+='</center>\n' h+='<br><br>\n' rx=ck.access({'action':'links', 'module_uoa':cfg['module_deps']['program.optimization']}) if rx['return']>0: return rx h+=rx['html'] # Plot graph hg='' ftmp='' d3_div='ck_interactive' if i.get('graph_d3_div','')!='': d3_div=i['graph_d3_div'] if len(bgraph['0'])>0: ii={'action':'plot', 'module_uoa':cfg['module_deps']['graph'], "table":bgraph, "h_lines":[1.0], "ymin":0, "ignore_point_if_none":"yes", "plot_type":"d3_2d_bars", "display_y_error_bar":"no", "title":"Powered by Collective Knowledge", "axis_x_desc":"Distinct optimization solutions (highest improvement vs highest degradation)", "axis_y_desc":"Max improvement ( IK1 = Ref / Solution )", "plot_grid":"yes", "d3_div":d3_div, "image_width":"900", "image_height":"400", "wfe_url":url0} # Trick to save to file (for interactive/live articles) if ir=='yes': import copy rrr['graph_dict']=copy.deepcopy(ii) if ap.get('fgg_save_graph_to_file','')=='yes': import copy iii=copy.deepcopy(ii) iii["substitute_x_with_loop"]="yes" iii["plot_type"]="mpl_2d_bars" if 'ymin' in iii: del(iii['ymin']) if 'ymax' in iii: del(iii['ymax']) # Prepare batch file rx=ck.gen_tmp_file({'prefix':'tmp-', 'suffix':'.json'}) if rx['return']>0: return rx ftmp=rx['file_name'] rx=ck.save_json_to_file({'json_file':ftmp, 'dict':iii, 'sort_keys':'yes'}) if rx['return']>0: return rx r=ck.access(ii) if r['return']==0: x=r.get('html','') if x!='': st=r.get('style','') hg='<div id="ck_box_with_shadow" style="width:920px;">\n' if ftmp!='': hg+='<center><b>Note: graph info has been saved to file '+ftmp+' for interactive publications</b></center>' hg+=' <div id="'+d3_div+'" style="text-align:center">\n' hg+=x+'\n' hg+=' </div>\n' hg+='</div>\n' if mn=='yes': h=hg else: h=h.replace('$#graph#$', hg) rrr['html']=h rrr['style']=st rrr['predicted_opt']=predicted_opt return rrr ############################################################################## # crowdsource these experiments def crowdsource(i): """ Input: { See 'crowdsource program.optimization' (compiler_env_uoa) - fix compiler environment } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ global cfg, work import copy mcfg=i.get('module_cfg',{}) if len(mcfg)>0: cfg=mcfg mwork=i.get('module_work',{}) if len(mwork)>0: work=mwork # Setting output o=i.get('out','') oo='' if o=='con': oo='con' quiet=i.get('quiet','') er=i.get('exchange_repo','') if er=='': er=ck.cfg['default_exchange_repo_uoa'] esr=i.get('exchange_subrepo','') if esr=='': esr=ck.cfg['default_exchange_subrepo_uoa'] if i.get('local','')=='yes': er='local' esr='' la=i.get('local_autotuning','') # Get user user='' mcfg={} ii={'action':'load', 'module_uoa':'module', 'data_uoa':cfg['module_deps']['program.optimization']} r=ck.access(ii) if r['return']==0: mcfg=r['dict'] dcfg={} ii={'action':'load', 'module_uoa':mcfg['module_deps']['cfg'], 'data_uoa':mcfg['cfg_uoa']} r=ck.access(ii) if r['return']>0 and r['return']!=16: return r if r['return']!=16: dcfg=r['dict'] user=dcfg.get('user_email','') ceuoa=i.get('compiler_env_uoa', '') if ceuoa!='': rx=ck.access({'action':'load', 'module_uoa':cfg['module_deps']['env'], 'data_uoa':ceuoa}) if rx['return']>0: return rx ceuoa=rx['data_uid'] # Initialize local environment for program optimization *********************************************************** pi=i.get('platform_info',{}) if len(pi)==0: 
ii=copy.deepcopy(i) ii['action']='initialize' ii['module_uoa']=cfg['module_deps']['program.optimization'] ii['exchange_repo']=er ii['exchange_subrepo']=esr r=ck.access(ii) if r['return']>0: return r pi=r['platform_info'] user=r.get('user','') hos=pi['host_os_uoa'] hosd=pi['host_os_dict'] tos=pi['os_uoa'] tosd=pi['os_dict'] tbits=tosd.get('bits','') remote=tosd.get('remote','') tdid=pi['device_id'] program_tags=i.get('program_tags','') if program_tags=='' and i.get('local_autotuning','')!='yes' and i.get('data_uoa','')=='': program_tags=cfg['program_tags'] # Check that has minimal dependencies for this scenario *********************************************************** sdeps=i.get('dependencies',{}) # useful to preset inside crowd-tuning if len(sdeps)==0: sdeps=copy.deepcopy(cfg['deps']) if len(sdeps)>0: if o=='con': ck.out(line) ck.out('Resolving software dependencies required for this scenario ...') ck.out('') if ceuoa!='': x=sdeps.get('compiler',{}) if len(x)>0: if 'cus' in x: del(x['cus']) if 'deps' in x: del(x['deps']) x['uoa']=ceuoa sdeps['compiler']=x ii={'action':'resolve', 'module_uoa':cfg['module_deps']['env'], 'host_os':hos, 'target_os':tos, 'device_id':tdid, 'deps':sdeps, 'add_customize':'yes'} if quiet=='yes': ii['random']='yes' else: ii['out']=oo rx=ck.access(ii) if rx['return']>0: return rx sdeps=rx['deps'] # Update deps (add UOA) cpu_name=pi.get('features',{}).get('cpu',{}).get('name','') compiler_soft_uoa=sdeps.get('compiler',{}).get('dict',{}).get('soft_uoa','') compiler_env=sdeps.get('compiler',{}).get('bat','') compiler_tool=sdeps.get('compiler',{}).get('dict',{}).get('env',{}).get('CK_CC','') plat_extra={} pft=pi.get('features',{}) for q in pft: if q.endswith('_uid'): plat_extra[q]=pft[q] elif type(pft[q])==dict and pft[q].get('name','')!='': plat_extra[q+'_name']=pft[q]['name'] # Detect real compiler version *********************************************************** if o=='con': ck.out(line) ck.out('Detecting compiler version ...') ii={'action':'internal_detect', 'module_uoa':cfg['module_deps']['soft'], 'data_uoa':compiler_soft_uoa, 'host_os':hos, 'target_os':tos, 'target_device_id':tdid, 'env':compiler_env, 'tool':compiler_tool} r=ck.access(ii) if r['return']>0: return r compiler_version=r['version_str'] compiler=cfg.get('compiler_name','')+' '+compiler_version if o=='con': ck.out('') ck.out('* Compiler: '+compiler) ck.out('* CPU: '+cpu_name) # Start preparing input to run program.optimization ii=copy.deepcopy(i) ii['action']='run' ii['module_uoa']=cfg['module_deps']['program.optimization'] ii['host_os']=hos ii['target_os']=tos ii['target_device_id']=tdid ii['dependencies']=sdeps ii['scenario_cfg']=cfg ii['platform_info']=pi ii['program_tags']=program_tags ii['scenario_module_uoa']=work['self_module_uid'] ii['experiment_meta']={'cpu_name':cpu_name, 'compiler':compiler} ii['experiment_meta_extra']=plat_extra ii['exchange_repo']=er ii['exchange_subrepo']=esr ii['user']=user # Select sub-scenario ******************************************************************** from random import randint ss=1 # num of scenarios sx=randint(1,ss) rr={'return':0} if sx==1 or la=='yes': # **************************************************************** explore random program/dataset sdesc='explore random program/cmd/data set' if o=='con': ck.out('') ck.out(' ****** Sub-scenario: '+sdesc+' ******') ii['subscenario_desc']=sdesc rr=ck.access(ii) if rr['return']>0: return rr rr['platform_info']=pi return rr ############################################################################## # 
rebuild compiler cmd from choices def rebuild_cmd(i): """ Input: { choices - dict of choices choices_order - choices order choices_desc - dict of choices desc } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 cmd - compiler command line pruned_choices - leave only compiler flags } """ cmd='' choices=i.get('choices',{}) corder=i.get('choices_order',[]) cdesc=i.get('choices_desc',{}) for q in sorted(corder): v=choices.get(q, None) d=cdesc.get(q, None) if v!=None: if cmd!='': cmd+=' ' cmd+=v return {'return':0, 'cmd':cmd} ############################################################################## # replay optimization def replay(i): """ See 'replay program.optimization' """ i['module_uoa']=cfg['module_deps']['program.optimization'] i['module_ref_uoa']=work['self_module_uid'] i['module_cfg']=copy.deepcopy(cfg) i['module_work']=copy.deepcopy(work) return ck.access(i) ############################################################################## # prune compiler flags to find minimal set of choices def prune(i): """ See 'replay program.optimization' """ i['module_uoa']=cfg['module_deps']['program.optimization'] i['module_ref_uoa']=work['self_module_uid'] i['module_cfg']=copy.deepcopy(cfg) i['module_work']=copy.deepcopy(work) return ck.access(i) ############################################################################## # prepare graph for interactive reports def show(i): """ Input: { (from_repo) - change repository (useful for remote-ck) (change_module_uoa) - change module_uoa (to select scenario module) (force_url) - useful to redirect interactive graphs to external repo (save_to_file) - output to file (for auto-generated LaTex and interactive graphs via CK) (minimal) - if 'yes', return minimal html } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ import os i['action']='process_interactive_graph' i['interactive_report']='yes' i['out']='' from_repo=i.get('from_repo','') if from_repo!='': i['repo_uoa']=from_repo rr=ck.access(i) if rr['return']>0: return rr if i.get('minimal','')=='yes': rr['html']=rr.get('graph_html','') stf=i.get('save_to_file','') stf0='' # Checks to avoid hacking if stf!='': stf=os.path.basename(stf) if not stf.endswith('.png') and not stf.endswith('.pdf'): stf='' if stf!='': stf0=os.path.splitext(stf)[0] # Save graph iii=rr.get('graph_dict',{}) iii["substitute_x_with_loop"]="yes" iii["plot_type"]="mpl_2d_bars" if 'ymin' in iii: del(iii['ymin']) if 'ymax' in iii: del(iii['ymax']) iii['out_to_file']=stf if i.get("save_graph_title",'')!='': iii['title']=i['save_graph_title'] if i.get("save_graph_axis_x_desc",'')!='': iii['axis_x_desc']=i['save_graph_axis_x_desc'] if i.get("save_graph_axis_y_desc",'')!='': iii['axis_y_desc']=i['save_graph_axis_y_desc'] if i.get("save_graph_axis_x_rotation",'')!='': iii['axis_x_rotation']=i['save_graph_axis_x_rotation'] if i.get("save_font_size",'')!='': iii['font_size']=i['save_font_size'] r=ck.access(iii) if r['return']>0: return r h ='<table border="1" cellpadding="3" cellspacing="0">\n' h+=' <tr>\n' h+=' <td align="right"><b>Solution</b></td>\n' h+=' <td><b>Pruned flags (complexity reduction)</b></td>\n' h+=' <td><b>Best species</b></td>\n' h+=' <td><b>Worst species</b></td>\n' h+=' </tr>\n' t =" \\begin{tabular}{|r|p{4.5in}|p{0.5in}|p{0.5in}|}\n" t+=" \\hline\n" t+=" \\textbf{Solution} & \\textbf{Pruned flags (complexity reduction)} & \\textbf{Best species} & \\textbf{Worst species} \\\\ \n" t+=" \\hline\n" tbl=rr.get('table',[]) 
for q in tbl: sn=q['solution_num'] bf=q['best_flags'] bs=q.get('best_species', 0) ws=q.get('worst_species', 0) t+=" "+str(sn)+" & "+bf+" & "+str(bs)+" & "+str(ws)+" \\\\\n" t+=" \\hline\n" h+=' <tr>' h+=' <td>'+str(sn)+'</td>\n' h+=' <td>'+bf+'</td>\n' h+=' <td>'+str(bs)+'</td>\n' h+=' <td>'+str(ws)+'</td>\n' h+=' </tr>' t+=" \\end{tabular}" h+='</table>\n' # Save tex file r=ck.save_text_file({'text_file':stf0+'.solutions.tex', 'string':t}) if r['return']>0: return r # Save html file r=ck.save_text_file({'text_file':stf0+'.solutions.html', 'string':h}) if r['return']>0: return r return rr ############################################################################## # prepare graph for interactive reports def process_interactive_graph(i): """ Input: { (change_module_uoa) } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ i['action']='html_viewer' i['out']='' change_module_uoa=i.get('change_module_uoa','') if change_module_uoa!='': i['module_uoa']=change_module_uoa r=ck.access(i) if 'html' in r: r['graph_html']=r['html'] del(r['html']) return r
21,844
778
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7 # importing the Kratos Library import KratosMultiphysics from KratosMultiphysics import Vector import KratosMultiphysics.SwimmingDEMApplication as SDEM import sys class L2ErrorCalculatorUtility: def __init__(self, model, parameters): """The default constructor of the class. Keyword arguments: self -- It signifies an instance of a class. model -- the container of the different model parts. """ self.model_part = model self.u_characteristic = parameters["fluid_parameters"]["processes"]["initial_conditions_process_list"][0]["Parameters"]["benchmark_parameters"]["u_char"].GetDouble() for element in self.model_part.Elements: rho = element.Properties.GetValue(KratosMultiphysics.DENSITY) break self.p_characteristic = (1/2)*rho*self.u_characteristic**2 self.model = KratosMultiphysics.Model() self.element_name = "Element3D4N" self.error_model_part = self.model.CreateModelPart("ErrorModelPart") self.error_model_part.AddNodalSolutionStepVariable(SDEM.VECTORIAL_ERROR) self.error_model_part.AddNodalSolutionStepVariable(SDEM.SCALAR_ERROR) self.error_model_part.AddNodalSolutionStepVariable(SDEM.ERROR_X) self.error_model_part.AddNodalSolutionStepVariable(SDEM.ERROR_Y) self.error_model_part.AddNodalSolutionStepVariable(SDEM.ERROR_Z) self.error_model_part.AddNodalSolutionStepVariable(SDEM.ERROR_P) model_part_cloner = KratosMultiphysics.ConnectivityPreserveModeler() model_part_cloner.GenerateModelPart(self.model_part, self.error_model_part, self.element_name) self.error_model_part.ProcessInfo = self.model_part.ProcessInfo def CalculateL2(self): self.ComputeDofsErrors(self.error_model_part) self.velocity_error_norm = self.VectorL2ErrorNorm(self.error_model_part) self.pressure_error_norm = self.ScalarL2ErrorNorm(self.error_model_part) return self.velocity_error_norm/self.u_characteristic, self.pressure_error_norm/self.p_characteristic, self.error_model_part def ComputeDofsErrors(self, error_model_part): SDEM.L2ErrorNormCalculator().ComputeDofsErrors(self.error_model_part) def VectorL2ErrorNorm(self, error_model_part): return SDEM.L2ErrorNormCalculator().GetL2VectorErrorNorm(self.error_model_part) def ScalarL2ErrorNorm(self, error_model_part): return SDEM.L2ErrorNormCalculator().GetL2ScalarErrorNorm(self.error_model_part)
1,032
30,785
package jadx.plugins.input.java.data.code.trycatch; import jadx.api.plugins.input.data.ICatch; import jadx.api.plugins.input.data.ITry; import jadx.api.plugins.utils.Utils; public class JavaTryData implements ITry { private final int startOffset; private final int endOffset; private ICatch catchHandler; public JavaTryData(int startOffset, int endOffset) { this.startOffset = startOffset; this.endOffset = endOffset; } @Override public ICatch getCatch() { return catchHandler; } public void setCatch(ICatch catchHandler) { this.catchHandler = catchHandler; } @Override public int getStartOffset() { return startOffset; } @Override public int getEndOffset() { return endOffset; } @Override public int hashCode() { return startOffset + 31 * endOffset; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof JavaTryData)) { return false; } JavaTryData that = (JavaTryData) o; return startOffset == that.startOffset && endOffset == that.endOffset; } @Override public String toString() { return "Try{" + Utils.formatOffset(startOffset) + " - " + Utils.formatOffset(endOffset) + ": " + catchHandler + '}'; } }
427
1,030
<reponame>ahartikainen/pystan import numpy as np import pytest import stan np.random.seed(1) program_code = """ data { int<lower=0> N; int<lower=0> p; matrix[N,p] x; vector[N] y; } parameters { vector[p] beta; real<lower=0> sigma; } model { y ~ normal(x * beta, sigma); } """ n, p = 10000, 3 X = np.random.normal(size=(n, p)) X = (X - np.mean(X, axis=0)) / np.std(X, ddof=1, axis=0, keepdims=True) beta_true = (1, 3, 5) y = np.dot(X, beta_true) + np.random.normal(size=n) data = {"N": n, "p": p, "x": X, "y": y} @pytest.fixture(scope="module") def posterior(): return stan.build(program_code, data=data, random_seed=1) def test_linear_regression(posterior): fit = posterior.sample(num_chains=4) assert len(fit) == 2 # two parameters (beta, sigma) assert 0 < fit["sigma"].mean() < 2 assert np.allclose(fit["beta"].mean(axis=1), beta_true, atol=0.05)
397
2,645
/* * Copyright 2021 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.data.elasticsearch.core.query; import org.springframework.lang.Nullable; import org.springframework.util.Assert; /** * Implementation of RescorerQuery to be used for rescoring filtered search results. * * @author <NAME> * @since 4.2 */ public class RescorerQuery { private final Query query; private ScoreMode scoreMode = ScoreMode.Default; @Nullable private Integer windowSize; @Nullable private Float queryWeight; @Nullable private Float rescoreQueryWeight; public RescorerQuery(Query query) { Assert.notNull(query, "query must not be null"); this.query = query; } public Query getQuery() { return query; } public ScoreMode getScoreMode() { return scoreMode; } @Nullable public Integer getWindowSize() { return windowSize; } @Nullable public Float getQueryWeight() { return queryWeight; } @Nullable public Float getRescoreQueryWeight() { return rescoreQueryWeight; } public RescorerQuery withScoreMode(ScoreMode scoreMode) { Assert.notNull(scoreMode, "scoreMode must not be null"); this.scoreMode = scoreMode; return this; } public RescorerQuery withWindowSize(int windowSize) { this.windowSize = windowSize; return this; } public RescorerQuery withQueryWeight(float queryWeight) { this.queryWeight = queryWeight; return this; } public RescorerQuery withRescoreQueryWeight(float rescoreQueryWeight) { this.rescoreQueryWeight = rescoreQueryWeight; return this; } public enum ScoreMode { Default, Avg, Max, Min, Total, Multiply } }
657
1,077
import logging import json import re from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult from anime_downloader.sites import helpers from anime_downloader.extractors import get_extractor logger = logging.getLogger(__name__) class AnimeBinge(Anime, sitename='animebinge'): sitename = 'animebinge' @classmethod def search(cls, query): response = helpers.get('https://animebinge.net/search', params={'term': query}) soup = helpers.soupify(response) results = soup.select('a#epilink') search_results = [ SearchResult( title=x.text, url=x['href'] ) for x in results ] return search_results def _scrape_episodes(self): eps = helpers.soupify(helpers.get(self.url)).select('div.episode-wrap > a') eps = [x['href'] for x in eps] eps.reverse() return eps def _scrape_metadata(self): self.title = helpers.soupify(helpers.get(self.url)).select_one('div.contingo > p').text class AnimeBingeEpisode(AnimeEpisode, sitename='animebinge'): def _get_sources(self): html = helpers.get(self.url).text # Matches: # var episode = {"id":"187961", # "url":"https:\/\/animebinge.net\/episode\/187961-yakusoku-no-neverland-episode-1", # "lang":"dubbed"}; </script> # And parses the json in the script. episode_regex = r'var\s*episode\s*=\s*({[\W\w]*?);\s*<\/script>' source = re.search(episode_regex, str(html)) if source: source_json = json.loads(source.group(1))['videos'] else: return '' logger.debug('Sources: {}'.format(source_json)) mappings = { 'mp4upload': 'https://www.mp4upload.com/embed-{}.html', 'trollvid': 'https://trollvid.net/embed/{}', 'xstreamcdn': 'https://xstreamcdn.com/v/{}' } sources_list = [] for i in source_json: if mappings.get(i.get('host')): extractor = 'no_extractor' if not get_extractor(i['host']) else i['host'] sources_list.append({ 'extractor': extractor, 'url': mappings[i['host']].format(i['id']), 'server': i['host'], 'version': i.get('type', 'subbed') }) return self.sort_sources(sources_list)
1,291
4,047
#include<nonexisting.h> void func(void) { printf("This won't work.\n"); }
28
826
// SPDX-License-Identifier: BSD-3-Clause // Copyright Contributors to the OpenColorIO Project. #ifndef INCLUDED_OCIO_SYSTEM_MONITOR_H #define INCLUDED_OCIO_SYSTEM_MONITOR_H #include <string> #include <vector> #include <OpenColorIO/OpenColorIO.h> namespace OCIO_NAMESPACE { class SystemMonitorsImpl : public SystemMonitors { public: SystemMonitorsImpl() = default; SystemMonitorsImpl(const SystemMonitorsImpl &) = delete; SystemMonitorsImpl & operator= (const SystemMonitorsImpl &) = delete; virtual ~SystemMonitorsImpl() = default; bool isSupported() const noexcept override; size_t getNumMonitors() const noexcept override; const char * getMonitorName(size_t idx) const override; const char * getProfileFilepath(size_t idx) const override; static std::string GetICCProfileFromMonitorName(const char * monitorName); void getAllMonitors(); private: struct MonitorInfo { MonitorInfo() = default; MonitorInfo(const std::string & monitorName, const std::string & ICCFilepath) : m_monitorName(monitorName) , m_ICCFilepath(ICCFilepath) {} ~MonitorInfo() = default; std::string m_monitorName; // Name built using the vendor information from the monitor if accessible. std::string m_ICCFilepath; // The ICC profile path. }; std::vector<MonitorInfo> m_monitors; }; } // namespace OCIO_NAMESPACE #endif // INCLUDED_OCIO_SYSTEM_MONITOR_H
533
460
package org.sxdata.jingwei.dao; import org.springframework.stereotype.Repository; import org.sxdata.jingwei.entity.DirectoryEntity; /** * Created by cRAZY on 2017/2/27. */ @Repository public interface DirectoryDao { public DirectoryEntity getDirectoryById(Integer id); }
92
380
/**
 * Author: <NAME>
 * Date: 23 July 2021 (Friday)
 */

import java.util.*;

public class SmallestMissingPositive{
    public static void main(String args[]){
        Solution s = new Solution();
        int arr[] = {1,2,3,4,5};
        System.out.println(s.smallestMissingPositive(arr,arr.length));
    }
}

class Solution{
    //solution 1 (worst case O(N^2))
    int smallestMissingPositi(int arr[],int n){
        int i;
        for(i=1;i<=n;i++){
            if(!isPresent(arr,i)){
                return i;
            }
        }
        return i;
    }
    boolean isPresent(int arr[],int num){
        for(int i=0;i<arr.length;i++){
            if(arr[i]==num)
                return true;
        }
        return false;
    }

    //solution 2 O(n)
    int smallestMissingPositive(int arr[],int n){
        if(arr==null || arr.length==0)
            return 1;

        boolean containsOne = false;

        //step 1 : replace negative elements and elements>n by '1'
        for(int i=0;i<n;i++){
            if(arr[i]==1){
                containsOne = true;
            }else if(arr[i]<=0 || arr[i]>n){
                arr[i]=1;
            }
        }
        if(!containsOne) return 1;

        //step 2 : index referencing (make the indices of the respective elements negative)
        for(int i=0;i<n;i++){
            int index = Math.abs(arr[i])-1; //because index starts from 0
            if(arr[index] > 0)
                arr[index] = -1 * arr[index];
        }

        //step 3 : traverse and return the first positive position (index+1), or n+1
        for(int i=0;i<n;i++){
            if(arr[i]>0)
                return i+1;
        }
        return n+1;
    }
}
622
903
<filename>vole-portal-data/src/main/java/com/github/vole/portal/data/mapper/SysRoleMenuMapper.java<gh_stars>100-1000
package com.github.vole.portal.data.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.github.vole.portal.data.model.entity.SysRoleMenu;

import java.util.List;

/**
 *
 * SysRoleMenu table mapper (database access layer) interface
 *
 */
public interface SysRoleMenuMapper extends BaseMapper<SysRoleMenu> {

    /**
     * Get the menu permissions of the roles a user belongs to, by user id
     */
    List<String> selectRoleMenuIdsByUserId(String uid);
}
264
3,269
# Time: O(n + logn + log(logn) + ...) = O(n)
# Space: O(1)

from functools import reduce  # available since Python 2.6; required import on Python 3


class Solution(object):
    def getLucky(self, s, k):
        """
        :type s: str
        :type k: int
        :rtype: int
        """
        total = reduce(lambda total, x: total+sum(divmod((ord(x)-ord('a')+1), 10)), s, 0)
        while k > 1 and total > 9:
            new_total = 0
            while total:
                total, x = divmod(total, 10)
                new_total += x
            total = new_total
            k -= 1
        return total
292
555
{ "serviceName":"hivedev", "serviceDef":{ "name":"hive", "id":3, "resources":[ {"name":"url","level":1,"mandatory":true,"lookupSupported":true,"matcher":"org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher","matcherOptions":{"wildCard":true, "ignoreCase":true},"label":"S3 URL","description":"S3 Bucket URL"}, {"name":"database","level":1,"mandatory":true,"lookupSupported":true,"matcher":"org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher","matcherOptions":{"wildCard":true, "ignoreCase":true},"label":"Hive Database","description":"Hive Database"}, {"name":"table","level":2,"parent":"database","mandatory":true,"lookupSupported":true,"matcher":"org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher","matcherOptions":{"wildCard":true, "ignoreCase":true},"label":"Hive Table","description":"Hive Table"}, {"name":"udf","level":2,"parent":"database","mandatory":true,"lookupSupported":true,"matcher":"org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher","matcherOptions":{"wildCard":true, "ignoreCase":true},"label":"Hive UDF","description":"Hive UDF"}, {"name":"column","level":3,"parent":"table","mandatory":true,"lookupSupported":true,"matcher":"org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher","matcherOptions":{"wildCard":true, "ignoreCase":true},"label":"Hive Column","description":"Hive Column"} ], "accessTypes":[ {"name":"select","label":"Select"}, {"name":"update","label":"Update"}, {"name":"create","label":"Create"}, {"name":"drop","label":"Drop"}, {"name":"alter","label":"Alter"}, {"name":"index","label":"Index"}, {"name":"lock","label":"Lock"}, {"name":"all","label":"All"} ], "contextEnrichers": [ { "itemId": 1, "name" : "TagEnricher", "enricher" : "org.apache.ranger.plugin.contextenricher.RangerTagEnricher", "enricherOptions" : { "tagRetrieverClassName": "org.apache.ranger.plugin.contextenricher.RangerAdminTagRetriever", "tagRefresherPollingInterval": 60000 } } ], "policyConditions": [ { "itemId":1, "name":"accessed-after-expiry", "evaluator": "org.apache.ranger.plugin.conditionevaluator.RangerScriptTemplateConditionEvaluator", "evaluatorOptions" : { "scriptTemplate":"ctx.isAccessedAfter('expiry_date');" }, "uiHint": "{ \"singleValue\":true }", "label":"Accessed after expiry_date (yes/no)?", "description": "Accessed after expiry_date? 
(yes/no)" }, { "itemId":2, "name":"expression", "evaluator": "org.apache.ranger.plugin.conditionevaluator.RangerScriptConditionEvaluator", "evaluatorOptions" : {"engineName":"JavaScript", "ui.isMultiline":"true"}, "label":"Enter boolean expression", "description": "Boolean expression" }, { "itemId":3, "name":"tags", "evaluator": "org.apache.ranger.plugin.conditionevaluator.RangerTagsAllPresentConditionEvaluator", "label":"All tags present?", "description": "All tags present condition" } ] }, "tagDefinitions": { "1":{"name":"PII"}, "2":{"name":"EXPIRES_ON","attributeDefs":[{"name":"expiry_date","type":"date"}]}, "3":{"name":"FINANCE"}, "4":{"name":"PCI"}, "5":{"name":"HIPPA"} }, "tags": { "1":{"type":"PII"}, "2":{"type":"EXPIRES_ON","attributes":{"expiry_date":"2015/12/31"}}, "3":{"type":"FINANCE"}, "4":{"type":"EXPIRES_ON","attributes":{"expiry_date":"2025/12/31"}}, "5":{"type":"PCI"}, "6":{"type":"HIPPA"} }, "serviceResources": [ {"id":1,"serviceName":"cl1_hive","resourceElements":{"database":{"values":["hr"]},"table":{"values":["employee"]},"column":{"values":["ssn"]}}}, {"id":2,"serviceName":"cl1_hive","resourceElements":{"database":{"values":["finance"]}}}, {"id":3,"serviceName":"cl1_hive","resourceElements":{"database":{"values":["finance"]},"table":{"values":["tax_2010"]}}}, {"id":4,"serviceName":"cl1_hive","resourceElements":{"database":{"values":["finance"]},"table":{"values":["tax_2010"]},"column":{"values":["ssn"]}}}, {"id":5,"serviceName":"cl1_hive","resourceElements":{"url":{"values":["someurl"]}}}, {"id":6,"serviceName":"cl1_hive","resourceElements":{"database":{"values":["def?n"]}}} ], "resourceToTagIds": { "1":[1], "2":[3], "3":[2], "4":[1, 4], "5":[1], "6":[5] }, "tests":[ {"name":"multitag-policy-for-financeDB", "resource":{"elements":{"database":"finance"}}, "accessType": "read", "conditions": [ { "type": "tags", "values": [ "PCI", "HIPPA" ] } ], "result":[{"type":"EXPIRES_ON"}, {"type":"FINANCE"}, {"type":"PII"}, {"type":"EXPIRES_ON"},{"type":"PCI"}, {"type":"HIPPA"}] } ] }
1,980
6,224
/* * Copyright (c) 2019 <NAME>, LLC * * SPDX-License-Identifier: Apache-2.0 */ /* Common header used to define underlying types for typedefs that * must appear in multiple headers independently. */ #ifndef ZEPHYR_LIB_LIBC_MINIMAL_INCLUDE_SYS_XTYPES_H_ #define ZEPHYR_LIB_LIBC_MINIMAL_INCLUDE_SYS_XTYPES_H_ #include <stdint.h> typedef int64_t _TIME_T_; typedef int32_t _SUSECONDS_T_; #endif /* ZEPHYR_LIB_LIBC_MINIMAL_INCLUDE_SYS_XTYPES_H_ */
202
14,668
<filename>chrome/browser/download/offline_item_model_data.h // Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_DOWNLOAD_OFFLINE_ITEM_MODEL_DATA_H_ #define CHROME_BROWSER_DOWNLOAD_OFFLINE_ITEM_MODEL_DATA_H_ // Per OfflineItem data used by OfflineItemModel. The model doesn't keep any // state, all the state will be stored in this class. struct OfflineItemModelData { // Whether the UI has been notified about this offline item. bool was_ui_notified_ = false; }; #endif // CHROME_BROWSER_DOWNLOAD_OFFLINE_ITEM_MODEL_DATA_H_
217
388
<filename>ustwo-clockwise/src/androidTest/java/com/ustwo/clockwise/TimeUtilTest.java<gh_stars>100-1000 package com.ustwo.clockwise; import com.ustwo.clockwise.common.WatchFaceTime; import com.ustwo.clockwise.common.util.TimeUtil; import junit.framework.Assert; import junit.framework.TestCase; public class TimeUtilTest extends TestCase { public void testSecondDegreesWithTime() throws Exception { // 30s, 500ms WatchFaceTime time = new WatchFaceTime(); time.set(30, 10, 10, 1, 1, 2015); time.millis = 500; float degrees = TimeUtil.getSecondDegrees(time); assertEquals(183.0f, degrees, Math.ulp(degrees)); } public void testSecondDegreesWithSeconds() throws Exception { // 30s, 500ms float degrees = TimeUtil.getSecondDegrees(30.5f); assertEquals(183.0f, degrees, Math.ulp(degrees)); } public void testNegativeSecondDegrees() throws Exception { // 30s, 500ms float degrees = TimeUtil.getSecondDegrees(-30.5f); assertEquals(-183.0f, degrees, Math.ulp(degrees)); } public void testHighSecondDegrees() throws Exception { // 90s, 500ms float degrees = TimeUtil.getSecondDegrees(60f + 30.5f); assertEquals(360f + 183.0f, degrees, Math.ulp(degrees)); } public void testSecondDegreesThrowsException() throws Exception { try { TimeUtil.getSecondDegrees(null); Assert.fail("Should have thrown NullPointerException"); } catch (NullPointerException npe) { //success } } public void testMinuteDegreesWithTime() throws Exception { // 10m, 30s WatchFaceTime time = new WatchFaceTime(); time.set(30, 10, 10, 1, 1, 2015); float degrees = TimeUtil.getMinuteDegrees(time); assertEquals(63.0f, degrees, Math.ulp(degrees)); } public void testMinuteDegreesWithMinute() throws Exception { // 10m, 30s float degrees = TimeUtil.getMinuteDegrees(10.5f); assertEquals(63.0f, degrees, Math.ulp(degrees)); } public void testNegativeMinuteDegrees() throws Exception { // 10m, 30s float degrees = TimeUtil.getMinuteDegrees(-10.5f); assertEquals(-63.0f, degrees, Math.ulp(degrees)); } public void testHighMinuteDegrees() throws Exception { // 70m, 30s float degrees = TimeUtil.getMinuteDegrees(60f + 10.5f); assertEquals(360f + 63.0f, degrees, Math.ulp(degrees)); } public void testMinuteDegreesThrowsException() throws Exception { try { TimeUtil.getMinuteDegrees(null); Assert.fail("Should have thrown NullPointerException"); } catch (NullPointerException npe) { //success } } public void testHourDegreesWithTime() throws Exception { // 10h, 30m WatchFaceTime time = new WatchFaceTime(); time.set(0, 30, 10, 1, 1, 2015); float degrees = TimeUtil.getHourDegrees(time); assertEquals(315.0f, degrees, Math.ulp(degrees)); } public void testHourDegreesWithHours() throws Exception { // 10h, 30m float degrees = TimeUtil.getHourDegrees(10.5f); assertEquals(315.0f, degrees, Math.ulp(degrees)); } public void testNegativeHourDegrees() throws Exception { // 10h, 30m float degrees = TimeUtil.getHourDegrees(-10.5f); assertEquals(-315.0f, degrees, Math.ulp(degrees)); } public void testHighHourDegrees() throws Exception { // 22h, 30m float degrees = TimeUtil.getHourDegrees(12f + 10.5f); assertEquals(360f + 315.0f, degrees, Math.ulp(degrees)); } public void testHourDegreesThrowsException() throws Exception { try { TimeUtil.getHourDegrees(null); Assert.fail("Should have thrown NullPointerException"); } catch (NullPointerException npe) { //success } } }
1,708
5,169
<gh_stars>1000+
{
  "name": "FrameworkDemo",
  "version": "0.0.1",
  "summary": "CocoaPods test.",
  "homepage": "https://github.com/limaofuyuanzhang/FrameworkDemo",
  "license": "MIT",
  "authors": {
    "limaofuyuanzhang": "<EMAIL>"
  },
  "platforms": {
    "ios": "8.0"
  },
  "source": {
    "git": "https://github.com/limaofuyuanzhang/FrameworkDemo.git",
    "tag": "0.0.1"
  },
  "vendored_frameworks": "**/FrameworkDemo.framework",
  "requires_arc": true
}
214
494
<reponame>waffle620/fagyhal import discord import asyncio from discord.ext import commands from utils import checks from mods.cog import Cog import mods.Tags parser = mods.Tags.Tags.parser default_join = 'Welcome to **{server}** - {mention}! You are the {servercount} member to join.' default_leave = '**{user}#{discrim}** has left the server.' #http://stackoverflow.com/a/16671271 def number_formating(n): return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th")) class Object(): pass cool = "```xl\n{0}\n```" code = "```py\n{0}\n```" class JoinLeave(Cog): def __init__(self, bot): super().__init__(bot) self.cursor = bot.mysql.cursor self.escape = bot.escape @commands.group(pass_context=True, aliases=['welcomemessage', 'join'], invoke_without_command=True, no_pm=True) @checks.admin_or_perm(manage_server=True) async def welcome(self, ctx, *, message:str=None): channel = ctx.message.channel for c in ctx.message.channel_mentions: channel = c message = message.replace(c.mention, '').replace('#'+c.name, '') sql = 'SELECT server FROM `welcome` WHERE server={0}' sql = sql.format(ctx.message.server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: if message is None: msg = ':white_check_mark: Enabled welcome messages for {0}.'.format(channel.mention) else: msg = ':white_check_mark: Added welcome message for {0}.'.format(channel.mention) sql = 'INSERT INTO `welcome` (`server`, `channel`, `message`, `user`) VALUES (%s, %s, %s, %s)' self.cursor.execute(sql, (ctx.message.server.id, channel.id, message, ctx.message.author.id)) else: if message is None: await self.bot.say(':warning: Please input something to edit the welcome message to.\n`welcome clear` to disable welcome messages.') return msg = ':white_check_mark: Edited welcome message.' 
sql = "UPDATE `welcome` SET message={0} WHERE server={1}" sql = sql.format(self.escape(message), ctx.message.server.id) self.cursor.execute(sql) self.cursor.commit() await self.bot.say(msg) @welcome.command(name='remove', pass_context=True, aliases=['delete', 'clear', 'disable'], invoke_without_command=True) @checks.admin_or_perm(manage_server=True) async def welcome_remove(self, ctx): sql = 'SELECT server FROM `welcome` WHERE server={0}' sql = sql.format(ctx.message.server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: await self.bot.say(':no_entry: Server does not have welcome messages enabled.') return sql = 'DELETE FROM `welcome` WHERE server={0}' sql = sql.format(ctx.message.server.id) self.cursor.execute(sql) self.cursor.commit() await self.bot.say(':negative_squared_cross_mark: Disabled welcome message.') @welcome.command(name='channel', pass_context=True, aliases=['setchannel'], invoke_without_command=True) @checks.admin_or_perm(manage_server=True) async def welcome_channel(self, ctx, channel:discord.Channel=None): sql = 'SELECT channel FROM `welcome` WHERE server={0}' sql = sql.format(ctx.message.server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: await self.bot.say(':no_entry: Server does not have welcome messages enabled.') return if channel is None: channel = ctx.message.server.get_channel(str(result[0]['channel'])) if channel is None: channel = ctx.message.channel else: await self.bot.say('Current Welcome Channel: {0}'.format(channel.mention)) return sql = 'UPDATE `welcome` SET channel={0} WHERE server={1}' sql = sql.format(channel.id, ctx.message.server.id) self.cursor.execute(sql) self.cursor.commit() await self.bot.say(':white_check_mark: Changed welcome channel to {0}'.format(channel.mention)) @commands.group(pass_context=True, aliases=['leavemessage'], invoke_without_command=True, no_pm=True) @checks.admin_or_perm(manage_server=True) async def leave(self, ctx, *, message:str=None): channel = ctx.message.channel for c in ctx.message.channel_mentions: channel = c message = message.replace(c.mention, '').replace('#'+c.name, '') sql = 'SELECT server FROM `leave` WHERE server={0}' sql = sql.format(ctx.message.server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: if message is None: msg = ':white_check_mark: Enabled leave messages for {0}.'.format(channel.mention) else: msg = ':white_check_mark: Added leave message for {0}.'.format(channel.mention) sql = 'INSERT INTO `leave` (`server`, `channel`, `message`, `user`) VALUES (%s, %s, %s, %s)' self.cursor.execute(sql, (ctx.message.server.id, channel.id, message, ctx.message.author.id)) else: if message is None: await self.bot.say(':warning: Please input something to edit the leave message to.\n`leave clear` to disable leave messages.') return msg = ':white_check_mark: Edited leave message.' 
sql = "UPDATE `leave` SET message={0} WHERE server={1}" sql = sql.format(self.escape(message), ctx.message.server.id) self.cursor.execute(sql) self.cursor.commit() await self.bot.say(msg) @leave.command(name='remove', pass_context=True, aliases=['delete', 'clear', 'disable'], invoke_without_command=True) @checks.admin_or_perm(manage_server=True) async def leave_remove(self, ctx): sql = 'SELECT server FROM `leave` WHERE server={0}' sql = sql.format(ctx.message.server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: await self.bot.say(':no_entry: Server does not have leave messages enabled.') return sql = 'DELETE FROM `leave` WHERE server={0}' sql = sql.format(ctx.message.server.id) self.cursor.execute(sql) self.cursor.commit() await self.bot.say(':negative_squared_cross_mark: Disabled leave message.') @leave.command(name='channel', pass_context=True, aliases=['setchannel'], invoke_without_command=True) @checks.admin_or_perm(manage_server=True) async def leave_channel(self, ctx, channel:discord.Channel=None): sql = 'SELECT channel FROM `leave` WHERE server={0}' sql = sql.format(ctx.message.server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: await self.bot.say(':no_entry: Server does not have leave messages enabled.') return if channel is None: channel = ctx.message.server.get_channel(str(result[0]['channel'])) if channel is None: channel = ctx.message.channel else: await self.bot.say('Current Leave Channel: {0}'.format(channel.mention)) return sql = 'UPDATE `leave` SET channel={0} WHERE server={1}' sql = sql.format(channel.id, ctx.message.server.id) self.cursor.execute(sql) self.cursor.commit() await self.bot.say(':white_check_mark: Changed leave channel to {0}'.format(channel.mention)) @welcome.command(name='current', pass_context=True, aliases=['show'], invoke_without_command=True) async def welcome_current(self, ctx): sql = 'SELECT message FROM `welcome` WHERE server={0}' sql = sql.format(ctx.message.server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: await self.bot.say(':no_entry: Server does not have welcome messages enabled.') return msg = result[0]['message'] if result[0]['message'] != None else default_join await self.bot.say(msg) @leave.command(name='current', pass_context=True, aliases=['show'], invoke_without_command=True) async def leave_current(self, ctx): sql = 'SELECT message FROM `leave` WHERE server={0}' sql = sql.format(ctx.message.server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: await self.bot.say(':no_entry: Server does not have leave messages enabled.') return msg = result[0]['message'] if result[0]['message'] != None else default_leave await self.bot.say(msg) async def remove(self, server, welcome): if welcome: sql = 'DELETE FROM `welcome` WHERE server={0}' else: sql = 'DELETE FROM `leave` WHERE server={0}' sql = sql.format(server.id) self.cursor.execute(sql) self.cursor.commit() async def on_member_join(self, member): try: server = member.server sql = 'SELECT * FROM `welcome` WHERE server={0}' sql = sql.format(server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: return channel = server.get_channel(str(result[0]['channel'])) if channel is None: await self.remove(server, True) ctx = Object() ctx.message = Object() ctx.message.author = member ctx.message.server = server ctx.message.channel = channel msg = result[0]['message'] join_message = msg if msg != None and len(msg) != 0 else default_join message = await parser(self, ctx, 
join_message.replace('{servercount}', number_formating(len(server.members))).replace('{membercount}', number_formating(len(server.members))), ()) await self.bot.send_message(channel, message, replace_mentions=False, replace_everyone=False) except (discord.errors.Forbidden, discord.errors.NotFound, discord.errors.InvalidArgument): await self.remove(server, True) async def on_member_remove(self, member): try: server = member.server sql = 'SELECT * FROM `leave` WHERE server={0}' sql = sql.format(server.id) result = self.cursor.execute(sql).fetchall() if len(result) == 0: return channel = server.get_channel(str(result[0]['channel'])) if channel is None: await self.remove(server, False) ctx = Object() ctx.message = Object() ctx.message.author = member ctx.message.server = server ctx.message.channel = channel msg = result[0]['message'] leave_message = msg if msg != None and len(msg) != 0 else default_leave message = await parser(self, ctx, leave_message.replace('{servercount}', number_formating(len(server.members))).replace('{membercount}', number_formating(len(server.members))), ()) await self.bot.send_message(channel, message, replace_mentions=False, replace_everyone=False) except (discord.errors.Forbidden, discord.errors.NotFound, discord.errors.InvalidArgument): await self.remove(server, False) def setup(bot): bot.add_cog(JoinLeave(bot))
3,746
918
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gobblin.elasticsearch; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.HttpURLConnection; import java.net.URL; import java.nio.file.Files; import java.util.Arrays; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.gobblin.test.TestUtils; import org.testng.Assert; import com.google.common.base.Throwables; import javax.annotation.concurrent.NotThreadSafe; import lombok.extern.slf4j.Slf4j; /** * A Test ElasticSearch server */ @Slf4j @NotThreadSafe public class ElasticsearchTestServer { private static final String ELASTICSEARCH_VERSION="5.6.8"; private static final String TEST_ROOT_DIR="gobblin-modules/gobblin-elasticsearch/test-elasticsearch/"; // The clean elasticsearch instance is installed here private static final String BASE_ELASTICSEARCH_INSTALL =TEST_ROOT_DIR + "elasticsearch-" + ELASTICSEARCH_VERSION; // Per-test elasticsearch instances are installed under a different directory private static final String TEST_INSTALL_PREFIX =TEST_ROOT_DIR + "es-test-install-"; private static final String ELASTICSEARCH_BIN="/bin/elasticsearch"; private static final String ELASTICSEARCH_CONFIG_FILE= "/config/elasticsearch.yml"; private static final String ELASTICSEARCH_JVMOPTS_FILE="/config/jvm.options"; private final String _testId; private final int _tcpPort; private Process elasticProcess; private final int _httpPort; private String _pid = ManagementFactory.getRuntimeMXBean().getName(); private final String _testInstallDirectory; private AtomicBoolean _started = new AtomicBoolean(false); public ElasticsearchTestServer(String testId) throws IOException { this(testId, TestUtils.findFreePort(), TestUtils.findFreePort()); } private ElasticsearchTestServer(String testId, int httpPort, int tcpPort) throws IOException { _testId = testId; _httpPort = httpPort; _tcpPort = tcpPort; _testInstallDirectory = TEST_INSTALL_PREFIX + _testId; try { createInstallation(); } catch (Exception e) { throw new IOException("Failed to create a test installation of elasticsearch", e); } configure(); } public ElasticsearchTestServer() throws IOException { this(TestUtils.generateRandomAlphaString(25)); } private void createInstallation() throws IOException { File srcDir = new File(BASE_ELASTICSEARCH_INSTALL); if (!srcDir.exists()) { throw new IOException("Could not find base elasticsearch instance installed at " + srcDir.getAbsolutePath() + "\n" + "Run ./gradlew :gobblin-modules:gobblin-elasticsearch:installTestDependencies before running this test"); } File destDir = new File(_testInstallDirectory); log.debug("About to recreate directory : 
{}", destDir.getPath()); if (destDir.exists()) { org.apache.commons.io.FileUtils.deleteDirectory(destDir); } String[] commands = {"cp", "-r", srcDir.getAbsolutePath(), destDir.getAbsolutePath()}; try { log.debug("{}: Will run command: {}", this._pid, Arrays.toString(commands)); Process copyProcess = new ProcessBuilder().inheritIO().command(commands).start(); copyProcess.waitFor(); } catch (Exception e) { log.error("Failed to create installation directory at {}", destDir.getPath(), e); Throwables.propagate(e); } } private void configure() throws IOException { File configFile = new File(_testInstallDirectory + ELASTICSEARCH_CONFIG_FILE); FileOutputStream configFileStream = new FileOutputStream(configFile); try { configFileStream.write(("cluster.name: " + _testId + "\n").getBytes("UTF-8")); configFileStream.write(("http.port: " + _httpPort + "\n").getBytes("UTF-8")); configFileStream.write(("transport.tcp.port: " + _tcpPort + "\n").getBytes("UTF-8")); } finally { configFileStream.close(); } File jvmConfigFile = new File(_testInstallDirectory + ELASTICSEARCH_JVMOPTS_FILE); try (Stream<String> lines = Files.lines(jvmConfigFile.toPath())) { List<String> newLines = lines.map(line -> line.replaceAll("^\\s*(-Xm[s,x]).*$", "$1128m")) .collect(Collectors.toList()); Files.write(jvmConfigFile.toPath(), newLines); } } public void start(int maxStartupTimeSeconds) { if (_started.get()) { log.warn("ElasticSearch server has already been attempted to be started... returning without doing anything"); return; } _started.set(true); log.error("{}: Starting elasticsearch server on port {}", this._pid, this._httpPort); String[] commands = {_testInstallDirectory + ELASTICSEARCH_BIN}; try { log.error("{}: Will run command: {}", this._pid, Arrays.toString(commands)); elasticProcess = new ProcessBuilder().inheritIO().command(commands).start(); if (elasticProcess != null) { // register destroy of process on shutdown in-case of unclean test termination Runtime.getRuntime().addShutdownHook(new Thread() { public void run() { if (elasticProcess!=null) { elasticProcess.destroy(); } } }); } } catch (Exception e) { log.error("Failed to start elasticsearch server", e); Throwables.propagate(e); } boolean isUp = false; int numTries = maxStartupTimeSeconds * 2; while (!isUp && numTries-- > 0) { try { Thread.sleep(500); // wait 1/2 second isUp = isUp(); } catch (Exception e) { } } Assert.assertTrue(isUp, "Server is not up!"); } public boolean isUp() { try { URL url = new URL("http://localhost:" + _httpPort + "/_cluster/health?wait_for_status=green"); long startTime = System.nanoTime(); HttpURLConnection httpURLConnection = (HttpURLConnection) url.openConnection(); int responseCode = httpURLConnection.getResponseCode(); log.info("Duration: {} seconds, Response code = {}", (System.nanoTime() - startTime) / 1000000000.0, responseCode); if (responseCode == 200) { return true; } else {return false;} } catch (Exception e) { Throwables.propagate(e); return false; } } public int getTransportPort() { return _tcpPort; } public int getHttpPort() { return _httpPort; } public void stop() { if (elasticProcess != null) { try { elasticProcess.destroy(); elasticProcess = null; // set to null to prevent redundant call to destroy on shutdown } catch (Exception e) { log.warn("Failed to stop the ElasticSearch server", e); } } } }
2,639
337
<gh_stars>100-1000 public class Foo { void foo() { // nothing } }
38
438
<filename>Upstream/Src/interrupts.c /** ****************************************************************************** * @file stm32f4xx_it.c * @date 03/02/2015 20:27:00 * @brief Interrupt Service Routines. ****************************************************************************** * * COPYRIGHT(c) 2015 STMicroelectronics * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************** * * Modifications by <NAME> */ /* Includes ------------------------------------------------------------------*/ #include "interrupts.h" #include "upstream_spi.h" #include "stm32f4xx_hal.h" #include "board_config.h" #include "build_config.h" #include "led.h" #include "upstream_hid_botdetect.h" #include "upstream_statemachine.h" /* USER CODE BEGIN 0 */ /* USER CODE END 0 */ /* External variables --------------------------------------------------------*/ extern PCD_HandleTypeDef hpcd_USB_OTG_FS; extern DMA_HandleTypeDef spiTxDmaHandle; extern DMA_HandleTypeDef spiRxDmaHandle; uint8_t BusFaultAllowed = 0; /******************************************************************************/ /* Cortex-M4 Processor Interruption and Exception Handlers */ /******************************************************************************/ void SysTick_Handler(void) { HAL_IncTick(); LED_Tick(); Upstream_StateMachine_PollDeviceConnected(); #if (defined (CONFIG_KEYBOARD_ENABLED) && defined (CONFIG_KEYBOARD_BOT_DETECT_ENABLED)) || \ (defined (CONFIG_MOUSE_ENABLED) && defined (CONFIG_MOUSE_BOT_DETECT_ENABLED)) Upstream_HID_BotDetect_Systick(); #endif } ///////////////////////// //All interrupts in this section must be at the same priority. //They interact with each other, and calls are not thread-safe //when different interrupt priorities are used. 
///////////////////////// void OTG_FS_IRQHandler(void) { HAL_PCD_IRQHandler(&hpcd_USB_OTG_FS); } void DMA2_Stream2_IRQHandler(void) { HAL_DMA_IRQHandler(&spiRxDmaHandle); } void DMA2_Stream3_IRQHandler(void) { HAL_DMA_IRQHandler(&spiTxDmaHandle); } void EXTI3_IRQHandler(void) { __HAL_GPIO_EXTI_CLEAR_IT(DOWNSTREAM_TX_OK_PIN); Upstream_TxOkInterrupt(); } ///////////////////////// ///////////////////////// //This weird stuff is required when disabling flash writes. //The deliberate flash lockout will cause a bus fault that we need to process. void EnableOneBusFault(void) { //It should not be enabled already! if (BusFaultAllowed) { while (1); } SCB->SHCSR = SCB_SHCSR_BUSFAULTENA_Msk; BusFaultAllowed = 1; } void BusFault_Handler(void) { if (BusFaultAllowed) { BusFaultAllowed = 0; return; } while (1); } /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
1,549
16,259
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging from collections.abc import Iterable from itertools import repeat from typing import List, Optional, Tuple import torch from torch import Tensor # ------------------------------------------------------------------------------ # assert_equal() # ------------------------------------------------------------------------------ def assert_equal(value1, value2, name1=None, name2=None): """Asserts two values are equal otherwise raise an error.""" str_name1 = "" if name1 is None else "{} ".format(name1) str_name2 = "" if name2 is None else "{} ".format(name2) if value1 != value2: str_value1 = "{}" if name1 is None else "({})" str_value1 = str_value1.format(value1) str_value2 = "{}" if name2 is None else "({})" str_value2 = str_value2.format(value2) raise ValueError( "Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2) ) def fill_config(config, key, value): if value is not None: if key not in config or config[key] is None: config[key] = value assert_equal(value, config[key], "value", f'config["{key}"]') # ------------------------------------------------------------------------------ # check_and_return_expected() # ------------------------------------------------------------------------------ def check_and_return_expected(value, undefined_value, expected_value, name=None): """ Return the expected value while checking if the given value is undefined or equal to the expected value. """ if (undefined_value is None and value is None) or (undefined_value == value): return expected_value if value != expected_value: str_name = "" if name is None else "{} ".format(name) str_value = "{}" if name is None else "({})" str_value = str_value.format(value) raise ValueError( "Expected {}{} == {}".format(str_name, str_value, expected_value) ) return expected_value # ------------------------------------------------------------------------------ # get_time_axis() # ------------------------------------------------------------------------------ def get_time_axis(layout): """ Extract the time axis from the layout, for example for breaking sequence into segments. """ if layout in ["TB", "TBD"]: return 0 if layout in ["BT", "BTD"]: return 1 if layout in ["BCTD"]: return 2 raise ValueError("Unsupported layout = {}".format(layout)) # ------------------------------------------------------------------------------ # get_batch_axis() # ------------------------------------------------------------------------------ def get_batch_axis(layout): """ Extract the batch axis from the layout """ if layout in ["TB", "TBD"]: return 1 if layout in ["BT", "BTD", "BCTD"]: return 0 raise ValueError("Unsupported layout = {}".format(layout)) # ------------------------------------------------------------------------------ # monotonically_increasing_and_bounded() # ------------------------------------------------------------------------------ def monotonically_increasing_and_bounded(iterable, min=None, max=None): """ Check if the elements in the given iterable are monotonically increasing and bounded by upper/lower bounds. 
""" if not isinstance(iterable, Iterable): raise TypeError( "Expected iterable to be of type Iterable, got ({})".format( iterable.__class__.__name__ ) ) for i in range(len(iterable)): if min is not None and iterable[i] < min: return False if max is not None and iterable[i] > max: return False if i > 0 and iterable[i] <= iterable[i - 1]: return False return True # ------------------------------------------------------------------------------ # to_pair() # ------------------------------------------------------------------------------ def to_pair(value, name): """Make a pair (of type tuple) of given value.""" if isinstance(value, Iterable): if len(value) != 2: raise ValueError( "Expected `{}` to have exactly 2 elements, got: ({})".format( name, value ) ) return value return tuple(repeat(value, 2)) # ------------------------------------------------------------------------------ # infer_conv_output_attrs() # ------------------------------------------------------------------------------ # TODO(cfyeh): figure out if we can get `output_dim` without calling the module. def infer_conv_output_attrs( module, input_channels, input_dim, batch_size=1, max_length=8 ): """Get output attributes of a module with input.""" input = torch.randn(batch_size, input_channels, max_length, input_dim) output = module(input) output_channels = output.shape[1] output_dim = output.shape[-1] return output_channels, output_dim # ------------------------------------------------------------------------------ # NoOp # ------------------------------------------------------------------------------ class NoOp(torch.nn.Module): """ NoOp simply passes the input as the output. """ def __init__(self): super().__init__() def forward(self, input: Tensor) -> Tensor: return input # ------------------------------------------------------------------------------ # Permute: a torch.nn.Module applies permutation on the input tensor. # ------------------------------------------------------------------------------ class Permute(torch.nn.Module): def __init__(self, dims): super().__init__() self.dims = dims def forward(self, input: Tensor) -> Tensor: return input.permute(self.dims).contiguous() # ------------------------------------------------------------------------------ # lengths_to_padding_mask() # ------------------------------------------------------------------------------ def lengths_to_padding_mask(lengths: Tensor) -> Tensor: """Convert lengths of shape (B, ) to padding mask.""" batch_size = lengths.shape[0] max_length = int(torch.max(lengths).item()) padding_mask = torch.arange( # [0, ..., T-1] max_length, device=lengths.device, dtype=lengths.dtype ).expand(batch_size, max_length) >= lengths.unsqueeze(1) return padding_mask # ------------------------------------------------------------------------------ # lengths_to_attention_mask() # ------------------------------------------------------------------------------ def lengths_to_attention_mask( lengths: Tensor, left_context: Optional[int] = None, right_context: Optional[int] = None, ) -> Optional[Tensor]: """ Generate attention mask based on (lengths, left_context, right_context). left_context is None means unlimited left context. right_context is None means unlimited right context. 
""" if left_context is None and right_context is None: return None max_length = int(torch.max(lengths).item()) # For example, with `max_length` == 5, # indices = tensor([ # [ 0, 1, 2, 3, 4, 5], # [-1, 0, 1, 2, 3, 4], # [-2, -1, 0, 1, 2, 3], # [-3, -2, -1, 0, 1, 2], # [-4, -3, -2, -1, 0, 1], # [-5, -4, -3, -2, -1, 0], # ]) # In some cases the second torch.arange is created on cpu which causes a # failure. Adding the device option to guard against it. indices = torch.arange( max_length, device=lengths.device, dtype=lengths.dtype ).expand(max_length, max_length) - torch.arange( max_length, device=lengths.device ).view( max_length, -1 ) # For example, with `max_length` == 5, # bool_mask = tensor([ # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # [True, True, True, True, True], # ]) bool_mask = ( torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length) ) # For example, with `max_length` == 5, left_context == 2 # left_mask = tensor([ # [ True, True, True, True, True], # [ True, True, True, True, True], # [ True, True, True, True, True], # [False, True, True, True, True], # [False, False, True, True, True], # ]) if left_context is not None: left_mask = indices >= -left_context bool_mask = bool_mask & left_mask # For example, with `max_length` == 5, right_context == 1 # right_mask = tensor([ # [True, True, False, False, False], # [True, True, True, False, False], # [True, True, True, True, False], # [True, True, True, True, True], # [True, True, True, True, True], # ]) if right_context is not None: right_mask = indices <= right_context bool_mask = bool_mask & right_mask bool_mask = (~bool_mask).to(device=lengths.device) return bool_mask # ------------------------------------------------------------------------------ # infer_output_norm() # ------------------------------------------------------------------------------ def infer_output_norm(module, output_norm=None): """ Infer the output norm (string and module) needed on the module gvien desired output normalization. """ if output_norm == module.output_norm(): # output_norm already matches module.output_norm(). 
return (None, NoOp()) if output_norm is None and module.output_norm() is not None: logger = logging.getLogger("infer_output_norm()") logger.warning( "trying to set output_norm ({}) ".format(output_norm) + "but got module.output_norm() ({}), ".format(module.output_norm()) + "the combined output_norm() will be ({})".format(module.output_norm()) ) return (None, NoOp()) if output_norm == "log_softmax": if module.output_norm() is not None: raise ValueError( "incompatible output_norm ({}) ".format(output_norm) + "and module.output_norm() ({})".format(module.output_norm()) ) else: return ("log_softmax", torch.nn.LogSoftmax(dim=-1)) if output_norm == "softmax": if module.output_norm() is not None: raise ValueError( "incompatible output_norm ({}) ".format(output_norm) + "and module.output_norm() ({})".format(module.output_norm()) ) else: return ("softmax", torch.nn.Softmax(dim=-1)) raise ValueError( "output_norm ({}) not in ".format(output_norm) + "supported list = [None, softmax, log_softmax]" ) # ------------------------------------------------------------------------------ # infer_channels_from_layout() # ------------------------------------------------------------------------------ def infer_channels_from_layout(layout, channels): """Extract the number of channels from the layout.""" if layout in ("TBD", "BTD"): if channels is not None and channels != 1: raise ValueError( "Expected channels ({}) to be 1 for layout = {}".format( channels, layout ) ) if channels is None: return 1 return channels # ------------------------------------------------------------------------------ # pad_sequence() # ------------------------------------------------------------------------------ @torch.jit.export def pad_sequence( sequence: Tensor, time_axis: int, extra_left_context: int = 0, extra_right_context: int = 0, ) -> Tensor: """Pad extra left/right contexts to the sequence.""" if extra_left_context == 0 and extra_right_context == 0: return sequence tensors_to_concat = [] if extra_left_context: size = (extra_left_context,) fill_value = 0 indices = torch.full( size=size, fill_value=fill_value, dtype=torch.long, device=sequence.device, ) left_padding = torch.index_select(sequence, time_axis, indices) tensors_to_concat.append(left_padding) tensors_to_concat.append(sequence) # NOTE(cfyeh): for efficiency reason we pad 0 instead of the last frame for # extra right contexts. 
if extra_right_context: size = list(sequence.shape) size[time_axis] = extra_right_context right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device) tensors_to_concat.append(right_padding) padded_sequence = torch.cat(tensors_to_concat, dim=time_axis) return padded_sequence # ------------------------------------------------------------------------------ # sequence_to_segments() # ------------------------------------------------------------------------------ @torch.jit.export def sequence_to_segments( sequence: Tensor, time_axis: int, lengths: Tensor, segment_size: Optional[int] = None, extra_left_context: int = 0, extra_right_context: int = 0, ) -> List[Tuple[Tensor, Tensor]]: """Breaks sequence into segments.""" sequence = pad_sequence( sequence=sequence, time_axis=time_axis, extra_left_context=extra_left_context, extra_right_context=extra_right_context, ) lengths = lengths + extra_left_context + extra_right_context segments: List[Tuple[Tensor, Tensor]] = [] if segment_size is None: segments.append((sequence, lengths)) return segments offset = 0 end = sequence.shape[time_axis] step = segment_size size = extra_left_context + segment_size + extra_right_context while offset + extra_left_context + extra_right_context < end: clamped_size = min(size, end - offset) segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size) indices = torch.arange( start=offset, end=(offset + clamped_size), step=1, dtype=torch.long, device=sequence.device, ) segment_tensor = torch.index_select(sequence, time_axis, indices) segments.append((segment_tensor, segment_lengths)) offset = offset + step return segments # ------------------------------------------------------------------------------ # segments_to_sequence() # ------------------------------------------------------------------------------ @torch.jit.export def segments_to_sequence( segments: List[Tuple[Tensor, Tensor]], time_axis: int ) -> Tuple[Tensor, Tensor]: """Concatenate segments into a full sequence.""" if len(segments) == 1: return segments[0] tensors_to_concat: List[Tensor] = [] lengths_to_stack: List[Tensor] = [] for tensor, lengths in segments: tensors_to_concat.append(tensor) lengths_to_stack.append(lengths) sequence = torch.cat(tensors_to_concat, dim=time_axis) lengths = torch.stack(lengths_to_stack, dim=0) lengths = torch.sum(lengths, dim=0) return sequence, lengths def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False): """ convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor batch_first: whether to return a (B, T) tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = False for t < lengths[b] and True otherwise TODO: kernelize this function if benchmarking shows this function is slow """ max_lengths = torch.max(lengths).item() bsz = lengths.size(0) encoder_padding_mask = torch.arange( max_lengths ).to( # a (T, ) tensor with [0, ..., T-1] lengths.device ).view( # move to the right device 1, max_lengths ).expand( # reshape to (1, T)-shaped tensor bsz, -1 ) > lengths.view( # expand to (B, T)-shaped tensor bsz, 1 ).expand( -1, max_lengths ) if not batch_first: return encoder_padding_mask.t(), max_lengths else: return encoder_padding_mask, max_lengths # ------------------------------------------------------------------------------ # attention suppression # ------------------------------------------------------------------------------ def attention_suppression(attention_weights: 
Tensor, scale: float):
    # B, H, qlen, klen -> B, H, qlen, 1
    attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1)
    attention_nozeros = attention_prob.to(torch.bool)
    nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True)

    # For very sparse attention rows we need to guard against entries that are
    # (almost) all zeros.
    key_sum = torch.sum(attention_prob, dim=-1, keepdim=True)

    # nozeros_sum should be > 1
    key_mean = key_sum / (nozeros_sum + 1e-8)

    # std calculation
    dis = (attention_prob - key_mean) * (attention_prob - key_mean)

    # if attention_prob[i] < threshold, then dis_masked[i] = 0; for all i
    dis_masked = torch.where(
        attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size())
    )

    key_var = torch.sum(dis_masked, dim=-1, keepdim=True)
    key_var = key_var / (nozeros_sum - 1.0 + 1e-8)
    key_std = torch.sqrt(key_var)
    key_thread = key_mean - scale * key_std

    # keep attention_weights[i] where attention_prob[i] >= key_thread,
    # otherwise replace it with "-inf"
    inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach()
    inf_tensor[:] = float("-inf")
    attention_weights_float = torch.where(
        attention_prob < key_thread,
        inf_tensor,
        attention_weights.float(),
    )

    return attention_weights_float.type_as(attention_weights)


def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value):
    return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
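# ------------------------------------------------------------------------------
# Illustrative usage (not part of the original module). A minimal sketch showing
# how the mask and segmentation helpers above fit together; the tensor shapes,
# the (T, B, D) layout and the context sizes are assumptions chosen only for
# this example.
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    lengths = torch.tensor([3, 5])                    # two sequences in the batch
    padding_mask = lengths_to_padding_mask(lengths)
    print(padding_mask)        # True marks padded positions, shape (B, T) == (2, 5)

    attn_mask = lengths_to_attention_mask(lengths, left_context=2, right_context=1)
    print(attn_mask.shape)     # (5, 5); True marks positions outside the context window

    # Break a (T, B, D) sequence into segments of 2 frames and stitch it back.
    sequence = torch.randn(5, 2, 8)
    segments = sequence_to_segments(
        sequence=sequence, time_axis=0, lengths=lengths, segment_size=2
    )
    restored, restored_lengths = segments_to_sequence(segments, time_axis=0)
    assert torch.equal(restored, sequence)
    assert torch.equal(restored_lengths, lengths)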
6,756
601
# Standard Libraries import json import logging # Django Imports from django.test import TestCase # Ghostwriter Libraries from ghostwriter.commandcenter.forms import ReportConfigurationForm from ghostwriter.factories import ( ReportConfigurationFactory, ReportDocxTemplateFactory, ReportPptxTemplateFactory, ) logging.disable(logging.CRITICAL) class ReportConfigurationFormTests(TestCase): """Collection of tests for :form:`commandcenter.ReportConfigurationForm`.""" @classmethod def setUpTestData(cls): cls.config = ReportConfigurationFactory() cls.docx_template = ReportDocxTemplateFactory() cls.pptx_template = ReportPptxTemplateFactory() def setUp(self): pass def form_data( self, enable_borders=None, border_weight=None, border_color=None, prefix_figure=None, label_figure=None, prefix_table=None, label_table=None, default_docx_template_id=None, default_pptx_template_id=None, **kwargs, ): return ReportConfigurationForm( data={ "enable_borders": enable_borders, "border_weight": border_weight, "border_color": border_color, "prefix_figure": prefix_figure, "label_figure": label_figure, "prefix_table": prefix_table, "label_table": label_table, "default_docx_template": default_docx_template_id, "default_pptx_template": default_pptx_template_id, }, ) def test_valid_data(self): form = self.form_data(**self.config.__dict__) self.assertTrue(form.is_valid()) def test_clean_default_docx_template(self): config = self.config.__dict__.copy() form = self.form_data(**config) self.assertTrue(form.is_valid()) self.docx_template.lint_result = json.dumps( {"result": "failed", "warnings": [], "errors": []} ) self.docx_template.save() config["default_docx_template_id"] = self.docx_template.pk form = self.form_data(**config) errors = form.errors["default_docx_template"].as_data() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].code, "invalid") def test_clean_default_pptx_template(self): config = self.config.__dict__.copy() form = self.form_data(**config) self.assertTrue(form.is_valid()) self.pptx_template.lint_result = json.dumps( {"result": "failed", "warnings": [], "errors": []} ) self.pptx_template.save() config["default_pptx_template_id"] = self.pptx_template.pk form = self.form_data(**config) errors = form.errors["default_pptx_template"].as_data() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].code, "invalid")
1,303
530
<filename>strongbox-storage/strongbox-storage-layout-providers/strongbox-storage-maven-layout/strongbox-storage-maven-layout-provider/src/main/java/org/carlspring/strongbox/artifact/archive/JarArchiveListingFunction.java package org.carlspring.strongbox.artifact.archive; import org.carlspring.strongbox.providers.io.RepositoryPath; import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.Set; import org.apache.commons.compress.archivers.ArchiveInputStream; import org.apache.commons.compress.archivers.jar.JarArchiveInputStream; /** * @author <NAME> */ public enum JarArchiveListingFunction implements ArchiveListingFunction { INSTANCE; @Override public Set<String> listFilenames(final RepositoryPath path) throws IOException { try (InputStream is = Files.newInputStream(path); BufferedInputStream bis = new BufferedInputStream(is); ArchiveInputStream ais = new JarArchiveInputStream(bis)) { return getEntryNames(ais); } } @Override public boolean supports(final RepositoryPath path) { final Path fileName = path.getFileName(); if (fileName == null) { return false; } final String filenameString = fileName.toString(); return filenameString.endsWith("jar") || filenameString.endsWith("war") || filenameString.endsWith("ear") || filenameString.endsWith("zip"); } }
626
5,659
<filename>cms/test_utils/project/mti_pluginapp/cms_plugins.py<gh_stars>1000+ from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from .models import TestPluginAlphaModel, TestPluginBetaModel class TestPluginAlpha(CMSPluginBase): model = TestPluginAlphaModel render_template = 'mti_pluginapp/alpha.html' name = 'test mti plugin alpha' def render(self, context, instance, placeholder): context['alpha'] = instance.alpha return context plugin_pool.register_plugin(TestPluginAlpha) class TestPluginBeta(CMSPluginBase): model = TestPluginBetaModel render_template = 'mti_pluginapp/beta.html' name = 'test mti plugin beta' def render(self, context, instance, placeholder): context['alpha'] = instance.alpha context['beta'] = instance.beta return context plugin_pool.register_plugin(TestPluginBeta)
304
1,338
<reponame>Kirishikesan/haiku /* * Copyright 2011, <NAME> <<EMAIL>> * Distributed under the terms of the MIT License. */ #ifndef _PACKAGE__HPKG__PRIVATE__REPOSITORY_READER_IMPL_H_ #define _PACKAGE__HPKG__PRIVATE__REPOSITORY_READER_IMPL_H_ #include <package/hpkg/ReaderImplBase.h> #include <package/RepositoryInfo.h> namespace BPackageKit { namespace BHPKG { class BRepositoryContentHandler; namespace BPrivate { class RepositoryReaderImpl : public ReaderImplBase { typedef ReaderImplBase inherited; public: RepositoryReaderImpl(BErrorOutput* errorOutput); ~RepositoryReaderImpl(); status_t Init(const char* fileName); status_t Init(int fd, bool keepFD); status_t Init(BPositionIO* file, bool keepFile); status_t GetRepositoryInfo( BRepositoryInfo* _repositoryInfo) const; status_t ParseContent( BRepositoryContentHandler* contentHandler); private: class PackagesAttributeHandler; class PackageContentHandlerAdapter; private: BRepositoryInfo fRepositoryInfo; }; } // namespace BPrivate } // namespace BHPKG } // namespace BPackageKit #endif // _PACKAGE__HPKG__PRIVATE__REPOSITORY_READER_IMPL_H_
466
364
<reponame>dfint/DwarfTherapistRus-30.1 /* Dwarf Therapist Copyright (c) 2009 <NAME> (chmod) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "viewcolumncolors.h" #include "viewcolumnset.h" #include "viewcolumnsetcolors.h" ViewColumnColors::ViewColumnColors(QObject *parent) : CellColors(parent) , m_set(0) { use_defaults(); } ViewColumnColors::ViewColumnColors(ViewColumnSet *set, QObject *parent) : CellColors(parent) , m_set(set) { use_defaults(); } ViewColumnColors::ViewColumnColors(QSettings &s, ViewColumnSet *set, QObject *parent) : CellColors(parent) , m_set(set) { use_defaults(); load_settings(s); } ViewColumnColors::~ViewColumnColors(){ m_set = 0; } void ViewColumnColors::use_defaults(){ //use the set's colors if(m_set){ m_color_defs.clear(); foreach(QSharedPointer<CellColorDef> c, m_set->get_colors()->get_color_defs()){ m_color_defs.append(c); } }else{ CellColors::use_defaults(); } } QColor ViewColumnColors::get_default_color(int idx) const{ if(m_set){ return m_set->get_colors()->get_color(idx); }else{ return CellColors::get_default_color(idx); } } QSharedPointer<CellColorDef> ViewColumnColors::get_default_color_def(int idx){ if(m_set){ return m_set->get_colors()->get_color_defs().at(idx); }else{ return CellColors::get_default_color_def(idx); } } void ViewColumnColors::read_settings(){ if(m_set){ inherit_colors(*m_set->get_colors()); } }
945
4,742
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six
from runtime import db


def get_train_random_forest_pai_cmd(model_name, data_table, model_attrs,
                                    feature_column_names, label_name):
    """Get a command to submit a random forest training task to PAI

    Args:
        model_name: model name on PAI
        data_table: input data table name
        model_attrs: model attributes for the random forest model
        feature_column_names: names of feature columns
        label_name: name of the label column

    Returns:
        A string which is a PAI cmd
    """
    # default use numTrees = 1
    tree_num = model_attrs.get("tree_num", 1)
    assert isinstance(tree_num, six.integer_types), \
        "tree_num must be an integer"
    feature_cols = ",".join(feature_column_names)

    return '''pai -name randomforests -DinputTableName="%s" -DmodelName="%s"
-DlabelColName="%s" -DfeatureColNames="%s" -DtreeNum="%d"''' % (
        data_table, model_name, label_name, feature_cols, tree_num)


def get_explain_random_forest_pai_cmd(datasource, model_name, data_table,
                                      result_table, label_column):
    """Get a command to submit a PAI RandomForest explain task

    Args:
        datasource: current datasource
        model_name: model name on PAI
        data_table: input data table name
        result_table: name of the result table, PAI will automatically
            create this table
        label_column: name of the label column

    Returns:
        A string which is a PAI cmd
    """
    # NOTE(typhoonzero): for PAI random forests predicting, we can not load
    # the TrainStmt since the model saving is fully done by PAI. We directly
    # use the columns in the SELECT statement for prediction; an error will be
    # reported by the PAI job if the columns do not match.
    if not label_column:
        return ("must specify WITH label_column when using "
                "pai random forest to explain models")

    conn = db.connect_with_data_source(datasource)
    schema = db.get_table_schema(conn, data_table)
    columns = [f[0] for f in schema]
    conn.execute("DROP TABLE IF EXISTS %s;" % result_table)
    return (
        """pai -name feature_importance -project algo_public """
        """-DmodelName="%s" -DinputTableName="%s" -DoutputTableName="%s" """
        """-DlabelColName="%s" -DfeatureColNames="%s" """
    ) % (model_name, data_table, result_table, label_column,
         ",".join(columns))
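# Illustrative usage (not part of the original module). The model, table and
# column names below are invented purely to show the shape of the generated
# PAI command string.
if __name__ == "__main__":
    cmd = get_train_random_forest_pai_cmd(
        model_name="my_rf_model",
        data_table="db.train_table",
        model_attrs={"tree_num": 10},
        feature_column_names=["sepal_length", "sepal_width"],
        label_name="class",
    )
    # pai -name randomforests -DinputTableName="db.train_table" ... -DtreeNum="10"
    print(cmd)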
1,117
2,039
<reponame>dileeshvar/nd4j package org.nd4j.linalg.profiler.data; import org.nd4j.linalg.api.ops.CustomOp; import org.nd4j.linalg.api.ops.Op; import org.nd4j.linalg.profiler.data.primitives.ComparableAtomicLong; import org.nd4j.linalg.profiler.data.primitives.TimeSet; import org.nd4j.linalg.util.ArrayUtil; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; /** * @author <EMAIL> */ public class StringAggregator { private Map<String, TimeSet> times = new ConcurrentHashMap<>(); private Map<String, ComparableAtomicLong> longCalls = new ConcurrentHashMap<>(); private static final long THRESHOLD = 100000; public StringAggregator() { } public void reset() { for (String key : times.keySet()) { // times.remove(key); times.put(key, new TimeSet()); } for (String key : longCalls.keySet()) { // longCalls.remove(key); longCalls.put(key, new ComparableAtomicLong(0)); } } public void putTime(String key, Op op, long timeSpent) { if (!times.containsKey(key)) times.put(key, new TimeSet()); times.get(key).addTime(timeSpent); if (timeSpent > THRESHOLD) { String keyExt = key + " " + op.opName() + " (" + op.opNum() + ")"; if (!longCalls.containsKey(keyExt)) longCalls.put(keyExt, new ComparableAtomicLong(0)); longCalls.get(keyExt).incrementAndGet(); } } public void putTime(String key, CustomOp op, long timeSpent) { if (!times.containsKey(key)) times.put(key, new TimeSet()); times.get(key).addTime(timeSpent); if (timeSpent > THRESHOLD) { String keyExt = key + " " + op.opName() + " (" + op.opHash() + ")"; if (!longCalls.containsKey(keyExt)) longCalls.put(keyExt, new ComparableAtomicLong(0)); longCalls.get(keyExt).incrementAndGet(); } } public void putTime(String key, long timeSpent) { if (!times.containsKey(key)) times.put(key, new TimeSet()); times.get(key).addTime(timeSpent); } protected long getMedian(String key) { return times.get(key).getMedian(); } protected long getAverage(String key) { return times.get(key).getAverage(); } protected long getMaximum(String key) { return times.get(key).getMaximum(); } protected long getMinimum(String key) { return times.get(key).getMinimum(); } protected long getSum(String key) { return times.get(key).getSum(); } public String asPercentageString() { StringBuilder builder = new StringBuilder(); Map<String, TimeSet> sortedTimes = ArrayUtil.sortMapByValue(times); AtomicLong sum = new AtomicLong(0); for (String key : sortedTimes.keySet()) { sum.addAndGet(getSum(key)); } long lSum = sum.get(); builder.append("Total time spent: ").append(lSum / 1000000).append(" ms.").append("\n"); for (String key : sortedTimes.keySet()) { long currentSum = getSum(key); float perc; if (lSum == 0) { perc = 0.0f; } else { perc = currentSum * 100.0f / sum.get(); } long sumMs = currentSum / 1000000; builder.append(key).append(" >>> ").append(" perc: ").append(perc).append(" ").append("Time spent: ") .append(sumMs).append(" ms"); builder.append("\n"); } return builder.toString(); } public String asString() { StringBuilder builder = new StringBuilder(); Map<String, TimeSet> sortedTimes = ArrayUtil.sortMapByValue(times); for (String key : sortedTimes.keySet()) { long currentMax = getMaximum(key); long currentMin = getMinimum(key); long currentAvg = getAverage(key); long currentMed = getMedian(key); builder.append(key).append(" >>> "); if (longCalls.size() == 0) builder.append(" ").append(sortedTimes.get(key).size()).append(" calls; "); builder.append("Min: ").append(currentMin).append(" ns; ").append("Max: ").append(currentMax) .append(" ns; 
").append("Average: ").append(currentAvg).append(" ns; ").append("Median: ") .append(currentMed).append(" ns; "); builder.append("\n"); } builder.append("\n"); Map<String, ComparableAtomicLong> sortedCalls = ArrayUtil.sortMapByValue(longCalls); for (String key : sortedCalls.keySet()) { long numCalls = sortedCalls.get(key).get(); builder.append(key).append(" >>> ").append(numCalls); builder.append("\n"); } builder.append("\n"); return builder.toString(); } }
2,309
348
<filename>docs/data/leg-t2/064/06404538.json {"nom":"Uhart-Cize","circ":"4ème circonscription","dpt":"Pyrénées-Atlantiques","inscrits":589,"abs":284,"votants":305,"blancs":16,"nuls":37,"exp":252,"res":[{"nuance":"DVD","nom":"<NAME>","voix":135},{"nuance":"REM","nom":"<NAME>","voix":117}]}
119
7,353
/**
 * @file StreamBuffer.h
 * @author <NAME> <<EMAIL>>
 *
 * @section LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef BADVPN_STREAMBUFFER_H
#define BADVPN_STREAMBUFFER_H

#include <misc/debug.h>
#include <base/DebugObject.h>
#include <flow/StreamRecvInterface.h>
#include <flow/StreamPassInterface.h>

/**
 * Buffer object which reads data from a \link StreamRecvInterface and writes
 * it to a \link StreamPassInterface.
 */
typedef struct {
    int buf_size;
    StreamRecvInterface *input;
    StreamPassInterface *output;
    uint8_t *buf;
    int buf_start;
    int buf_used;
    DebugObject d_obj;
} StreamBuffer;

/**
 * Initializes the buffer object.
 *
 * @param o object to initialize
 * @param buf_size size of the buffer. Must be >0.
 * @param input input interface
 * @param output output interface
 * @return 1 on success, 0 on failure
 */
int StreamBuffer_Init (StreamBuffer *o, int buf_size, StreamRecvInterface *input, StreamPassInterface *output) WARN_UNUSED;

/**
 * Frees the buffer object.
 *
 * @param o object to free
 */
void StreamBuffer_Free (StreamBuffer *o);

#endif
763
4,640
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Defines functions to analyze available opcodes in the ARM ISA.""" import tvm.target ARM_MPROFILE_DSP_SUPPORT_LIST = [ "cortex-m7", "cortex-m4", "cortex-m33", "cortex-m35p", "cortex-m55", ] class IsaAnalyzer(object): """Checks ISA support for given target""" def __init__(self, target): self.target = tvm.target.Target(target) @property def has_dsp_support(self): return self.target.mcpu is not None and self.target.mcpu in ARM_MPROFILE_DSP_SUPPORT_LIST
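# Illustrative usage (not part of the original module). The target strings are
# assumptions chosen for the example; any TVM target whose -mcpu value appears
# in ARM_MPROFILE_DSP_SUPPORT_LIST reports DSP support.
if __name__ == "__main__":
    print(IsaAnalyzer("c -mcpu=cortex-m4").has_dsp_support)   # True
    print(IsaAnalyzer("c -mcpu=cortex-m0").has_dsp_support)   # False, no DSP extension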
407
898
#include "PlayerSettings.h"
10
1,306
// Copyright © 2017 <NAME> // Copyright © 2017-2020 Trust Wallet. // // This file is part of Trust. The full Trust copyright notice, including // terms governing use, modification, and redistribution, is contained in the // file LICENSE at the root of the source code distribution tree. #include "SegwitAddress.h" #include "../Bech32.h" #include <TrezorCrypto/ecdsa.h> #include <TrustWalletCore/TWHRP.h> using namespace TW::Bitcoin; bool SegwitAddress::isValid(const std::string& string) { return std::get<2>(decode(string)); } bool SegwitAddress::isValid(const std::string& string, const std::string& hrp) { auto decoded = decode(string); if (!std::get<2>(decoded)) { return false; } // extra step to check hrp auto dec = Bech32::decode(string); if (std::get<0>(dec) != hrp) { return false; } return true; } SegwitAddress::SegwitAddress(const PublicKey& publicKey, int witver, std::string hrp) : hrp(std::move(hrp)), witnessVersion(witver), witnessProgram() { if (publicKey.type != TWPublicKeyTypeSECP256k1) { throw std::invalid_argument("SegwitAddress needs a compressed SECP256k1 public key."); } witnessProgram.resize(20); ecdsa_get_pubkeyhash(publicKey.compressed().bytes.data(), HASHER_SHA2_RIPEMD, witnessProgram.data()); } std::tuple<SegwitAddress, std::string, bool> SegwitAddress::decode(const std::string& addr) { auto resp = std::make_tuple(SegwitAddress(), "", false); auto dec = Bech32::decode(addr); auto& hrp = std::get<0>(dec); auto& data = std::get<1>(dec); auto& variant = std::get<2>(dec); if (data.empty()) { // bech32 decode fails, or decoded data is empty return resp; } assert(data.size() >= 1); // First byte is Segwit version auto segwitVersion = data[0]; if (segwitVersion == 0) { // v0 uses Bech32 (not M) if (variant != Bech32::ChecksumVariant::Bech32) { return resp; } } else { // segwitVersion >= 1 // v1 uses Bech32M, BIP350 if (variant != Bech32::ChecksumVariant::Bech32M) { return resp; } } auto raw = fromRaw(hrp, data); return std::make_tuple(raw.first, hrp, raw.second); } std::string SegwitAddress::string() const { Data enc; enc.push_back(static_cast<uint8_t>(witnessVersion)); Bech32::convertBits<8, 5, true>(enc, witnessProgram); Bech32::ChecksumVariant variant = Bech32::ChecksumVariant::Bech32; if (witnessVersion== 0) { variant = Bech32::ChecksumVariant::Bech32; } else if (witnessVersion >= 1) { variant = Bech32::ChecksumVariant::Bech32M; } std::string result = Bech32::encode(hrp, enc, variant); if (!std::get<2>(decode(result))) { return {}; } return result; } std::pair<SegwitAddress, bool> SegwitAddress::fromRaw(const std::string& hrp, const Data& data) { auto resp = std::make_pair(SegwitAddress(), false); if (data.size() == 0) { return resp; } byte segwitVersion = data[0]; Data conv; if (!Bech32::convertBits<5, 8, false>(conv, Data(data.begin() + 1, data.end())) || conv.size() < 2 || conv.size() > 40 || segwitVersion > 16 || (segwitVersion == 0 && conv.size() != 20 && conv.size() != 32)) { return resp; } return std::make_pair(SegwitAddress(hrp, data[0], conv), true); }
1,404
1,561
/* * Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package secretmanager; // [START secretmanager_quickstart] import com.google.cloud.secretmanager.v1.AccessSecretVersionResponse; import com.google.cloud.secretmanager.v1.ProjectName; import com.google.cloud.secretmanager.v1.Replication; import com.google.cloud.secretmanager.v1.Secret; import com.google.cloud.secretmanager.v1.SecretManagerServiceClient; import com.google.cloud.secretmanager.v1.SecretPayload; import com.google.cloud.secretmanager.v1.SecretVersion; import com.google.protobuf.ByteString; public class Quickstart { public void quickstart() throws Exception { // TODO(developer): Replace these variables before running the sample. String projectId = "your-project-id"; String secretId = "your-secret-id"; quickstart(projectId, secretId); } public void quickstart(String projectId, String secretId) throws Exception { // Initialize client that will be used to send requests. This client only needs to be created // once, and can be reused for multiple requests. After completing all of your requests, call // the "close" method on the client to safely clean up any remaining background resources. try (SecretManagerServiceClient client = SecretManagerServiceClient.create()) { // Build the parent name from the project. ProjectName projectName = ProjectName.of(projectId); // Create the parent secret. Secret secret = Secret.newBuilder() .setReplication( Replication.newBuilder() .setAutomatic(Replication.Automatic.newBuilder().build()) .build()) .build(); Secret createdSecret = client.createSecret(projectName, secretId, secret); // Add a secret version. SecretPayload payload = SecretPayload.newBuilder().setData(ByteString.copyFromUtf8("hello world!")).build(); SecretVersion addedVersion = client.addSecretVersion(createdSecret.getName(), payload); // Access the secret version. AccessSecretVersionResponse response = client.accessSecretVersion(addedVersion.getName()); // Print the secret payload. // // WARNING: Do not print the secret in a production environment - this // snippet is showing how to access the secret material. String data = response.getPayload().getData().toStringUtf8(); System.out.printf("Plaintext: %s\n", data); } } } // [END secretmanager_quickstart]
948
590
import os import torch import random import networkx as nx import pandas as pd import numpy as np from torch.utils import data from torch_geometric.data import Data from torch_geometric.data import InMemoryDataset from torch_geometric.data import Batch from itertools import repeat, product, chain from collections import Counter, deque from networkx.algorithms.traversal.breadth_first_search import generic_bfs_edges def nx_to_graph_data_obj(g, center_id, allowable_features_downstream=None, allowable_features_pretrain=None, node_id_to_go_labels=None): """ Converts nx graph of PPI to pytorch geometric Data object. :param g: nx graph object of ego graph :param center_id: node id of center node in the ego graph :param allowable_features_downstream: list of possible go function node features for the downstream task. The resulting go_target_downstream node feature vector will be in this order. :param allowable_features_pretrain: list of possible go function node features for the pretraining task. The resulting go_target_pretrain node feature vector will be in this order. :param node_id_to_go_labels: dict that maps node id to a list of its corresponding go labels :return: pytorch geometric Data object with the following attributes: edge_attr edge_index x species_id center_node_idx go_target_downstream (only if node_id_to_go_labels is not None) go_target_pretrain (only if node_id_to_go_labels is not None) """ n_nodes = g.number_of_nodes() n_edges = g.number_of_edges() # nodes nx_node_ids = [n_i for n_i in g.nodes()] # contains list of nx node ids # in a particular ordering. Will be used as a mapping to convert # between nx node ids and data obj node indices x = torch.tensor(np.ones(n_nodes).reshape(-1, 1), dtype=torch.float) # we don't have any node labels, so set to dummy 1. dim n_nodes x 1 center_node_idx = nx_node_ids.index(center_id) center_node_idx = torch.tensor([center_node_idx], dtype=torch.long) # edges edges_list = [] edge_features_list = [] for node_1, node_2, attr_dict in g.edges(data=True): edge_feature = [attr_dict['w1'], attr_dict['w2'], attr_dict['w3'], attr_dict['w4'], attr_dict['w5'], attr_dict['w6'], attr_dict['w7'], 0, 0] # last 2 indicate self-loop # and masking edge_feature = np.array(edge_feature, dtype=int) # convert nx node ids to data obj node index i = nx_node_ids.index(node_1) j = nx_node_ids.index(node_2) edges_list.append((i, j)) edge_features_list.append(edge_feature) edges_list.append((j, i)) edge_features_list.append(edge_feature) # data.edge_index: Graph connectivity in COO format with shape [2, num_edges] edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long) # data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features] edge_attr = torch.tensor(np.array(edge_features_list), dtype=torch.float) try: species_id = int(nx_node_ids[0].split('.')[0]) # nx node id is of the form: # species_id.protein_id species_id = torch.tensor([species_id], dtype=torch.long) except: # occurs when nx node id has no species id info. 
For the extract # substructure context pair transform, where we convert a data obj to # a nx graph obj (which does not have original node id info) species_id = torch.tensor([0], dtype=torch.long) # dummy species # id is 0 # construct data obj data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) data.species_id = species_id data.center_node_idx = center_node_idx if node_id_to_go_labels: # supervised case with go node labels # Construct a dim n_pretrain_go_classes tensor and a # n_downstream_go_classes tensor for the center node. 0 is no data # or negative, 1 is positive. downstream_go_node_feature = [0] * len(allowable_features_downstream) pretrain_go_node_feature = [0] * len(allowable_features_pretrain) if center_id in node_id_to_go_labels: go_labels = node_id_to_go_labels[center_id] # get indices of allowable_features_downstream that match with elements # in go_labels _, node_feature_indices, _ = np.intersect1d( allowable_features_downstream, go_labels, return_indices=True) for idx in node_feature_indices: downstream_go_node_feature[idx] = 1 # get indices of allowable_features_pretrain that match with # elements in go_labels _, node_feature_indices, _ = np.intersect1d( allowable_features_pretrain, go_labels, return_indices=True) for idx in node_feature_indices: pretrain_go_node_feature[idx] = 1 data.go_target_downstream = torch.tensor(np.array(downstream_go_node_feature), dtype=torch.long) data.go_target_pretrain = torch.tensor(np.array(pretrain_go_node_feature), dtype=torch.long) return data def graph_data_obj_to_nx(data): """ Converts pytorch geometric Data obj to network x data object. :param data: pytorch geometric Data object :return: nx graph object """ G = nx.Graph() # edges edge_index = data.edge_index.cpu().numpy() edge_attr = data.edge_attr.cpu().numpy() n_edges = edge_index.shape[1] for j in range(0, n_edges, 2): begin_idx = int(edge_index[0, j]) end_idx = int(edge_index[1, j]) w1, w2, w3, w4, w5, w6, w7, _, _ = edge_attr[j].astype(bool) if not G.has_edge(begin_idx, end_idx): G.add_edge(begin_idx, end_idx, w1=w1, w2=w2, w3=w3, w4=w4, w5=w5, w6=w6, w7=w7) # # add center node id information in final nx graph object # nx.set_node_attributes(G, {data.center_node_idx.item(): True}, 'is_centre') return G class BioDataset(InMemoryDataset): def __init__(self, root, data_type, empty=False, transform=None, pre_transform=None, pre_filter=None): """ Adapted from qm9.py. Disabled the download functionality :param root: the data directory that contains a raw and processed dir :param data_type: either supervised or unsupervised :param empty: if True, then will not load any data obj. For initializing empty dataset :param transform: :param pre_transform: :param pre_filter: """ self.root = root self.data_type = data_type super(BioDataset, self).__init__(root, transform, pre_transform, pre_filter) if not empty: self.data, self.slices = torch.load(self.processed_paths[0]) @property def raw_file_names(self): #raise NotImplementedError('Data is assumed to be processed') if self.data_type == 'supervised': # 8 labelled species file_name_list = ['3702', '6239', '511145', '7227', '9606', '10090', '4932', '7955'] else: # unsupervised: 8 labelled species, and 42 top unlabelled species by n_nodes. 
file_name_list = ['3702', '6239', '511145', '7227', '9606', '10090', '4932', '7955', '3694', '39947', '10116', '443255', '9913', '13616', '3847', '4577', '8364', '9823', '9615', '9544', '9796', '3055', '7159', '9031', '7739', '395019', '88036', '9685', '9258', '9598', '485913', '44689', '9593', '7897', '31033', '749414', '59729', '536227', '4081', '8090', '9601', '749927', '13735', '448385', '457427', '3711', '479433', '479432', '28377', '9646'] return file_name_list @property def processed_file_names(self): return 'geometric_data_processed.pt' def download(self): raise NotImplementedError('Must indicate valid location of raw data. ' 'No download allowed') def process(self): raise NotImplementedError('Data is assumed to be processed') if __name__ == "__main__": root_supervised = 'dataset/supervised' d_supervised = BioDataset(root_supervised, data_type='supervised') print(d_supervised) root_unsupervised = 'dataset/unsupervised' d_unsupervised = BioDataset(root_unsupervised, data_type='unsupervised') print(d_unsupervised)
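# ------------------------------------------------------------------------------
# Illustrative round-trip example (not part of the original file, appended after
# the original __main__ block). The node ids and edge weights are invented; call
# this helper manually to check that nx_to_graph_data_obj() and
# graph_data_obj_to_nx() are mutually consistent on a toy PPI ego graph.
# ------------------------------------------------------------------------------
def _ego_graph_roundtrip_example():
    g = nx.Graph()
    # STRING-style node ids of the form "<species_id>.<protein_id>"
    g.add_edge('9606.A', '9606.B', w1=1, w2=0, w3=0, w4=0, w5=0, w6=0, w7=1)
    g.add_edge('9606.B', '9606.C', w1=0, w2=1, w3=0, w4=0, w5=0, w6=0, w7=0)

    data = nx_to_graph_data_obj(g, center_id='9606.A')
    assert data.species_id.item() == 9606
    assert data.edge_index.shape[1] == 2 * g.number_of_edges()

    g_back = graph_data_obj_to_nx(data)
    # node ids become integer indices, but the edge count is preserved
    assert g_back.number_of_edges() == g.number_of_edges()
    return data, g_back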
3,826
990
''' Given a list of airline tickets represented by pairs of departure and arrival airports [from, to], reconstruct the itinerary in order. All of the tickets belong to a man who departs from JFK. Thus, the itinerary must begin with JFK. Note: If there are multiple valid itineraries, you should return the itinerary that has the smallest lexical order when read as a single string. For example, the itinerary ["JFK", "LGA"] has a smaller lexical order than ["JFK", "LGB"]. All airports are represented by three capital letters (IATA code). You may assume all tickets form at least one valid itinerary. Example 1: Input: tickets = [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]] Output: ["JFK", "MUC", "LHR", "SFO", "SJC"] ''' from collections import defaultdict class Solution(object): def findItinerary(self, tickets): """ :type tickets: List[List[str]] :rtype: List[str] """ n = len(tickets) trips = defaultdict(list) for x in tickets: trips[x[0]].append(x[1]) for x in trips: trips[x].sort() iter = ["JFK"] def dfs(curr_iter): if len(curr_iter) == n+1: return curr_iter curr_stop = curr_iter[-1] if trips[curr_stop] == []: return None next_stops = trips[curr_stop] i = 0 for stop in next_stops: curr_iter.append(stop) del trips[curr_stop][i] if dfs(curr_iter): return curr_iter curr_iter.pop() trips[curr_stop].insert(i, stop) i += 1 return None return dfs(iter)
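# Illustrative check (not part of the original solution): the example from the
# problem statement reconstructs the expected itinerary.
if __name__ == "__main__":
    tickets = [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]
    itinerary = Solution().findItinerary(tickets)
    print(itinerary)  # ['JFK', 'MUC', 'LHR', 'SFO', 'SJC']
    assert itinerary == ["JFK", "MUC", "LHR", "SFO", "SJC"]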
883
5,169
{ "name": "MobiBurn", "version": "1.0.6", "summary": "MobiBurn provides you more efficient way to integration for iOS.", "description": "TODO: Add long description of the pod here.", "homepage": "http://www.ihardiktrivedi.com", "authors": { "<NAME>": "<EMAIL>" }, "social_media_url": "http://twitter.com/iHardikTrivedi", "platforms": { "ios": "8.0" }, "source": { "git": "https://[email protected]/iHardikTrivedi/mobiburn_framework.git" }, "source_files": [ "MobiBurn", "MobiBurn/*.h" ], "frameworks": [ "SystemConfiguration", "MobileCoreServices", "CoreTelephony", "Security", "AdSupport" ] }
279
2,863
<reponame>bTest2018/spock /* * Copyright 2013 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.spockframework.util; import java.io.File; import java.net.URI; import java.net.URISyntaxException; public class ConsoleUtil { public static String asClickableFileUrl(File path) { try { // File.toURI().toURL().toString() isn't clickable in Mac Terminal return new URI("file", "", path.toURI().getPath(), null, null).toString(); } catch (URISyntaxException e) { throw new InternalSpockError(e); } } }
330
392
/* * Sym-edge. * * Sym-edges are quad-edges restricted to the primary oriented graph. Their * implementation does not maintain the data structures for accessing the * rotated, flipped, or dual graphs. Only the symmetric counterpart of each * edge is stored. * * Each vertex has an associated data item and each directed edge may * optionally have an associated data item. */ #ifndef MATH__GEOMETRY__SYM_EDGE_HH #define MATH__GEOMETRY__SYM_EDGE_HH #include "lang/exceptions/ex_not_found.hh" #include "lang/pointers/auto_ptr.hh" #include "math/geometry/point_2D.hh" namespace math { namespace geometry { /* * Imports. */ using lang::exceptions::ex_not_found; using lang::pointers::auto_ptr; /* * Sym-edge. * V is the type of data associated with each vertex. * E is the (optional) type of data associated with each directed edge. */ template <typename V = point_2D, typename E = void*> class sym_edge { public: /* * Constructors. * Optionally specify (undirected or directed) edge data. */ explicit sym_edge(V& /* origin */, V& /* destination */); explicit sym_edge(V& /* origin */, V& /* destination */, E& /* data */); explicit sym_edge(V& /* origin */, V& /* destination */, E&, E& /* data */); /* * Destructor. * Disconnect the edge from the edge structure and delete it. */ virtual ~sym_edge(); /* * Get/set origin. */ V& origin() const; void origin(V&); /* * Get/set destination. */ V& destination() const; void destination(V&); /* * Check whether the edge has associated data. */ bool has_data() const; /* * Get edge data. * Return the data associated with the edge. * Throw an ex_not_found exception if there is no associated data. */ E& data() const; /* * Set edge data. * Associate the given data item with the edge. */ void data(E&); /* * Return symmetric edge (with same orientation and opposite direction). */ sym_edge<V,E>& sym(); const sym_edge<V,E>& sym() const; /* * Return the previous edge with the same origin (move clockwise). */ sym_edge<V,E>& origin_prev(); const sym_edge<V,E>& origin_prev() const; /* * Return the next edge with the same origin (move counterclockwise). */ sym_edge<V,E>& origin_next(); const sym_edge<V,E>& origin_next() const; /* * Return the previous edge with the same destination. */ sym_edge<V,E>& dest_prev(); const sym_edge<V,E>& dest_prev() const; /* * Return the next edge with the same destination. */ sym_edge<V,E>& dest_next(); const sym_edge<V,E>& dest_next() const; /* * Return the previous edge on the left face. */ sym_edge<V,E>& left_prev(); const sym_edge<V,E>& left_prev() const; /* * Return the next edge on the left face. */ sym_edge<V,E>& left_next(); const sym_edge<V,E>& left_next() const; /* * Return the previous edge on the right face. */ sym_edge<V,E>& right_prev(); const sym_edge<V,E>& right_prev() const; /* * Return the next edge on the right face. */ sym_edge<V,E>& right_next(); const sym_edge<V,E>& right_next() const; /* * Swap the edge for the diagonal edge formed by sliding its endpoints * along their immediately previous edges. Return a reference to the edge. */ sym_edge<V,E>& swap(); /* * Disconnect the edge from the edge structure. * Return a reference to the edge. */ sym_edge<V,E>& disconnect(); /* * Reconnect an edge that has been disconnected. Set it to connect the two * given edges (from the destination of the first edge to the origin of the * second). Return a reference to the edge. 
*/ sym_edge<V,E>& reconnect(sym_edge<V,E>&, sym_edge<V,E>&); /* * Create an edge connecting the two given edges (from the destination of * the first edge to the origin of the second). Return the new edge. */ static auto_ptr< sym_edge<V,E> > connect( sym_edge<V,E>&, sym_edge<V,E>& ); /* * Create an edge connecting the two given edges and set its edge data. * Return the new edge. */ static auto_ptr< sym_edge<V,E> > connect( sym_edge<V,E>&, sym_edge<V,E>&, E& ); /* * Create an edge connecting the two given edges and set its edge data. * Return the new edge. */ static auto_ptr< sym_edge<V,E> > connect( sym_edge<V,E>&, sym_edge<V,E>&, E&, E& ); /* * Splice two edges. */ static void splice(sym_edge<V,E>&, sym_edge<V,E>&); protected: /* * Protected constructor. * Create and return an empty sym-edge. */ sym_edge(); /* * Protected copy constructor. * Sym-edges should only be copied internally. */ sym_edge(const sym_edge<V,E>&); /* * Sym-edge data. */ V* _v_data; /* vertex data */ E* _e_data; /* edge data (optional) */ sym_edge<V,E>* _sym; /* same edge with opposite direction */ sym_edge<V,E>* _oprev; /* previous edge with the same origin */ sym_edge<V,E>* _onext; /* next edge with the same origin */ }; /*************************************************************************** * Constructors and destructor. ***************************************************************************/ /* * Protected constructor. * Create and return an empty sym-edge. */ template <typename V, typename E> sym_edge<V,E>::sym_edge() : _v_data(NULL), _e_data(NULL), _sym(NULL), _oprev(this), _onext(this) { } /* * Constructor. * Create a sym-edge with the given origin and destination. */ template <typename V, typename E> sym_edge<V,E>::sym_edge(V& v_origin, V& v_dest) : _v_data(&v_origin), _e_data(NULL), _sym(NULL), _oprev(this), _onext(this) { _sym = new sym_edge<V,E>(); _sym->_v_data = &v_dest; _sym->_sym = this; } /* * Constructor. * Create a sym-edge with the given origin and destination. * In addition, associate the given data item with the undirected edge. */ template <typename V, typename E> sym_edge<V,E>::sym_edge(V& v_origin, V& v_dest, E& e_data) : _v_data(&v_origin), _e_data(&e_data), _sym(NULL), _oprev(this), _onext(this) { _sym = new sym_edge<V,E>(); _sym->_v_data = &v_dest; _sym->_e_data = &e_data; _sym->_sym = this; } /* * Constructor. * Create a sym-edge with the given origin and destination. * In addition, associate the given data items with the directed edges. */ template <typename V, typename E> sym_edge<V,E>::sym_edge(V& v_origin, V& v_dest, E& e_data, E& e_sym_data) : _v_data(&v_origin), _e_data(&e_data), _sym(NULL), _oprev(this), _onext(this) { _sym = new sym_edge<V,E>(); _sym->_v_data = &v_dest; _sym->_e_data = &e_sym_data; _sym->_sym = this; } /* * Protected copy constructor. * Sym-edges should only be copied internally. */ template <typename V, typename E> sym_edge<V,E>::sym_edge(const sym_edge<V,E>& e) : _v_data(e._vdata), _e_data(e._e_data), _sym(e._sym), _oprev(e._oprev), _onext(e._onext) { /* unlink original edge */ e._sym = NULL; e._oprev = &e; e._onext = &e; /* link symmetric edge */ _sym->_sym = this; /* check previous and next edges for self-reference */ if (_oprev == &e) { _oprev = this; } if (_onext == &e) { _onext = this; } /* link previous and next edges */ _oprev->_onext = this; _onext->_oprev = this; } /* * Destructor. * Disconnect the edge from the edge structure and delete it. 
*/ template <typename V, typename E> sym_edge<V,E>::~sym_edge() { /* disconnect edge form edge structure */ sym_edge<V,E>::splice(*this, *_oprev); /* delete symmetric edge */ if (_sym != NULL) { _sym->_sym = NULL; delete _sym; } } /*************************************************************************** * Vertex and edge data. ***************************************************************************/ /* * Get origin. */ template <typename V, typename E> V& sym_edge<V,E>::origin() const { return *_v_data; } /* * Set origin. */ template <typename V, typename E> void sym_edge<V,E>::origin(V& v) { _v_data = &v; } /* * Get destination. */ template <typename V, typename E> V& sym_edge<V,E>::destination() const { return *(_sym->_v_data); } /* * Set destination. */ template <typename V, typename E> void sym_edge<V,E>::destination(V& v) { _sym->_v_data = &v; } /* * Check whether the edge has associated data. */ template <typename V, typename E> bool sym_edge<V,E>::has_data() const { return (_e_data != NULL); } /* * Get edge data. * Return the data associated with the edge. * Throw an ex_not_found exception if there is no associated data. */ template <typename V, typename E> E& sym_edge<V,E>::data() const { if (_e_data == NULL) throw ex_not_found("no data associated with sym_edge"); else return *_e_data; } /* * Set edge data. * Associate the given data item with the edge. */ template <typename V, typename E> void sym_edge<V,E>::data(E& e) { _e_data = &e; } /*************************************************************************** * Edge counterparts. ***************************************************************************/ /* * Return edge with same orientation and opposite direction. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::sym() { return *_sym; } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::sym() const { return *_sym; } /* * Return the previous edge with the same origin (move clockwise). */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::origin_prev() { return *_oprev; } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::origin_prev() const { return *_oprev; } /* * Return the next edge with the same origin (move counterclockwise). */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::origin_next() { return *_onext; } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::origin_next() const { return *_onext; } /* * Return the previous edge with the same destination. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::dest_prev() { return *(_sym->_oprev->_sym); } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::dest_prev() const { return *(_sym->_oprev->_sym); } /* * Return the next edge with the same destination. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::dest_next() { return *(_sym->_onext->_sym); } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::dest_next() const { return *(_sym->_onext->_sym); } /* * Return the previous edge on the left face. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::left_prev() { return *(_onext->_sym); } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::left_prev() const { return *(_onext->_sym); } /* * Return the next edge on the left face. 
*/ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::left_next() { return *(_sym->_oprev); } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::left_next() const { return *(_sym->_oprev); } /* * Return the previous edge on the right face. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::right_prev() { return *(_sym->_onext); } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::right_prev() const { return *(_sym->_onext); } /* * Return the next edge on the right face. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::right_next() { return *(_oprev->_sym); } template <typename V, typename E> const sym_edge<V,E>& sym_edge<V,E>::right_next() const { return *(_oprev->_sym); } /*************************************************************************** * Edge operations. ***************************************************************************/ /* * Swap the edge for the diagonal edge formed by sliding its endpoints * along their immediately previous edges. Return a reference to the edge. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::swap() { sym_edge<V,E>& a(*(_oprev)); sym_edge<V,E>& b(*(_sym->_oprev)); sym_edge<V,E>::splice(*this, a); sym_edge<V,E>::splice(*_sym, b); sym_edge<V,E>::splice(*this, a.left_next()); sym_edge<V,E>::splice(*_sym, b.left_next()); _v_data = &(a.destination()); _sym->_v_data = &(b.destination()); return *this; } /* * Disconnect the edge from the edge structure. * Return a reference to the edge. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::disconnect() { sym_edge<V,E>::splice(*this, *_oprev); sym_edge<V,E>::splice(*_sym, *(_sym->_oprev)); return *this; } /* * Reconnect an edge that has been disconnected. Set it to connect the two * given edges (from the destination of the first edge to the origin of the * second). Return a reference to the edge. */ template <typename V, typename E> sym_edge<V,E>& sym_edge<V,E>::reconnect(sym_edge<V,E>& a, sym_edge<V,E>& b) { /* set edge origin and destination */ this->origin(a.destination()); this->destination(b.origin()); /* set edge to connect a to b */ sym_edge<V,E>::splice(*this, a.left_next()); sym_edge<V,E>::splice(this->sym(), b); return *this; } /* * Create an edge connecting the two given edges (from the destination of * the first edge to the origin of the second). Return the new edge. */ template <typename V, typename E> auto_ptr< sym_edge<V,E> > sym_edge<V,E>::connect( sym_edge<V,E>& a, sym_edge<V,E>& b) { auto_ptr< sym_edge<V,E> > e(new sym_edge<V,E>(a.destination(), b.origin())); sym_edge<V,E>::splice(*e, a.left_next()); sym_edge<V,E>::splice(e->sym(), b); return e; } /* * Create an edge connecting the two given edges and set its edge data. * Return the new edge. */ template <typename V, typename E> auto_ptr< sym_edge<V,E> > sym_edge<V,E>::connect( sym_edge<V,E>& a, sym_edge<V,E>& b, E& e_data) { auto_ptr< sym_edge<V,E> > e = sym_edge<V,E>::connect(a, b); e->data(e_data); return e; } /* * Create an edge connecting the two given edges and set its edge data. * Return the new edge. */ template <typename V, typename E> auto_ptr< sym_edge<V,E> > sym_edge<V,E>::connect( sym_edge<V,E>& a, sym_edge<V,E>& b, E& e_data, E& e_sym_data) { auto_ptr< sym_edge<V,E> > e = sym_edge<V,E>::connect(a, b); e->data(e_data); e->_sym->data(e_sym_data); return e; } /* * Splice two edges. 
*/ template <typename V, typename E> void sym_edge<V,E>::splice(sym_edge<V,E>& a, sym_edge<V,E>& b) { sym_edge<V,E>* temp = a._onext; a._onext = b._onext; b._onext->_oprev = &a; b._onext = temp; temp->_oprev = &b; } } /* namespace geometry */ } /* namespace math */ #endif
5,874
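The sym_edge implementation above is a quad-edge style edge algebra in which splice() is the single primitive used to both join and separate origin rings. The following is a minimal Python sketch of that primitive, written only to illustrate how the _onext/_oprev pointer exchange works; the SymEdge class, its field names, and the small demo at the end are illustrative stand-ins, not part of the original library.

class SymEdge:
    """Minimal stand-in for sym_edge<V,E>: only the origin-ring pointers."""
    def __init__(self, label):
        self.label = label
        self.onext = self   # next edge with the same origin (counterclockwise)
        self.oprev = self   # previous edge with the same origin (clockwise)

def splice(a, b):
    # Mirrors sym_edge<V,E>::splice(): swap the onext pointers of a and b,
    # then patch the oprev back-pointers so both rings stay consistent.
    temp = a.onext
    a.onext = b.onext
    b.onext.oprev = a
    b.onext = temp
    temp.oprev = b

# Splicing two isolated edges merges their origin rings ...
a, b = SymEdge("a"), SymEdge("b")
splice(a, b)
assert a.onext is b and b.onext is a
# ... and splicing them again separates the rings (splice is its own inverse).
splice(a, b)
assert a.onext is a and b.onext is b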
3,428
<filename>lib/node_modules/@stdlib/datasets/spam-assassin/data/easy-ham-1/00011.fbcde1b4833bdbaaf0ced723edd6e355.json {"id":"00011","group":"easy-ham-1","checksum":{"type":"MD5","value":"fbcde1b4833bdbaaf0ced723edd6e355"},"text":"From <EMAIL> Thu Aug 22 15:25:29 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: zzz<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id AE2D043F9B\n\tfor <zzzz@localhost>; Thu, 22 Aug 2002 10:25:29 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor zzzz@localhost (single-drop); Thu, 22 Aug 2002 15:25:29 +0100 (IST)\nReceived: from usw-sf-list2.sourceforge.net (usw-sf-fw2.sourceforge.net\n [192.168.127.12]) by dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id\n g7MENlZ09984 for <<EMAIL>>; Thu, 22 Aug 2002 15:23:47 +0100\nReceived: from usw-sf-list1-b.sourceforge.net ([10.3.1.13]\n helo=usw-sf-list1.sourceforge.net) by usw-sf-list2.sourceforge.net with\n esmtp (Exim 3.31-VA-mm2 #1 (Debian)) id 17hsof-00042r-00; Thu,\n 22 Aug 2002 07:20:05 -0700\nReceived: from vivi.uptime.at ([192.168.3.11] helo=mail.uptime.at) by\n usw-sf-list1.sourceforge.net with esmtp (Exim 3.31-VA-mm2 #1 (Debian)) id\n 17hsoM-0000Ge-00 for <<EMAIL>>;\n Thu, 22 Aug 2002 07:19:47 -0700\nReceived: from [192.168.0.4] (chello062178142216.4.14.vie.surfer.at\n [172.16.17.32]) (authenticated bits=0) by mail.uptime.at (8.12.5/8.12.5)\n with ESMTP id g7MEI7Vp022036 for\n <<EMAIL>>; Thu, 22 Aug 2002 16:18:07\n +0200\nUser-Agent: Microsoft-Entourage/10.0.0.1309\nFrom: David H=?ISO-8859-1?B?9g==?=hn <<EMAIL>>\nTo: <<EMAIL>>\nMessage-Id: <B98ABFA4.1F87%<EMAIL>>\nMIME-Version: 1.0\nX-Trusted: YES\nX-From-Laptop: YES\nContent-Type: text/plain; charset=\"US-ASCII\"\nContent-Transfer-Encoding: 7bit\nX-Mailscanner: Nothing found, baby\nSubject: [SAdev] Interesting approach to Spam handling..\nSender: <EMAIL>.sourceforge.net\nErrors-To: <EMAIL>\nX-Beenthere: <EMAIL>\nX-Mailman-Version: 2.0.9-sf.net\nPrecedence: bulk\nList-Help: <mailto:<EMAIL>?subject=help>\nList-Post: <mailto:<EMAIL>>\nList-Subscribe: <https://example.sourceforge.net/lists/listinfo/spamassassin-devel>,\n <mailto:<EMAIL>?subject=subscribe>\nList-Id: SpamAssassin Developers <spamassassin-devel.example.sourceforge.net>\nList-Unsubscribe: <https://example.sourceforge.net/lists/listinfo/spamassassin-devel>,\n <mailto:<EMAIL>?subject=unsubscribe>\nList-Archive: <http://www.geocrawler.com/redir-sf.php3?list=spamassassin-devel>\nX-Original-Date: Thu, 22 Aug 2002 16:19:48 +0200\nDate: Thu, 22 Aug 2002 16:19:48 +0200\n\nHello, have you seen and discussed this article and his approach?\n\nThank you\n\nhttp://www.paulgraham.com/spam.html\n-- \"Hell, there are no rules here-- we're trying to accomplish something.\"\n-- <NAME>\n\n\n\n\n-------------------------------------------------------\nThis sf.net email is sponsored by: OSDN - Tired of that same old\ncell phone? Get a new here for FREE!\nhttps://www.inphonic.com/r.asp?r=sourceforge1&refcode1=vs3390\n_______________________________________________\nSpamassassin-devel mailing list\n<EMAIL>.sourceforge.net\nhttps://lists.sourceforge.net/lists/listinfo/spamassassin-devel\n\n"}
1,389
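Each record in the spam-assassin corpus above carries an MD5 checksum alongside the raw message text. The short Python sketch below shows how such a record could be validated after loading; the field names follow the JSON above, but the file path and the assumption that the checksum is taken over the UTF-8 bytes of the "text" field are illustrative guesses, not something documented here.

import hashlib
import json

def validate_record(path):
    # Load one record of the form {"id", "group", "checksum": {"type", "value"}, "text"}.
    with open(path, encoding="utf-8") as fh:
        record = json.load(fh)
    assert record["checksum"]["type"] == "MD5"
    digest = hashlib.md5(record["text"].encode("utf-8")).hexdigest()
    return digest == record["checksum"]["value"]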
1,337
/* * Copyright (c) 2008-2017 Haulmont. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.haulmont.cuba.web.gui.components; import com.haulmont.bali.util.Preconditions; import com.haulmont.chile.core.datatypes.Datatype; import com.haulmont.chile.core.datatypes.DatatypeRegistry; import com.haulmont.chile.core.datatypes.ValueConversionException; import com.haulmont.chile.core.model.MetaProperty; import com.haulmont.chile.core.model.MetaPropertyPath; import com.haulmont.cuba.core.entity.annotation.CurrencyValue; import com.haulmont.cuba.core.global.Messages; import com.haulmont.cuba.core.global.UserSessionSource; import com.haulmont.cuba.gui.components.CurrencyField; import com.haulmont.cuba.gui.components.data.ConversionException; import com.haulmont.cuba.gui.components.data.DataAwareComponentsTools; import com.haulmont.cuba.gui.components.data.ValueSource; import com.haulmont.cuba.gui.components.data.meta.EntityValueSource; import org.apache.commons.lang3.StringUtils; import javax.inject.Inject; import java.math.BigDecimal; import java.text.ParseException; import java.util.Locale; import java.util.Map; import static com.google.common.base.Strings.emptyToNull; import static com.google.common.base.Strings.nullToEmpty; public class WebCurrencyField<V extends Number> extends WebV8AbstractField<CubaCurrencyField, String, V> implements CurrencyField<V> { protected Locale locale; protected Datatype<V> datatype; protected Datatype<V> defaultDatatype; protected String conversionErrorMessage; protected DataAwareComponentsTools dataAwareComponentsTools; public WebCurrencyField() { component = new CubaCurrencyField(); component.setCurrencyLabelPosition(toWidgetLabelPosition(CurrencyLabelPosition.RIGHT)); attachValueChangeListener(component); } @Inject public void setDataAwareComponentsTools(DataAwareComponentsTools dataAwareComponentsTools) { this.dataAwareComponentsTools = dataAwareComponentsTools; } @Inject public void setDatatypeRegistry(DatatypeRegistry datatypeRegistry) { //noinspection unchecked this.defaultDatatype = (Datatype<V>) datatypeRegistry.get(BigDecimal.class); } @Override protected void attachValueChangeListener(CubaCurrencyField component) { component.getInternalComponent() .addValueChangeListener(event -> componentValueChanged(event.getOldValue(), event.getValue(), event.isUserOriginated())); } @Inject public void setUserSessionSource(UserSessionSource userSessionSource) { this.locale = userSessionSource.getLocale(); } @Override protected String convertToPresentation(V modelValue) throws ConversionException { Datatype<V> datatype = getDatatypeInternal(); // Vaadin TextField does not permit `null` value if (datatype != null) { return nullToEmpty(datatype.format(modelValue, locale)); } if (valueBinding != null && valueBinding.getSource() instanceof EntityValueSource) { EntityValueSource entityValueSource = (EntityValueSource) valueBinding.getSource(); Datatype<V> propertyDataType = entityValueSource.getMetaPropertyPath().getRange().asDatatype(); return 
nullToEmpty(propertyDataType.format(modelValue, locale)); } return nullToEmpty(super.convertToPresentation(modelValue)); } @Override protected V convertToModel(String componentRawValue) throws ConversionException { String value = StringUtils.trimToNull(emptyToNull(componentRawValue)); Datatype<V> datatype = getDatatypeInternal(); if (datatype != null) { try { return datatype.parse(value, locale); } catch (ValueConversionException e) { throw new ConversionException(e.getLocalizedMessage(), e); } catch (ParseException e) { throw new ConversionException(getConversionErrorMessageInternal(), e); } } if (valueBinding != null && valueBinding.getSource() instanceof EntityValueSource) { EntityValueSource entityValueSource = (EntityValueSource) valueBinding.getSource(); Datatype<V> propertyDataType = entityValueSource.getMetaPropertyPath().getRange().asDatatype(); try { return propertyDataType.parse(componentRawValue, locale); } catch (ValueConversionException e) { throw new ConversionException(e.getLocalizedMessage(), e); } catch (ParseException e) { throw new ConversionException(getConversionErrorMessageInternal(), e); } } return super.convertToModel(componentRawValue); } @Override public void setConversionErrorMessage(String conversionErrorMessage) { this.conversionErrorMessage = conversionErrorMessage; } @Override public String getConversionErrorMessage() { return conversionErrorMessage; } protected String getConversionErrorMessageInternal() { String customErrorMessage = getConversionErrorMessage(); if (StringUtils.isNotEmpty(customErrorMessage)) { return customErrorMessage; } Datatype<V> datatype = this.datatype; if (datatype == null && valueBinding != null && valueBinding.getSource() instanceof EntityValueSource) { EntityValueSource entityValueSource = (EntityValueSource) valueBinding.getSource(); datatype = entityValueSource.getMetaPropertyPath().getRange().asDatatype(); } if (datatype != null) { String msg = getDatatypeConversionErrorMsg(datatype); if (StringUtils.isNotEmpty(msg)) { return msg; } } return beanLocator.get(Messages.class) .getMainMessage("databinding.conversion.error"); } @Override public void setCurrency(String currency) { component.setCurrency(currency); } @Override public String getCurrency() { return component.getCurrency(); } @Override public void setShowCurrencyLabel(boolean showCurrencyLabel) { component.setShowCurrencyLabel(showCurrencyLabel); } @Override public boolean getShowCurrencyLabel() { return component.getShowCurrencyLabel(); } @Override public void setCurrencyLabelPosition(CurrencyLabelPosition currencyLabelPosition) { Preconditions.checkNotNullArgument(currencyLabelPosition); component.setCurrencyLabelPosition(toWidgetLabelPosition(currencyLabelPosition)); } @Override public CurrencyLabelPosition getCurrencyLabelPosition() { return fromWidgetLabelPosition(component.getCurrencyLabelPosition()); } @Override protected void valueBindingConnected(ValueSource<V> valueSource) { super.valueBindingConnected(valueSource); if (valueSource instanceof EntityValueSource) { MetaPropertyPath metaPropertyPath = ((EntityValueSource) valueSource).getMetaPropertyPath(); if (metaPropertyPath.getRange().isDatatype()) { Datatype datatype = metaPropertyPath.getRange().asDatatype(); if (!Number.class.isAssignableFrom(datatype.getJavaClass())) { throw new IllegalArgumentException("CurrencyField doesn't support Datatype with class: " + datatype.getJavaClass()); } } else { throw new IllegalArgumentException("CurrencyField doesn't support properties with association"); } MetaProperty 
metaProperty = metaPropertyPath.getMetaProperty(); Object annotation = metaProperty.getAnnotations() .get(CurrencyValue.class.getName()); if (annotation == null) { return; } //noinspection unchecked Map<String, Object> annotationProperties = (Map<String, Object>) annotation; String currencyName = (String) annotationProperties.get("currency"); component.setCurrency(currencyName); String labelPosition = ((com.haulmont.cuba.core.entity.annotation.CurrencyLabelPosition) annotationProperties.get("labelPosition")).name(); setCurrencyLabelPosition(CurrencyLabelPosition.valueOf(labelPosition)); } } @Override public void setDatatype(Datatype<V> datatype) { Preconditions.checkNotNullArgument(datatype); dataAwareComponentsTools.checkValueSourceDatatypeMismatch(datatype, getValueSource()); if (!Number.class.isAssignableFrom(datatype.getJavaClass())) { throw new IllegalArgumentException("CurrencyField doesn't support Datatype with class: " + datatype.getJavaClass()); } this.datatype = datatype; } @Override public Datatype<V> getDatatype() { return datatype; } protected Datatype<V> getDatatypeInternal() { if (datatype != null) { return datatype; } return valueBinding == null ? defaultDatatype : null; } @Override public void commit() { super.commit(); } @Override public void discard() { super.discard(); } @Override public boolean isBuffered() { return super.isBuffered(); } @Override public void setBuffered(boolean buffered) { super.setBuffered(buffered); } @Override public boolean isModified() { return super.isModified(); } @Override public void focus() { component.focus(); } @Override public int getTabIndex() { return component.getTabIndex(); } @Override public void setTabIndex(int tabIndex) { component.setTabIndex(tabIndex); } protected com.haulmont.cuba.web.widgets.CurrencyLabelPosition toWidgetLabelPosition(CurrencyLabelPosition labelPosition) { return com.haulmont.cuba.web.widgets.CurrencyLabelPosition.valueOf(labelPosition.name()); } protected CurrencyLabelPosition fromWidgetLabelPosition(com.haulmont.cuba.web.widgets.CurrencyLabelPosition wLabelPosition) { return CurrencyLabelPosition.valueOf(wLabelPosition.name()); } }
4,125
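The WebCurrencyField above converts between the raw component text and the model value by going through a Datatype (the explicitly set datatype first, then the datatype of the bound entity property, then a BigDecimal default) and maps parse failures to a ConversionException. Below is a rough Python sketch of that presentation/model round trip; the Datatype protocol, the exception type, and the decimal-based default are stand-ins chosen for illustration (locale handling is omitted), not the CUBA API.

from decimal import Decimal, InvalidOperation

class ConversionException(Exception):
    pass

class DecimalDatatype:
    """Stand-in for a Datatype<V>: knows how to format and parse one value type."""
    def format(self, value):
        return "" if value is None else str(value)
    def parse(self, text):
        try:
            return Decimal(text)
        except InvalidOperation as exc:
            raise ConversionException("cannot parse '%s'" % text) from exc

class CurrencyField:
    def __init__(self, datatype=None, property_datatype=None):
        # Simplified version of the fallback order used by getDatatypeInternal()
        # and convertToModel(): explicit datatype, then the bound property's
        # datatype, then a plain decimal default.
        self._datatype = datatype or property_datatype or DecimalDatatype()
    def to_presentation(self, model_value):
        return self._datatype.format(model_value)
    def to_model(self, raw_text):
        raw_text = raw_text.strip()
        if not raw_text:
            return None
        return self._datatype.parse(raw_text)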
335
<gh_stars>100-1000 { "word": "Nautilus", "definitions": [ "A cephalopod mollusc with a light external spiral shell and numerous short tentacles around the mouth." ], "parts-of-speech": "Noun" }
85
3,084
/*++ Copyright (c) Realtek Semiconductor Corp. All rights reserved. Module Name: WOLPattern.c Abstract: 1. This source file is going to be implemented the functions about Wake-on WLAN (WOL). Major Change History: When Who What ---------- --------------- ------------------------------- 2009.06.15 tynli Create version 0. Implement GetWOLWakeUpPattern(). 2009.06.16 tynli Implement CalculateWOLPatternCRC(), CRC16_CCITT(). 2009.06.19 tynli Implement ResetWoLPara(). --*/ #include "Mp_Precomp.h" #if WPP_SOFTWARE_TRACE #include "WOLPattern.tmh" #endif // // Description: Get the wake up frame pattern for wake-on WLAN (WOL). // // pWOLMaskToHW -It is an output. We must to set bit mask to HW in a reverse order, and // sometimes to shift some bits because the payload offset from OS may // be different from real wake up pattern. // 2009.06.15. by tynli. // VOID GetWOLWakeUpPattern( IN PADAPTER pAdapter, IN pu1Byte pWOLPatternMask, IN u4Byte WOLPatternMaskSize, IN pu1Byte pWOLPatternContent, IN u4Byte WOLPatternContentSize, IN u1Byte Index, IN BOOLEAN bMgntFrame ) { u4Byte i=0, j=0; u4Byte len=0, mask[4]; u2Byte CRCRemainder; u1Byte MaskToHW[MAX_WOL_BIT_MASK_SIZE]; u1Byte WOLWakeupPattern[MAX_WOL_PATTERN_SIZE]; PMGNT_INFO pMgntInfo = &(pAdapter->MgntInfo); PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL(pMgntInfo); PRT_PM_WOL_PATTERN_INFO pWoLPatternInfo = &(pPSC->PmWoLPatternInfo[0]); // Set the pattern match flag. pWoLPatternInfo[Index].IsPatternMatch = 1; // <Case 1> 8723A Hw just can save 12 patterns mask and CRC in registers, for the DTM reqirement, // the device should support 16 patterns, so the remain 4 patterns will be handled by Fw. // Keep all the pattern masks and contents, then download them in HaltAdapter(). 2012.03.19. by tynli. // <Case 2> To support wake pattern parsing so we need to store the pattern info. 2013.01.10, by tynli. AddWoLPatternEntry(pAdapter, pWOLPatternMask, WOLPatternMaskSize, pWOLPatternContent, WOLPatternContentSize, Index); PlatformZeroMemory((pu1Byte)WOLWakeupPattern, MAX_WOL_PATTERN_SIZE); PlatformZeroMemory((pu1Byte)mask, MAX_WOL_BIT_MASK_SIZE); PlatformZeroMemory((pu1Byte)MaskToHW, MAX_WOL_BIT_MASK_SIZE); //RT_PRINT_DATA( (COMP_OID_QUERY|COMP_AP), DBG_LOUD, ("GetWOLWakeUpPattern() Mask: "), //pWOLPatternMask, WOLPatternMaskSize); //RT_PRINT_DATA( (COMP_OID_QUERY|COMP_AP), DBG_LOUD, ("GetWOLWakeUpPattern() Pattern: "), //pWOLPatternContent, WOLPatternContentSize); //1. Compare if DA = our MAC ADDR /* // 2009.07.09. //for DTM bit mask, it will macth from the highest bit of a byte, so we reverse each of byte. for(i=0; i<WOLPatternMaskSize; i++) { pWOLPatternMask[i] = ReverseBit(pWOLPatternMask[i]); } */ if(bMgntFrame) { // The mask is begin from frame content, because Hw will ignore MAC header. for(i=0; i<WOLPatternMaskSize; i++) { MaskToHW[i] = pWOLPatternMask[i]; } // Remove MAC header 24 bytes. for(i=0; i<WOLPatternMaskSize*8; i++) { if((pWOLPatternMask[i/8]>>(i%8))&0x01) { WOLWakeupPattern[len] = pWOLPatternContent[i+24]; //DbgPrint("pWOLWakeupPattern[%d] = 0x%x\n", len, WOLWakeupPattern[len]); len++; } } } else { //2. To macth HW design, we need to change bit mask. HW catch payload which begin from LLC //in 802.11 packet, but OS set the wake up pattern in 802.3 format (DA[6]|SA[6]|Type[2]|Data), //so (1) we need to shift bit mask left for 6 bits to filter out DA[6], and the type is at the last 2 bits //in LLC[8], so (2) we also need to set bit 0-5 to zero in the new bit mask which means SA[6] //to prevent CRC error (HW count from LLC, it is not a SA). 
by tynli. 2009.07.03. for(i=0; i<(WOLPatternMaskSize-1); i++) //(1) Shift 6 bits { MaskToHW[i] = pWOLPatternMask[i]>>6; MaskToHW[i] |= (pWOLPatternMask[i+1]&0x3F)<<2; } MaskToHW[i] = (pWOLPatternMask[i]>>6)&0x3F; MaskToHW[0] &= 0xC0; //(2) Set bit 0-5 to zero //3. To get the wake up pattern from the mask. //We do not count first 12 bits which means DA[6] and SA[6] in the pattern to match HW design. for(i=12; i<WOLPatternMaskSize*8; i++) { if((pWOLPatternMask[i/8]>>(i%8))&0x01) { WOLWakeupPattern[len] = pWOLPatternContent[i]; //DbgPrint("pWOLWakeupPattern[%d] = 0x%x\n", len, WOLWakeupPattern[len]); len++; } } } //4. Calculate CRC remainder CRCRemainder = CalculateWOLPatternCRC(WOLWakeupPattern, len); pWoLPatternInfo[Index].CrcRemainder = CRCRemainder; RT_TRACE(COMP_POWER, DBG_LOUD, ("GetWOLWakeUpPattern(): CrcRemainder = %x\n",pWoLPatternInfo[Index].CrcRemainder)); //5. Change the byte order of the bit mask to macth HW design. for(i=0; i<= (MAX_WOL_BIT_MASK_SIZE-4); i=i+4, j++) { mask[j] = MaskToHW[i]; mask[j] |= (MaskToHW[i+1]<<8); mask[j] |= (MaskToHW[i+2]<<16); mask[j] |= (MaskToHW[i+3]<<24); //DbgPrint("mask[%d] = %x\n", j, mask[j]); pWoLPatternInfo[Index].Mask[j] = mask[j]; //DbgPrint("pWoLPatternInfo[Index].Mask[%d] = %x\n", j, pWoLPatternInfo[Index].Mask[j]); } { // Download them in HaltAdapter() on 8723A. 2012.07.17, by tynli. pAdapter->HalFunc.SetHwRegHandler(pAdapter, HW_VAR_WF_MASK, (pu1Byte)(&Index)); pAdapter->HalFunc.SetHwRegHandler(pAdapter, HW_VAR_WF_CRC, (pu1Byte)(&Index)); } } // // Description: // To calculate the CRC remainder for the WOL wake up pattern. // Input: // A WOL Pattern, pattern length // Output: // The CRC remainder for the pattern // 2009.06.16. by tynli. // u2Byte CalculateWOLPatternCRC( pu1Byte Pattern, u4Byte PatternLength ) { // unsigned char data[2]={0xC6,0xAA}; u2Byte CRC=0xffff; u4Byte i; for(i=0; i<PatternLength; i++) { CRC=CRC16_CCITT(Pattern[i], CRC); //DbgPrint("Pattern[%d] = %x, ", i, Pattern[i]); } //DbgPrint("\n"); CRC=~CRC; //DbgPrint("CRC =%x\n",CRC); return CRC; } // // Description: // This is not a standard CRC16-CCITT algorithm, we re-write it to C code from // VR code which is from HW designer. // Input: // 1 byte data, CRC remainder // Output: // The CRC remainder for each byte // 2009.06.16. by tynli and SD1 Isaac. // u2Byte CRC16_CCITT( u1Byte data, u2Byte CRC ) { u1Byte shift_in, DataBit, CRC_BIT11, CRC_BIT4, CRC_BIT15 ; u1Byte index; u2Byte CRC_Result; for(index=0;index<8;index++) { CRC_BIT15=((CRC&BIT15) ? 1:0); DataBit =(data&(BIT0<<index) ? 1:0); shift_in=CRC_BIT15^DataBit; //printf("CRC_BIT15=%d, DataBit=%d, shift_in=%d \n",CRC_BIT15,DataBit,shift_in); CRC_Result=CRC<<1; //set BIT0 // printf("CRC =%x\n",CRC_Result); //CRC bit 0 =shift_in, if(shift_in==0) CRC_Result&=(~BIT0); else CRC_Result|=BIT0; //printf("CRC =%x\n",CRC_Result); CRC_BIT11 = ((CRC&BIT11) ? 1:0)^shift_in; if(CRC_BIT11==0) CRC_Result&=(~BIT12); else CRC_Result|=BIT12; //printf("bit12 CRC =%x\n",CRC_Result); CRC_BIT4 = ((CRC&BIT4) ? 1:0)^shift_in; if(CRC_BIT4==0) CRC_Result&=(~BIT5); else CRC_Result|=BIT5; //printf("bit5 CRC =%x\n",CRC_Result); CRC=CRC_Result; } return CRC; } // // Description: For wake-on WLAN, the CRC register value should be set to a non-zero value when // there is no wake up frame comeing to calculate CRC result or it will hit the HW bug when CRC value = 0. // // 2009.06.19. by tynli. 
VOID ResetWoLPara( IN PADAPTER Adapter ) { PMGNT_INFO pMgntInfo = &(Adapter->MgntInfo); PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL(pMgntInfo); PRT_PM_WOL_PATTERN_INFO pPmWOLPatternInfo = &(pPSC->PmWoLPatternInfo[0]); u1Byte i; PlatformZeroMemory(pPmWOLPatternInfo, sizeof(RT_PM_WOL_PATTERN_INFO)*(MAX_SUPPORT_WOL_PATTERN_NUM(Adapter))); for(i=0; i<MAX_SUPPORT_WOL_PATTERN_NUM(Adapter); i++) //reset structure content { pPmWOLPatternInfo[i].CrcRemainder = 0xffff; pPmWOLPatternInfo[i].PatternType = eUnknownType; //YJ,add,110726 pPmWOLPatternInfo[i].HwWFMIndex = 0xff; } Adapter->HalFunc.SetHwRegHandler(Adapter, HW_VAR_RESET_WFCRC, 0); } VOID ConstructUserDefinedWakeUpPattern( IN PADAPTER Adapter ) { PMGNT_INFO pMgntInfo = &(Adapter->MgntInfo); //u1Byte AuthBuf[100]; u4Byte AuthBufLen; OCTET_STRING AuthChallengetext; u1Byte AuthMaskBuf; u1Byte AuthMaskBufLen; u1Byte Index; PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL(pMgntInfo); PRT_PM_WOL_PATTERN_INFO pPmWoLPatternInfo = &(pPSC->PmWoLPatternInfo[0]); pu1Byte AuthBuf; PRT_GEN_TEMP_BUFFER pGenBufAuthPacket; RT_TRACE(COMP_POWER, DBG_LOUD, ("===> ConstructUserDefinedWakeUpPattern()\n")); if(ACTING_AS_AP(Adapter) && pPSC->APOffloadEnable) { pGenBufAuthPacket = GetGenTempBuffer (Adapter, 100); AuthBuf = (u1Byte *)pGenBufAuthPacket->Buffer.Ptr; // // 1. Auth // //Since this is always the 1st authentication frame AuthChallengetext.Length = 0; //Send authentication frame ConstructAuthenticatePacket( Adapter, AuthBuf, &AuthBufLen, pMgntInfo->Bssid, 0, 1, StatusCode_success, AuthChallengetext); RT_PRINT_DATA(COMP_POWER, DBG_TRACE, "ConstructUserDefinedWakeUpPattern(): Auth ", &AuthBuf, AuthBufLen); AuthMaskBuf = 0x0F; AuthMaskBufLen = 1; //Find the index of the first empty entry. for(Index=0; Index<MAX_SUPPORT_WOL_PATTERN_NUM(Adapter); Index++) { if(pPmWoLPatternInfo[Index].PatternId == 0) break; } if(Index >= MAX_SUPPORT_WOL_PATTERN_NUM(Adapter)) { RT_TRACE(COMP_POWER, DBG_LOUD, ("SET OID_PM_ADD_WOL_PATTERN: The number of wake up pattern is more than MAX_SUPPORT_WOL_PATTERN_NUM or the pattern Id is exist.\n")); } // Set the pattern information. pPmWoLPatternInfo[Index].PatternId = 0xFFFF; //for temp pPmWoLPatternInfo[Index].PatternType = eUnicastPattern; pPmWoLPatternInfo[Index].IsUserDefined = 1; GetWOLWakeUpPattern( Adapter, &AuthMaskBuf, AuthMaskBufLen, AuthBuf, AuthBufLen, Index, TRUE); pPSC->WoLPatternNum++; ReturnGenTempBuffer(Adapter, pGenBufAuthPacket); } } // // Description: // When GTK is updated, check if the Wake on WLAN event happened before, and if yes indicate // event to the upper layer. // Argumets: // [in] pAdapter - // The adapter context. // Return: // NONE. // By Bruce, 2011-06-09. // VOID WolByGtkUpdate( IN PADAPTER pAdapter ) { PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL((&pAdapter->MgntInfo)); if((pPSC->WakeUpReason & (WOL_REASON_GTK_UPDATE | WOL_REASON_PTK_UPDATE)) != 0) { if(PlatformGetCurrentTime() <= pPSC->LastWakeUpTime + 20000000) // 10 sec { RT_TRACE_F(COMP_POWER, DBG_LOUD, ("Wake up by GTK and Indicate the WOL by GTK event!\n")); PlatformIndicateCustomStatus( pAdapter, RT_CUSTOM_EVENT_WOL_GTK, RT_CUSTOM_INDI_TARGET_IHV, &pPSC->SleepMode, sizeof(pPSC->SleepMode)); } else { RT_TRACE_F(COMP_POWER, DBG_LOUD, ("The current timt of updating GTK is too long from the last WOL time! 
Skip indication....\n")); } pPSC->WakeUpReason &= ~(WOL_REASON_GTK_UPDATE | WOL_REASON_PTK_UPDATE); } } VOID RemoveUserDefinedWoLPattern( IN PADAPTER Adapter ) { PMGNT_INFO pMgntInfo = &(Adapter->MgntInfo); PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL(pMgntInfo); PRT_PM_WOL_PATTERN_INFO pPmWoLPatternInfo = &(pPSC->PmWoLPatternInfo[0]); u1Byte Index; for(Index=0; Index<MAX_SUPPORT_WOL_PATTERN_NUM(Adapter); Index++) { if(pPmWoLPatternInfo[Index].IsUserDefined == 1) { //Reset the structure and set WFCRC register to non-zero value. pPmWoLPatternInfo[Index].PatternId = 0; PlatformZeroMemory(pPmWoLPatternInfo[Index].Mask, sizeof(pPmWoLPatternInfo[Index].Mask)); pPmWoLPatternInfo[Index].CrcRemainder = 0xffff; pPmWoLPatternInfo[Index].IsPatternMatch = 0; pPmWoLPatternInfo[Index].IsUserDefined = 0; pPmWoLPatternInfo[Index].IsSupportedByFW = 0; Adapter->HalFunc.SetHwRegHandler(Adapter, HW_VAR_WF_MASK, (pu1Byte)(&Index)); Adapter->HalFunc.SetHwRegHandler(Adapter, HW_VAR_WF_CRC, (pu1Byte)(&Index)); pPmWoLPatternInfo[Index].HwWFMIndex = 0xff; // reset the value after clear HW/CAM entry. pPSC->WoLPatternNum--; } } } // // Description: Save the wake patten masks and contents. // VOID AddWoLPatternEntry( IN PADAPTER Adapter, IN pu1Byte pWOLPatternMask, IN u4Byte WOLPatternMaskSize, IN pu1Byte pWOLPatternContent, IN u4Byte WOLPatternContentSize, IN u1Byte Index ) { PMGNT_INFO pMgntInfo = &(Adapter->MgntInfo); PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL(pMgntInfo); PRT_PM_WOL_PATTERN_INFO pWoLPatternInfo = &(pPSC->PmWoLPatternInfo[0]); u1Byte i, j; // Reset the entry. PlatformZeroMemory(&(pWoLPatternInfo[Index].FwPattern),sizeof(H2C_WOL_PATTERN_MATCH_INFO)); // // Add bit mask entry. // for(i=0; i<WOLPatternMaskSize; i++) { pWoLPatternInfo[Index].FwPattern.BitMask[i] = pWOLPatternMask[i]; for(j=0; j<8; j++) { if((pWoLPatternInfo[Index].FwPattern.BitMask[i]>>j)&0x01) { pWoLPatternInfo[Index].FwPattern.ValidBitNum++; } } } // // Add pattern content entry. // for(i=0; i<WOLPatternContentSize; i++) { pWoLPatternInfo[Index].FwPattern.PatternContent[i] = pWOLPatternContent[i]; } RT_PRINT_DATA(COMP_POWER, DBG_TRACE,"PatternContent: \n", &(pWoLPatternInfo[Index].FwPattern.PatternContent[0]), WOLPatternContentSize); } // // Description: Get the entrties to download to Fw. // Input: // - EnteryNum: [total pattern number (set by the OID) - 12 (Hw capability)] // VOID GetWoLPatternMatchOffloadEntries( IN PADAPTER Adapter, IN u1Byte EntryNum ) { PMGNT_INFO pMgntInfo = &(Adapter->MgntInfo); PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL(pMgntInfo); PRT_PM_WOL_PATTERN_INFO pWoLPatternInfo = &(pPSC->PmWoLPatternInfo[0]); u1Byte i=0; u1Byte MinNum = 0xFF, NextMinNum=0xFF; u1Byte NumOfSelected=0; RT_TRACE(COMP_POWER, DBG_LOUD, ("GetWoLPatternMatchOffloadEntries(): Number of entries=%d\n", EntryNum)); // Find the minimun number of bits. for(i=0; i<MAX_SUPPORT_WOL_PATTERN_NUM(Adapter); i++) { if(pWoLPatternInfo[i].FwPattern.ValidBitNum == 0) break; if(pWoLPatternInfo[i].FwPattern.ValidBitNum <= MinNum) { MinNum = pWoLPatternInfo[i].FwPattern.ValidBitNum; } } // // Find the smallest four entries. 
// do { NextMinNum = 0xFF; for(i=0; i<MAX_SUPPORT_WOL_PATTERN_NUM(Adapter); i++) { if(pWoLPatternInfo[i].FwPattern.ValidBitNum == 0) break; if(pWoLPatternInfo[i].FwPattern.ValidBitNum == MinNum) { if(NumOfSelected > EntryNum) break; pWoLPatternInfo[i].IsSupportedByFW = 1; NumOfSelected++; //DbgPrint("Find the littler index(%d)\n", i); } else if(pWoLPatternInfo[i].FwPattern.ValidBitNum > MinNum) { if(pWoLPatternInfo[i].FwPattern.ValidBitNum <= NextMinNum) { NextMinNum = pWoLPatternInfo[i].FwPattern.ValidBitNum; } } } //DbgPrint("NextMinNum=%d\n", NextMinNum); MinNum = NextMinNum; } while((NumOfSelected < EntryNum) && (NumOfSelected < pPSC->WoLPatternNum)); } VOID WoL_TranslateDot11FrameToDot3( PADAPTER Adapter, PRT_RFD pRfd, pu1Byte pDot3Buffer, pu2Byte pDot3BufLen ) { OCTET_STRING frame = {NULL, 0}; pu1Byte pHeader; BOOLEAN bToDS, bFromDS; u1Byte MacDestAddr[6]={0}, MacSrcAddr[6]={0}; u1Byte offset=0; FillOctetString(frame, pRfd->Buffer.VirtualAddress, pRfd->PacketLength); pHeader = frame.Octet; RT_PRINT_DATA(COMP_INIT, DBG_TRACE, "WoL_TranlateDot11FrameToDot3(): frame \n",frame.Octet,frame.Length); bToDS = (Frame_ToDS(frame) ? TRUE : FALSE); bFromDS = (Frame_FromDS(frame) ? TRUE : FALSE); if(bToDS && !bFromDS) { GET_80211_HDR_ADDRESS3(pHeader, MacDestAddr); // DA GET_80211_HDR_ADDRESS2(pHeader, MacSrcAddr); // SA } else if(!bToDS && bFromDS) { GET_80211_HDR_ADDRESS1(pHeader, MacDestAddr); GET_80211_HDR_ADDRESS3(pHeader, MacSrcAddr); } else if(bToDS && bFromDS) { GET_80211_HDR_ADDRESS3(pHeader, MacDestAddr); GET_80211_HDR_ADDRESS4(pHeader, MacSrcAddr); } else { GET_80211_HDR_ADDRESS1(pHeader, MacDestAddr); GET_80211_HDR_ADDRESS2(pHeader, MacSrcAddr); } offset += sMacHdrLng; if( pRfd->Status.bIsQosData ) { offset += sQoSCtlLng; } if( pRfd->Status.bContainHTC) { offset += sHTCLng; } if(Frame_ValidAddr4(frame)) { offset += 6; } if(Frame_WEP(frame)) { offset += Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead; } if(frame.Length <= offset) { RT_TRACE(COMP_INIT, DBG_LOUD, ("WoL_TranslateDot11FrameToDot3(): Error frame length!\n")); return; } // if(IsDataFrame(frame.Octet)) { // Remove LLC header length (6 bytes) to get IP type feild.] // LLC_HEADER_SIZE if((frame.Length-offset) > LLC_HEADER_SIZE) // check length { if(*(pHeader+offset) == 0xaa && *(pHeader+offset+1) == 0xaa && *(pHeader+offset+2) == 0x03) { offset += LLC_HEADER_SIZE; //DbgPrint("Find LLC header!!\n"); } } } RT_TRACE(COMP_INIT, DBG_TRACE, ("WoL_TranslateDot11FrameToDot3(): offset = %d\n", offset)); // Mapping to 802.3 packet format. PlatformMoveMemory(pDot3Buffer, MacDestAddr, 6); // DA PlatformMoveMemory(pDot3Buffer+6, MacSrcAddr, 6); // SA PlatformMoveMemory(pDot3Buffer+12, pHeader+offset, (frame.Length-offset)); //IP type+data *pDot3BufLen = 12 + frame.Length - offset; RT_PRINT_DATA(COMP_INIT, DBG_TRACE, "WoL_TranlateDot11FrameToDot3(): pDot3Buffer \n",pDot3Buffer, *pDot3BufLen); } // // Description: Check the received packet if a magic packet. // 2012.07.31, by tynli. // BOOLEAN WoL_IsMagicPacket( PADAPTER Adapter, PRT_RFD pRfd ) { OCTET_STRING frame = {NULL, 0}; pu1Byte pHeader; u1Byte offset=0, i, j; u2Byte PayloadLen; BOOLEAN bMatchPacket=FALSE; FillOctetString(frame, pRfd->Buffer.VirtualAddress, pRfd->PacketLength); pHeader = frame.Octet; //RT_PRINT_DATA(COMP_RECV, DBG_LOUD,"WoL_IsMagicPacket(): frame \n",frame.Octet,frame.Length); // Get payload start offset. 
offset += sMacHdrLng; if( pRfd->Status.bIsQosData ) { offset += sQoSCtlLng; } if( pRfd->Status.bContainHTC) { offset += sHTCLng; } if(Frame_ValidAddr4(frame)) { offset += 6; } if(Frame_WEP(frame)) { offset += Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead; } if(frame.Length <= offset) { RT_TRACE(COMP_INIT, DBG_LOUD, ("WoL_IsMagicPacket(): Error frame length!\n")); return bMatchPacket; } RT_TRACE(COMP_RECV, DBG_TRACE, ("WoL_IsMagicPacket(): offset = %d\n", offset)); pHeader += offset; PayloadLen = frame.Length - offset; // Search for magic packet pattern for(i = 0; i<PayloadLen; ) { if(*(pHeader+i) == 0xFF) { if((i+6) > PayloadLen) // check remain buffer length { RT_TRACE(COMP_RECV, DBG_TRACE, ("WoL_IsMagicPacket(): Packet length error. 1\n")); break; } // Find FF FF FF FF FF FF pattern. if(*(pHeader+i+1) == 0xFF && *(pHeader+i+2) == 0xFF && *(pHeader+i+3) == 0xFF && *(pHeader+i+4) == 0xFF && *(pHeader+i+5) == 0xFF) { i += 6; if((i+16*6) > PayloadLen) // check remain buffer length { RT_TRACE(COMP_RECV, DBG_TRACE, ("WoL_IsMagicPacket(): Packet length error. 2\n")); break; } // Repeat STA addr 16 times. for(j=0; j<16; j++) { if(PlatformCompareMemory((pHeader+i+j*6), Adapter->CurrentAddress, 6) == 0) { // Match if(j == 15) { //Adapter->HwWakeUpEvent = 2; bMatchPacket =TRUE; RT_TRACE(COMP_POWER, DBG_LOUD, ("WoL_IsMagicPacket(): Find magic packet\n")); } } else { RT_TRACE(COMP_POWER, DBG_TRACE, ("WoL_IsMagicPacket(): Unmatch!!\n")); break; } } } else { i += 1; } } else { i += 1; } } return bMatchPacket; } // // Description: Check the received packet if a pattern match packet. // We should translate the reveived packet format to 802.3 format to match the pattern content // set from the upper layer. If there is one pattern packet be matched, then return TRUE. // 2012.07.31, by tynli. // BOOLEAN WoL_IsPatternMatchPacket( PADAPTER Adapter, PRT_RFD pRfd ) { PMGNT_INFO pMgntInfo = &(Adapter->MgntInfo); PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL(pMgntInfo); PRT_PM_WOL_PATTERN_INFO pPmWoLPatternInfo = &(pPSC->PmWoLPatternInfo[0]); u1Byte Index, i, j; BOOLEAN bMatchPacket=TRUE; u1Byte offset = 0; pu1Byte pDot3Packet; PRT_GEN_TEMP_BUFFER pGenBufPacket; u2Byte Dot3PacketLen; pGenBufPacket = GetGenTempBuffer (Adapter, pRfd->PacketLength); pDot3Packet = (u1Byte *)pGenBufPacket->Buffer.Ptr; // Translate 802.11 packet format to 802.3 format to match the pattern match content set by OID. WoL_TranslateDot11FrameToDot3(Adapter, pRfd, pDot3Packet, &Dot3PacketLen); for(Index=0; Index<MAX_SUPPORT_WOL_PATTERN_NUM(Adapter); Index++) { bMatchPacket = TRUE; if(pPmWoLPatternInfo[Index].PatternId != 0) { for(i=0; i<16; i++) { for(j=0; j<8; j++) { offset = i*8+j; if(pPmWoLPatternInfo[Index].FwPattern.BitMask[i] & (0x01<<j)) // bitmask == 1 { if(Dot3PacketLen < offset) { bMatchPacket = FALSE; break; } if(pDot3Packet[offset] != pPmWoLPatternInfo[Index].FwPattern.PatternContent[offset]) { bMatchPacket = FALSE; break; } } } if(!bMatchPacket) break; } if(bMatchPacket) { //Adapter->HwWakeUpEvent = 3 | (Index<<4); RT_TRACE(COMP_POWER, DBG_LOUD, ("Find the pattern match wake packet!!Index(%d)\n", Index)); break; } } else { bMatchPacket = FALSE; } } if(bMatchPacket) { RT_PRINT_DATA(COMP_DBG, DBG_TRACE, "Dump pattern bitmask \n", pPmWoLPatternInfo[Index].FwPattern.BitMask, 16); RT_PRINT_DATA(COMP_DBG, DBG_TRACE, "Dump pattern content \n", pPmWoLPatternInfo[Index].FwPattern.PatternContent, 128); } ReturnGenTempBuffer(Adapter, pGenBufPacket); return bMatchPacket; } // // Description: Handle WoWLAN related Rx packets. 
// // Return value: TRUE - It is a wake packet. // FALSE - It is a normal packet, and should be handled by normal Rx path. // BOOLEAN WoL_HandleReceivedPacket( PADAPTER Adapter, PRT_RFD pRfd ) { //OCTET_STRING frame = {NULL, 0}; PMGNT_INFO pMgntInfo = &(Adapter->MgntInfo); PRT_POWER_SAVE_CONTROL pPSC = GET_POWER_SAVE_CONTROL(pMgntInfo); //static BOOLEAN bWakePacket = FALSE; PRT_RFD_STATUS pRtRfdStatus = &pRfd->Status; BOOLEAN bResult=FALSE; if(pPSC->bFindWakePacket) { RT_TRACE(COMP_POWER, DBG_TRACE, ("Already found the first wake packet and return!\n")); return FALSE; } if(HW_SUPPORT_PARSING_WAKE_PACKET(Adapter)) { if(!pRtRfdStatus->WakeMatch) { RT_TRACE(COMP_POWER, DBG_TRACE, ("Not a wake packet and return!\n")); return FALSE; } bResult = TRUE; pPSC->bFindWakePacket = TRUE; if(pPSC->WakeUpReason == 0) // To prevent from covering Hw reason { if(pRtRfdStatus->WakeMatch & BIT0) pPSC->WakeUpReason = WOL_REASON_UNICAST_PKT; else if(pRtRfdStatus->WakeMatch & BIT1) pPSC->WakeUpReason = WOL_REASON_MAGIC_PKT; else if(pRtRfdStatus->WakeMatch & BIT2) pPSC->WakeUpReason = WOL_REASON_PATTERN_PKT; } RT_TRACE(COMP_POWER, DBG_LOUD, ("~~~~~~~~ It is a wake packet(%d)!\n", pRtRfdStatus->WakeMatch)); //RT_PRINT_DATA(COMP_INIT, DBG_LOUD,"WoL_HandleReceivedPacket: \n", pRfd->Buffer.VirtualAddress, pRfd->PacketLength); } else { // RT_PRINT_DATA(COMP_INIT, DBG_LOUD,"WoL_HandleReceivedPacket: \n",frame.Octet,frame.Length); // Find the wake packet. if(WoL_IsMagicPacket(Adapter, pRfd)) { pPSC->bFindWakePacket = TRUE; bResult = TRUE; if(pPSC->WakeUpReason == 0) // To prevent from covering Hw reason pPSC->WakeUpReason = WOL_REASON_MAGIC_PKT; } else if(WoL_IsPatternMatchPacket(Adapter, pRfd)) { pPSC->bFindWakePacket = TRUE; bResult = TRUE; if(pPSC->WakeUpReason == 0) // To prevent from covering Hw reason pPSC->WakeUpReason = WOL_REASON_PATTERN_PKT; } } // Indicate PM wake reason and packet to the OS. if(pPSC->bFindWakePacket) { PlatformIndicatePMWakeReason(Adapter, TRUE, pRfd->Buffer.VirtualAddress, pRfd->PacketLength); } return bResult; }
12,167
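CalculateWOLPatternCRC() in the driver file above feeds each byte of the masked wake-up pattern through CRC16_CCITT(), a hardware-derived variant rather than the textbook CCITT polynomial, starting from 0xFFFF and inverting the final remainder. The Python sketch below is a bit-for-bit reference model of those two routines, handy for checking a pattern CRC on the host side; it is an illustrative re-expression of the C code, not part of the driver.

def crc16_ccitt_byte(data, crc):
    # Mirrors CRC16_CCITT(): shift one data byte in LSB-first, updating bits 0, 5 and 12.
    for index in range(8):
        shift_in = ((crc >> 15) & 1) ^ ((data >> index) & 1)
        result = (crc << 1) & 0xFFFF
        result = (result & ~0x0001) | shift_in                                # bit 0
        result = (result & ~0x1000) | ((((crc >> 11) & 1) ^ shift_in) << 12)  # bit 12
        result = (result & ~0x0020) | ((((crc >> 4) & 1) ^ shift_in) << 5)    # bit 5
        crc = result
    return crc

def wol_pattern_crc(pattern_bytes):
    # Mirrors CalculateWOLPatternCRC(): start at 0xFFFF, feed every pattern byte,
    # then return the inverted 16-bit remainder.
    crc = 0xFFFF
    for byte in pattern_bytes:
        crc = crc16_ccitt_byte(byte, crc)
    return (~crc) & 0xFFFF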
746
<gh_stars>100-1000 #!/usr/bin/env python # -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Example .impalarc file: # # [impala] # impalad=localhost:21002 # verbose=false # # [impala.query_options] # EXPLAIN_LEVEL=2 # MT_DOP=2 from __future__ import print_function, unicode_literals import sys try: from configparser import ConfigParser # python3 except ImportError: from ConfigParser import ConfigParser # python2 from impala_shell_config_defaults import impala_shell_defaults from optparse import OptionParser, SUPPRESS_HELP class ConfigFileFormatError(Exception): """Raised when the config file cannot be read by ConfigParser.""" pass class InvalidOptionValueError(Exception): """Raised when an option contains an invalid value.""" pass def parse_bool_option(value): """Returns True for '1' and 'True', and False for '0' and 'False'. Throws ValueError for other values. """ if value.lower() in ["true", "1"]: return True elif value.lower() in ["false", "0"]: return False else: raise InvalidOptionValueError("Unexpected value in configuration file. '" + value + "' is not a valid value for a boolean option.") def parse_shell_options(options, defaults, option_list): """Filters unknown options and converts some values from string to their corresponding python types (booleans and None). 'option_list' contains the list of valid options, and 'defaults' is used to deduce the type of some options (only bool at the moment). Returns a dictionary with option names as keys and option values as values. """ # Build a dictionary that maps short and long option name to option for a quick lookup. option_dests = dict() for option in option_list: if len(option._short_opts) > 0: option_dests[option._short_opts[0][1:]] = option if len(option._long_opts) > 0: option_dests[option._long_opts[0][2:]] = option if option.dest not in option_dests: # Allowing dest name for backward compatibility. option_dests[option.dest] = option result = {} for option, value in options: opt = option_dests.get(option) if opt is None: warn_msg = ( "WARNING: Unable to read configuration file correctly. " "Ignoring unrecognized config option: '%s'" % option ) print('\n{0}'.format(warn_msg), file=sys.stderr) elif isinstance(defaults.get(option), bool) or \ opt.action == "store_true" or opt.action == "store_false": result[option] = parse_bool_option(value) elif opt.action == "append": result[option] = value.split(",%s=" % option) elif value.lower() == "none": result[option] = None else: result[option] = value return result def get_config_from_file(config_filename, option_list): """Reads contents of configuration file Two config sections are supported: "[impala]": Overrides the defaults of the shell arguments. 
Unknown options are filtered and some values are converted from string to their corresponding python types (booleans and None). Multiple flags are appended with ",option_name=" as its delimiter, e.g. The delimiter is for multiple options is ,<option>=. For example: var=msg1=hello,var=msg2=world. Setting 'config_filename' in the config file would have no effect, so its original value is kept. "[impala.query_options]" Overrides the defaults of the query options. Not validated here, because validation will take place after connecting to impalad. Returns a pair of dictionaries (shell_options, query_options), with option names as keys and option values as values. """ try: config = ConfigParser(strict=False) # python3 except TypeError: config = ConfigParser() # python2 # Preserve case-sensitivity since flag names are case sensitive. config.optionxform = str try: config.read(config_filename) except Exception as e: raise ConfigFileFormatError( "Unable to read configuration file correctly. Check formatting: %s" % e) shell_options = {} if config.has_section("impala"): shell_options = parse_shell_options(config.items("impala"), impala_shell_defaults, option_list) if "config_file" in shell_options: warn_msg = "WARNING: Option 'config_file' can be only set from shell." print('\n{0}'.format(warn_msg), file=sys.stderr) shell_options["config_file"] = config_filename query_options = {} if config.has_section("impala.query_options"): # Query option keys must be "normalized" to upper case before updating with # options coming from command line. query_options = dict( [(k.upper(), v) for k, v in config.items("impala.query_options")]) return shell_options, query_options def get_option_parser(defaults): """Creates OptionParser and adds shell options (flags) Default values are loaded in initially """ parser = OptionParser() parser.add_option("-i", "--impalad", dest="impalad", help="<host:port> of impalad to connect to \t\t") parser.add_option("-b", "--kerberos_host_fqdn", dest="kerberos_host_fqdn", help="If set, overrides the expected hostname of the Impalad's " "kerberos service principal. impala-shell will check that " "the server's principal matches this hostname. This may be " "used when impalad is configured to be accessed via a " "load-balancer, but it is desired for impala-shell to talk " "to a specific impalad directly.") parser.add_option("-q", "--query", dest="query", help="Execute a query without the shell") parser.add_option("-f", "--query_file", dest="query_file", help="Execute the queries in the query file, delimited by ;." " If the argument to -f is \"-\", then queries are read from" " stdin and terminated with ctrl-d.") parser.add_option("-k", "--kerberos", dest="use_kerberos", action="store_true", help="Connect to a kerberized impalad") parser.add_option("-o", "--output_file", dest="output_file", help=("If set, query results are written to the " "given file. 
Results from multiple semicolon-terminated " "queries will be appended to the same file")) parser.add_option("-B", "--delimited", dest="write_delimited", action="store_true", help="Output rows in delimited mode") parser.add_option("--print_header", dest="print_header", action="store_true", help="Print column names in delimited mode" " when pretty-printed.") parser.add_option("--output_delimiter", dest="output_delimiter", help="Field delimiter to use for output in delimited mode") parser.add_option("-s", "--kerberos_service_name", dest="kerberos_service_name", help="Service name of a kerberized impalad") parser.add_option("-V", "--verbose", dest="verbose", action="store_true", help="Verbose output") parser.add_option("-p", "--show_profiles", dest="show_profiles", action="store_true", help="Always display query profiles after execution") parser.add_option("--quiet", dest="verbose", action="store_false", help="Disable verbose output") parser.add_option("-v", "--version", dest="version", action="store_true", help="Print version information") parser.add_option("-c", "--ignore_query_failure", dest="ignore_query_failure", action="store_true", help="Continue on query failure") parser.add_option("-d", "--database", dest="default_db", help="Issues a use database command on startup \t") parser.add_option("-l", "--ldap", dest="use_ldap", action="store_true", help="Use LDAP to authenticate with Impala. Impala must be configured" " to allow LDAP authentication. \t\t") parser.add_option("-u", "--user", dest="user", help="User to authenticate with.") parser.add_option("--ssl", dest="ssl", action="store_true", help="Connect to Impala via SSL-secured connection \t") parser.add_option("--ca_cert", dest="ca_cert", help=("Full path to " "certificate file used to authenticate Impala's SSL certificate." " May either be a copy of Impala's certificate (for self-signed " "certs) or the certificate of a trusted third-party CA. If not set, " "but SSL is enabled, the shell will NOT verify Impala's server " "certificate")) parser.add_option("--config_file", dest="config_file", help=("Specify the configuration file to load options. " "The following sections are used: [impala], " "[impala.query_options]. Section names are case sensitive. " "Specifying this option within a config file will have " "no effect. Only specify this as an option in the commandline." )) parser.add_option("--history_file", dest="history_file", help=("The file in which to store shell history. This may also be " "configured using the IMPALA_HISTFILE environment variable.")) parser.add_option("--live_summary", dest="live_summary", action="store_true", help="Print a query summary every 1s while the query is running.") parser.add_option("--live_progress", dest="live_progress", action="store_true", help="Print a query progress every 1s while the query is running." " The default value of the flag is True in the interactive mode." " If live_progress is set to False in a config file, this flag" " will override it") parser.add_option("--disable_live_progress", dest="live_progress", action="store_false", help="A command line flag allows users to disable live_progress in" " the interactive mode.") parser.add_option("--auth_creds_ok_in_clear", dest="creds_ok_in_clear", action="store_true", help="If set, LDAP authentication " + "may be used with an insecure connection to Impala. 
" + "WARNING: Authentication credentials will therefore be sent " + "unencrypted, and may be vulnerable to attack.") parser.add_option("--ldap_password_cmd", dest="ldap_password_cmd", help="Shell command to run to retrieve the LDAP password") parser.add_option("--var", dest="keyval", action="append", help="Defines a variable to be used within the Impala session." " Can be used multiple times to set different variables." " It must follow the pattern \"KEY=VALUE\"," " KEY starts with an alphabetic character and" " contains alphanumeric characters or underscores.") parser.add_option("-Q", "--query_option", dest="query_options", action="append", help="Sets the default for a query option." " Can be used multiple times to set different query options." " It must follow the pattern \"KEY=VALUE\"," " KEY must be a valid query option. Valid query options " " can be listed by command 'set'.") parser.add_option("-t", "--client_connect_timeout_ms", help="Timeout in milliseconds after which impala-shell will time out" " if it fails to connect to Impala server. Set to 0 to disable any" " timeout.") parser.add_option("--protocol", dest="protocol", default="hs2", help="Protocol to use for client/server connection. Valid inputs are " "['hs2', 'hs2-http', 'beeswax']. 'hs2-http' uses HTTP transport " "to speak to the coordinator while 'hs2' and 'beeswax' use the " "binary TCP based transport. Beeswax support is deprecated " "and will be removed in the future.") parser.add_option("--strict_hs2_protocol", dest="strict_hs2_protocol", action="store_true", help="True if the hs2 connection is using the strict hs2 protocol." "Only useful if connecting straight to hs2 instead of Impala." "The default hs2 port is 11050 and the default hs2 http port " "is 10001.") parser.add_option("--use_ldap_test_password", dest="use_ldap_test_password", action="store_true", help="True if need to use the default LDAP password. This is needed " "when running tests in strict mode.") parser.add_option("--http_path", dest="http_path", default="cliservice", help="Default http path on the coordinator to connect to. The final " "connection URL looks like <http(s)>://<coordinator-host>:<port>/" "<http_path>. While the coordinator server implementation does not " "enforce any http path for the incoming requests, deployments could " "still put it behind a loadbalancer that can expect the traffic at a " "certain path.") parser.add_option("--fetch_size", type="int", dest="fetch_size", default=10240, help="The fetch size when fetching rows from the Impala coordinator. " "The fetch size controls how many rows a single fetch RPC request " "(RPC from the Impala shell to the Impala coordinator) reads at a " "time. This option is most effective when result spooling is enabled " "('spool_query_results'=true). When result spooling is enabled " "values over the batch_size are honored. When result spooling is " "disabled, values over the batch_size have no affect. By default, " "the fetch_size is set to 10240 which is equivalent to 10 row " "batches (assuming the default batch size). Note that if result " "spooling is disabled only a single row batch can be fetched at a " "time regardless of the specified fetch_size.") parser.add_option("--http_cookie_names", dest="http_cookie_names", default="impala.auth,impala.session.id", help="A comma-separated list of HTTP cookie names that are supported " "by the impala-shell. 
If a cookie with one of these names is " "returned in an http response by the server or an intermediate proxy " "then it will be included in each subsequent request for the same " "connection.") # add default values to the help text for option in parser.option_list: if option.dest is not None: # option._short_opts returns a list of short options, e.g. ["-Q"]. # option._long_opts returns a list of long options, e.g. ["--query_option"]. # The code below removes the - from the short option and -- from the long option. short_opt = option._short_opts[0][1:] if len(option._short_opts) > 0 else None long_opt = option._long_opts[0][2:] if len(option._long_opts) > 0 else None # In order to set the default flag values, optparse requires the keys to be the # dest names. The default flag values are set in impala_shell_config_defaults.py and # the default flag values may contain default values that are not for flags. if short_opt in defaults: if option.dest not in defaults: defaults[option.dest] = defaults[short_opt] elif type(defaults[option.dest]) == list: defaults[option.dest].extend(defaults[short_opt]) elif long_opt in defaults: if option.dest not in defaults: defaults[option.dest] = defaults[long_opt] elif type(defaults[option.dest]) == list: defaults[option.dest].extend(defaults[long_opt]) # since the quiet flag is the same as the verbose flag # we need to make sure to print the opposite value for it # (print quiet is false since verbose is true) if option == parser.get_option('--quiet'): option.help += " [default: %s]" % (not defaults['verbose']) # print default value of disable_live_progress in the help messages as opposite # value for default value of live_progress # (print disable_live_progress is false since live_progress is true) elif option == parser.get_option('--disable_live_progress'): option.help += " [default: %s]" % (not defaults['live_progress']) elif option != parser.get_option('--help') and option.help is not SUPPRESS_HELP: # don't want to print default value for help or options without help text option.help += " [default: %default]" # mutually exclusive flags should not be used in the same time if '--live_progress' in sys.argv and '--disable_live_progress' in sys.argv: parser.error("options --live_progress and --disable_live_progress are mutually " "exclusive") if '--verbose' in sys.argv and '--quiet' in sys.argv: parser.error("options --verbose and --quiet are mutually exclusive") parser.set_defaults(**defaults) return parser
7,435
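The option-handling module above reads an .impalarc file with two case-sensitive sections, [impala] for shell flags and [impala.query_options] for query-option defaults, and upper-cases the query-option keys before use. Below is a small, self-contained Python sketch of that reading step; the sample file contents come from the docstring at the top of the module, while the helper name and the simplified handling (no flag validation, no append-style options) are illustrative.

from configparser import ConfigParser

SAMPLE_IMPALARC = """
[impala]
impalad=localhost:21002
verbose=false

[impala.query_options]
EXPLAIN_LEVEL=2
MT_DOP=2
"""

def read_impalarc(text):
    config = ConfigParser()
    config.optionxform = str   # keep option names case-sensitive, as the shell does
    config.read_string(text)
    shell_options = dict(config.items("impala")) if config.has_section("impala") else {}
    query_options = {}
    if config.has_section("impala.query_options"):
        # Query option keys are normalized to upper case before merging with
        # options coming from the command line.
        query_options = {k.upper(): v for k, v in config.items("impala.query_options")}
    return shell_options, query_options

shell_options, query_options = read_impalarc(SAMPLE_IMPALARC)
assert shell_options["impalad"] == "localhost:21002"
assert query_options["MT_DOP"] == "2"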
1,176
<reponame>ValtoGameEngines/Intrinsic-Engine<gh_stars>1000+ // Copyright 2017 <NAME> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Precompiled header file #include "stdafx.h" namespace Intrinsic { namespace Renderer { _INTR_ARRAY(VkSampler) Samplers::samplers; void Samplers::init() { samplers.resize(kCount); { VkSamplerCreateInfo samplerCreateInfo = {}; samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.pNext = nullptr; samplerCreateInfo.flags = 0u; samplerCreateInfo.magFilter = VK_FILTER_LINEAR; samplerCreateInfo.minFilter = VK_FILTER_LINEAR; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.mipLodBias = 0.0f; samplerCreateInfo.compareEnable = false; samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0f; samplerCreateInfo.maxLod = FLT_MAX; samplerCreateInfo.maxAnisotropy = 8.0f; samplerCreateInfo.anisotropyEnable = VK_TRUE; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; VkResult result = vkCreateSampler(RenderSystem::_vkDevice, &samplerCreateInfo, nullptr, &samplers[kLinearClamp]); _INTR_VK_CHECK_RESULT(result); } { VkSamplerCreateInfo samplerCreateInfo = {}; samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.pNext = nullptr; samplerCreateInfo.flags = 0u; samplerCreateInfo.magFilter = VK_FILTER_LINEAR; samplerCreateInfo.minFilter = VK_FILTER_LINEAR; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.mipLodBias = 0.0f; samplerCreateInfo.compareEnable = false; samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0f; samplerCreateInfo.maxLod = FLT_MAX; samplerCreateInfo.maxAnisotropy = 8.0f; samplerCreateInfo.anisotropyEnable = VK_TRUE; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; VkResult result = vkCreateSampler(RenderSystem::_vkDevice, &samplerCreateInfo, nullptr, &samplers[kLinearRepeat]); _INTR_VK_CHECK_RESULT(result); } { VkSamplerCreateInfo samplerCreateInfo = {}; samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.pNext = nullptr; samplerCreateInfo.flags = 0u; samplerCreateInfo.magFilter = VK_FILTER_NEAREST; samplerCreateInfo.minFilter = VK_FILTER_NEAREST; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT; samplerCreateInfo.mipLodBias = 0.0f; samplerCreateInfo.compareEnable = false; samplerCreateInfo.compareOp = 
VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0f; samplerCreateInfo.maxLod = FLT_MAX; samplerCreateInfo.anisotropyEnable = VK_FALSE; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; VkResult result = vkCreateSampler(RenderSystem::_vkDevice, &samplerCreateInfo, nullptr, &samplers[kNearestRepeat]); _INTR_VK_CHECK_RESULT(result); } { VkSamplerCreateInfo samplerCreateInfo = {}; samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.pNext = nullptr; samplerCreateInfo.flags = 0u; samplerCreateInfo.magFilter = VK_FILTER_NEAREST; samplerCreateInfo.minFilter = VK_FILTER_NEAREST; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.mipLodBias = 0.0f; samplerCreateInfo.compareEnable = false; samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0f; samplerCreateInfo.maxLod = FLT_MAX; samplerCreateInfo.anisotropyEnable = VK_FALSE; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; VkResult result = vkCreateSampler(RenderSystem::_vkDevice, &samplerCreateInfo, nullptr, &samplers[kNearestClamp]); _INTR_VK_CHECK_RESULT(result); } { VkSamplerCreateInfo samplerCreateInfo = {}; samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.pNext = nullptr; samplerCreateInfo.flags = 0u; samplerCreateInfo.magFilter = VK_FILTER_LINEAR; samplerCreateInfo.minFilter = VK_FILTER_LINEAR; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.mipLodBias = 0.0f; samplerCreateInfo.compareEnable = true; samplerCreateInfo.compareOp = VK_COMPARE_OP_LESS; samplerCreateInfo.minLod = 0.0f; samplerCreateInfo.maxLod = FLT_MAX; samplerCreateInfo.anisotropyEnable = VK_FALSE; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; VkResult result = vkCreateSampler(RenderSystem::_vkDevice, &samplerCreateInfo, nullptr, &samplers[kShadow]); _INTR_VK_CHECK_RESULT(result); } } } }
2,718
376
// Copyright (C) 2020 <NAME>
// This file is part of the "Nazara Engine - Graphics module"
// For conditions of distribution and use, see copyright notice in Config.hpp

#include <Nazara/Widgets/Widgets.hpp>
#include <Nazara/Widgets/Debug.hpp>

namespace Nz
{
	/*!
	* \ingroup widgets
	* \class Nz::Widgets
	* \brief Widgets class that represents the module initializer of Widgets
	*/

	Widgets::Widgets(Config config) :
	ModuleBase("Widgets", this)
	{
		ECS::RegisterComponents();
	}

	Widgets* Widgets::s_instance = nullptr;
}
188
716
import pinocchio as pin
import numpy as np

model = pin.buildSampleModelHumanoid()
model.lowerPositionLimit[:7] = -np.ones(7)
model.upperPositionLimit[:7] = +np.ones(7)

pool = pin.ModelPool(model)

num_threads = pin.omp_get_max_threads()
batch_size = 128

q = np.empty((model.nq,batch_size))
for k in range(batch_size):
    q[:,k] = pin.randomConfiguration(model)

v = np.zeros((model.nv,batch_size))
a = np.zeros((model.nv,batch_size))
tau = np.zeros((model.nv,batch_size))

print("num_threads: {}".format(num_threads))
print("batch_size: {}".format(batch_size))

# Call RNEA
res_rnea = np.empty((model.nv,batch_size))
pin.rnea(num_threads,pool,q,v,a,res_rnea) # Without allocation
res_rnea2 = pin.rnea(num_threads,pool,q,v,a) # With allocation

# Call ABA
res_aba = np.empty((model.nv,batch_size))
pin.aba(num_threads,pool,q,v,tau,res_aba) # Without allocation
res_aba2 = pin.aba(num_threads,pool,q,v,tau) # With allocation
378
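A quick consistency check for the batched calls above is to compare one column of the multithreaded result against the ordinary serial RNEA. The sketch below is not part of the original snippet and assumes the standard single-sample pin.rnea(model, data, q, v, a) overload.

# Illustrative check only: column 0 of the batched result should match the
# serial RNEA call for the same configuration, velocity and acceleration.
data = model.createData()
tau_serial = pin.rnea(model, data, q[:, 0], v[:, 0], a[:, 0])
assert np.allclose(res_rnea[:, 0], tau_serial)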
402
# ------------------------------------------------------------------------------------------ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. # ------------------------------------------------------------------------------------------ from __future__ import annotations import logging from collections import OrderedDict from dataclasses import dataclass, field from pathlib import Path from typing import Any, Dict, Generic, Iterable, List, Optional, Sequence, Tuple, TypeVar, Union import numpy as np import pandas as pd from more_itertools import flatten from pandas._typing import FilePathOrBuffer from sklearn.metrics import auc, log_loss, precision_recall_curve, roc_auc_score, roc_curve from InnerEye.Azure.azure_util import DEFAULT_CROSS_VALIDATION_SPLIT_INDEX from InnerEye.Common.common_util import check_properties_are_not_none from InnerEye.Common.metrics_constants import INTERNAL_TO_LOGGING_COLUMN_NAMES, LoggingColumns, MetricType, \ MetricTypeOrStr, SEQUENCE_POSITION_HUE_NAME_PREFIX from InnerEye.ML.common import ModelExecutionMode from InnerEye.ML.utils.metrics_util import binary_classification_accuracy, mean_absolute_error, \ mean_squared_error, r2_score FloatOrInt = Union[float, int] T = TypeVar('T', np.ndarray, float) MetricsPerExecutionModeAndEpoch = Dict[ModelExecutionMode, Dict[Union[int, str], 'ScalarMetricsDict']] DEFAULT_KEY = "Default" def average_metric_values(values: List[float], skip_nan_when_averaging: bool) -> float: """ Returns the average (arithmetic mean) of the values provided. If skip_nan_when_averaging is True, the mean will be computed without any possible NaN values in the list. :param values: The individual values that should be averaged. :param skip_nan_when_averaging: If True, compute mean with any NaN values. If False, any NaN value present in the argument will make the function return NaN. :return: The average of the provided values. If the argument is an empty list, NaN will be returned. """ if skip_nan_when_averaging: return np.nanmean(values).item() else: return np.mean(values).item() @dataclass(frozen=True) class PredictionEntry(Generic[T]): subject_id: str predictions: T labels: T def __post_init__(self) -> None: check_properties_are_not_none(self) def get_column_name_for_logging(metric_name: Union[str, MetricType], hue_name: Optional[str] = None) -> str: """ Computes the column name that should be used when logging a metric to disk. Raises a value error when no column name has yet been defined. :param metric_name: The name of the metric. :param hue_name: If provided will be used as a prefix hue_name/column_name """ metric_str = metric_name if isinstance(metric_name, str) else metric_name.value if metric_str in INTERNAL_TO_LOGGING_COLUMN_NAMES: return get_metric_name_with_hue_prefix(INTERNAL_TO_LOGGING_COLUMN_NAMES[metric_str].value, hue_name) raise ValueError(f"No column name mapping defined for metric '{metric_str}'") def get_metric_name_with_hue_prefix(metric_name: str, hue_name: Optional[str] = None) -> str: """ If hue_name is provided and is not equal to the default hue then it will be used as a prefix hue_name/column_name, otherwise metric_name will be returned. """ prefix = f"{hue_name}/" if hue_name and hue_name is not MetricsDict.DEFAULT_HUE_KEY else '' return f"{prefix}{metric_name}" @dataclass class Hue: """ Dataclass to encapsulate hue specific data related for metrics computation. 
""" name: str values: Dict[str, List[FloatOrInt]] = field(default_factory=dict) predictions: List[np.ndarray] = field(default_factory=list) labels: List[np.ndarray] = field(default_factory=list) subject_ids: List[str] = field(default_factory=list) @property def has_prediction_entries(self) -> bool: """ Returns True if the present object stores any entries for computing the Area Under Roc Curve metric. """ _labels = self.labels return len(_labels) > 0 if _labels else False def add_predictions(self, subject_ids: Sequence[str], predictions: np.ndarray, labels: np.ndarray) -> None: """ Adds predictions and labels for later computing the area under the ROC curve. :param subject_ids: Subject ids associated with the predictions and labels. :param predictions: A numpy array with model predictions, of size [N x C] for N samples in C classes, or size [N x 1] or size [N] for binary. :param labels: A numpy array with labels, of size [N x C] for N samples in C classes, or size [N x 1] or size [N] for binary. """ if predictions.ndim == 1: predictions = np.expand_dims(predictions, axis=1) if labels.ndim == 1: labels = np.expand_dims(labels, axis=1) if not (len(predictions) == len(labels) == len(subject_ids)): raise ValueError("predictions, labels and subject_ids must have the same length in dimension 0 " f"found predictions={len(predictions)}, labels={len(labels)}, " f"and subject_ids={len(subject_ids)}") self.subject_ids += subject_ids self.predictions.append(predictions) self.labels.append(labels) def get_predictions(self) -> np.ndarray: """ Return a concatenated copy of the roc predictions stored internally. """ return Hue._concat_if_needed(self.predictions) def get_labels(self) -> np.ndarray: """ Return a concatenated copy of the roc labels stored internally. """ return Hue._concat_if_needed(self.labels) def get_predictions_and_labels_per_subject(self) -> List[PredictionEntry[float]]: """ Gets the per-subject predictions that are stored in the present object. """ predictions = self.get_predictions() labels = self.get_labels() if not (len(self.subject_ids) == len(labels) == len(predictions)): raise ValueError(f"Inconsistent number of predictions stored: " f"{len(self.subject_ids)} subjects, " f"{len(labels)} labels, " f"{len(predictions)} predictions.") return [PredictionEntry(subject_id=x, predictions=predictions[i][0], labels=labels[i][0]) for i, x in enumerate(self.subject_ids)] @staticmethod def _concat_if_needed(arrays: List[np.ndarray]) -> np.ndarray: """ Joins a list of arrays into a single array, taking empty lists into account correctly. :param arrays: Array list to be concatenated. """ if arrays: return np.concatenate(arrays, axis=0) return np.array([]) def enumerate_single_values(self) -> Iterable[Tuple[str, float]]: """ Returns an iterator that contains all (metric name, metric value) tuples that are stored in the present object. The method assumes that there is exactly 1 metric value stored per name, and throws a ValueError if that is not the case. :return: An iterator with (metric name, metric value) pairs. """ for metric_name, metric_value in self.values.items(): if len(metric_value) == 1: yield metric_name, metric_value[0] else: raise ValueError(f"Expected that all metrics lists only hold 1 item, " f"but got this list for Hue {self.name} : metric " f"'{metric_name}': {metric_value}") class MetricsDict: """ This class helps aggregate an arbitrary number of metrics across multiple batches or multiple samples. Metrics are identified by a string name. 
Metrics can have further hues which are isolated metrics records, and can be used for cases such as different anatomical structures, where we might want to maintain separate metrics for each structure, to perform independent aggregations. """ DEFAULT_HUE_KEY = DEFAULT_KEY # the columns used when metrics dict is converted to a data frame/string representation DATAFRAME_COLUMNS = [LoggingColumns.Hue.value, "metrics"] def __init__(self, hues: Optional[List[str]] = None, is_classification_metrics: bool = True) -> None: """ :param hues: Supported hues for this metrics dict, otherwise all records will belong to the default hue. :param is_classification_metrics: If this is a classification metrics dict """ _hues = hues.copy() if hues else None if _hues and MetricsDict.DEFAULT_HUE_KEY in _hues: _hues.remove(MetricsDict.DEFAULT_HUE_KEY) self.hues_without_default = _hues or [] _hue_keys = self.hues_without_default + [MetricsDict.DEFAULT_HUE_KEY] self.hues: OrderedDict[str, Hue] = OrderedDict([(x, Hue(name=x)) for x in _hue_keys]) self.skip_nan_when_averaging: Dict[str, bool] = dict() self.row_labels: List[str] = list() self.is_classification_metrics = is_classification_metrics self.diagnostics: Dict[str, List[Any]] = dict() def subject_ids(self, hue: str = DEFAULT_HUE_KEY) -> List[str]: """ Return the subject ids that have metrics associated with them in this dictionary. :param hue: If provided then subject ids belonging to this hue only will be returned. Otherwise subject ids for the default hue will be returned. """ return self._get_hue(hue=hue).subject_ids def get_hue_names(self, include_default: bool = True) -> List[str]: """ Returns all of the hues supported by this metrics dict :param include_default: Include the default hue if True, otherwise exclude the default hue. """ _hue_names = list(self.hues.keys()) if not include_default: _hue_names.remove(MetricsDict.DEFAULT_HUE_KEY) return _hue_names def delete_hue(self, hue: str) -> None: """ Removes all data stored for the given hue from the present object. :param hue: The hue to remove. """ del self.hues[hue] def get_single_metric(self, metric_name: MetricTypeOrStr, hue: str = DEFAULT_HUE_KEY) -> FloatOrInt: """ Gets the value stored for the given metric. The method assumes that there is a single value stored for the metric, and raises a ValueError if that is not the case. :param metric_name: The name of the metric to retrieve. :param hue: The hue to retrieve the metric from. :return: """ name = MetricsDict._metric_name(metric_name) values = self.values(hue)[name] if len(values) == 1: return values[0] raise ValueError(f"Expected a single entry for metric '{name}', but got {len(values)}") def has_prediction_entries(self, hue: str = DEFAULT_HUE_KEY) -> bool: """ Returns True if the present object stores any entries for computing the Area Under Roc Curve metric. :param hue: will be used to check a particular hue otherwise default hue will be used. :return: True if entries exist. False otherwise. """ return self._get_hue(hue).has_prediction_entries def values(self, hue: str = DEFAULT_HUE_KEY) -> Dict[str, Any]: """ Returns values held currently in the dict :param hue: will be used to restrict values for the provided hue otherwise values in the default hue will be returned. :return: Dictionary of values for this object. """ return self._get_hue(hue).values def add_diagnostics(self, name: str, value: Any) -> None: """ Adds a diagnostic value to the present object. Multiple diagnostics can be stored per unique value of name, the values get concatenated. 
:param name: The name of the diagnostic value to store. :param value: The value to store. """ if name in self.diagnostics: # There is already an entry, append to the end of the list self.diagnostics[name].append(value) else: self.diagnostics[name] = [value] @staticmethod def _metric_name(metric_name: MetricTypeOrStr) -> str: """ Converts a metric name, given either as an enum or a string, to a string. """ if isinstance(metric_name, MetricType): return metric_name.value return str(metric_name) def add_metric(self, metric_name: Union[str, MetricType], metric_value: FloatOrInt, skip_nan_when_averaging: bool = False, hue: str = DEFAULT_HUE_KEY) -> None: """ Adds values for a single metric to the present object, when the metric value is a scalar. :param metric_name: The name of the metric to add. This can be a string or a value in the MetricType enum. :param metric_value: The values of the metric, as a float or integer. :param skip_nan_when_averaging: If True, averaging this metric will skip any NaN (not a number) values. If False, NaN will propagate through the mean computation. :param hue: The hue for which this record belongs to, default hue will be used if None provided. """ _metric_name = MetricsDict._metric_name(metric_name) if isinstance(metric_value, (float, int)): _values = self._get_hue(hue).values if _metric_name in _values: # There is already an entry for this metric, append to the end of the list _values[_metric_name].append(metric_value) else: _values[_metric_name] = [metric_value] else: raise ValueError(f"Expected the metric to be a scalar (float or int), but got: {type(metric_value)}") self.skip_nan_when_averaging[_metric_name] = skip_nan_when_averaging def delete_metric(self, metric_name: Union[str, MetricType], hue: str = DEFAULT_HUE_KEY) -> None: """ Deletes all values that are stored for a given metric from the present object. :param metric_name: The name of the metric to add. This can be a string or a value in the MetricType enum. :param hue: The hue for which this record belongs to, default hue will be used if None provided. """ _metric_name = MetricsDict._metric_name(metric_name) del self._get_hue(hue).values[_metric_name] def add_predictions(self, subject_ids: Sequence[str], predictions: np.ndarray, labels: np.ndarray, hue: str = DEFAULT_HUE_KEY) -> None: """ Adds predictions and labels for later computing the area under the ROC curve. :param subject_ids: Subject ids associated with the predictions and labels. :param predictions: A numpy array with model predictions, of size [N x C] for N samples in C classes, or size [N x 1] or size [N] for binary. :param labels: A numpy array with labels, of size [N x C] for N samples in C classes, or size [N x 1] or size [N] for binary. :param hue: The hue this prediction belongs to, default hue will be used if None provided. """ self._get_hue(hue).add_predictions(subject_ids=subject_ids, labels=labels, predictions=predictions) def num_entries(self, hue: str = DEFAULT_HUE_KEY) -> Dict[str, int]: """ Gets the number of values that are stored for each individual metric. :param hue: The hue to count entries for, otherwise all entries will be counted. :return: A dictionary mapping from metric name to number of values stored. """ _values = self._get_hue(hue).values return {m: len(v) for m, v in _values.items()} def average(self, add_metrics_from_entries: bool = False, across_hues: bool = True) -> MetricsDict: """ Returns a MetricsDict object that only contains the per-metric averages (arithmetic mean) from the present object. 
Computing the average will respect the skip_nan_when_averaging value that has been provided when adding the metric. :param add_metrics_from_entries: average existing metrics in the dict. :param across_hues: If True then same metric types will be averaged regardless of hues, otherwise separate averages for each metric type for each hue will be computed, Default is True. :return: A MetricsDict object with a single-item list for each of the metrics. """ def _get_all_metrics() -> List[Tuple[str, str, Any]]: _all_values = {} for _hue in self.get_hue_names(): _values = self.values(_hue) if self.has_prediction_entries(_hue): if self.is_classification_metrics: _values[MetricType.AREA_UNDER_ROC_CURVE.value] = [self.get_roc_auc(_hue)] _values[MetricType.AREA_UNDER_PR_CURVE.value] = [self.get_pr_auc(_hue)] # Add metrics at optimal cut-off optimal_threshold, fpr, fnr, accuracy = self.get_metrics_at_optimal_cutoff(_hue) _values[MetricType.ACCURACY_AT_OPTIMAL_THRESHOLD.value] = [accuracy] _values[MetricType.FALSE_POSITIVE_RATE_AT_OPTIMAL_THRESHOLD.value] = [fpr] _values[MetricType.FALSE_NEGATIVE_RATE_AT_OPTIMAL_THRESHOLD.value] = [fnr] _values[MetricType.OPTIMAL_THRESHOLD.value] = [optimal_threshold] if add_metrics_from_entries: if MetricType.CROSS_ENTROPY.value in _values: raise ValueError( "Unable to add cross entropy because this metric is already present in the dict.") else: _values[MetricType.CROSS_ENTROPY.value] = [self.get_cross_entropy(_hue)] _values[MetricType.ACCURACY_AT_THRESHOLD_05.value] = [self.get_accuracy_at05(_hue)] else: if add_metrics_from_entries: _values[MetricType.MEAN_ABSOLUTE_ERROR.value] = [self.get_mean_absolute_error(_hue)] _values[MetricType.MEAN_SQUARED_ERROR.value] = [self.get_mean_squared_error(_hue)] _values[MetricType.EXPLAINED_VAR.value] = [self.get_r2_score(_hue)] _values[MetricType.SUBJECT_COUNT.value] = [len(self.get_predictions(_hue))] _all_values[_hue] = _values # noinspection PyTypeChecker return list( flatten([list(map(lambda x: (k, *x), v.items())) for k, v in _all_values.items()])) # type: ignore def _fill_new_metrics_dict(m: MetricsDict, average: bool = False) -> MetricsDict: for _m_hue, _m_metric_name, _m_value in _get_all_metrics(): skip_nan = self.skip_nan_when_averaging.get(_m_metric_name, False) # type: ignore if average: m.add_metric(_m_metric_name, average_metric_values(_m_value, skip_nan_when_averaging=skip_nan), hue=_m_hue) else: for _v in _m_value: m.add_metric(_m_metric_name, _v, skip_nan_when_averaging=skip_nan) return m if across_hues: return _fill_new_metrics_dict(MetricsDict()).average(across_hues=False) else: return _fill_new_metrics_dict(MetricsDict(hues=self.get_hue_names(include_default=False)), average=True) def get_accuracy_at05(self, hue: str = DEFAULT_HUE_KEY) -> float: """ Returns the binary classification accuracy at threshold 0.5 """ return binary_classification_accuracy(model_output=self.get_predictions(hue=hue), label=self.get_labels(hue=hue)) @classmethod def get_optimal_idx(cls, fpr: np.ndarray, tpr: np.ndarray) -> np.ndarray: """ Given a list of FPR and TPR values corresponding to different thresholds, compute the index which corresponds to the optimal threshold. """ optimal_idx = np.argmax(tpr - fpr) return optimal_idx def get_metrics_at_optimal_cutoff(self, hue: str = DEFAULT_HUE_KEY) -> Tuple: """ Computes the ROC to find the optimal cut-off i.e. the probability threshold for which the difference between true positive rate and false positive rate is smallest. 
Then, computes the false positive rate, false negative rate and accuracy at this threshold (i.e. when the predicted probability is higher than the threshold the predicted label is 1 otherwise 0). :param hue: The hue to restrict the values used for computation, otherwise all values will be used. :returns: Tuple(optimal_threshold, false positive rate, false negative rate, accuracy) """ fpr, tpr, thresholds = roc_curve(self.get_labels(hue=hue), self.get_predictions(hue=hue)) optimal_idx = MetricsDict.get_optimal_idx(fpr=fpr, tpr=tpr) optimal_threshold = float(thresholds[optimal_idx]) accuracy = binary_classification_accuracy(model_output=self.get_predictions(hue=hue), label=self.get_labels(hue=hue), threshold=optimal_threshold) false_negative_optimal = 1 - tpr[optimal_idx] false_positive_optimal = fpr[optimal_idx] return optimal_threshold, false_positive_optimal, false_negative_optimal, accuracy def get_roc_auc(self, hue: str = DEFAULT_HUE_KEY) -> float: """ Computes the Area Under the ROC curve, from the entries that were supplied in the add_roc_entries method. :param hue: The hue to restrict the values used for computation, otherwise all values will be used. :return: The AUC score, or np.nan if no entries are available in the present object. """ if not self.has_prediction_entries(hue): return np.nan predictions = self.get_predictions(hue) labels = self.get_labels(hue) if predictions.shape[1] == 1 and labels.shape[1] == 1 and len(np.unique(labels)) == 1: # We are dealing with a binary classification problem, but there is only a single class present # in the data: This happens occasionaly in test data. Return 1.0 because in such cases we could # always get a classifier threshold that correctly classifies everything. return 1.0 else: return roc_auc_score(labels, predictions) def get_pr_auc(self, hue: str = DEFAULT_HUE_KEY) -> float: """ Computes the Area Under the Precision Recall Curve, from the entries that were supplied in the add_roc_entries method. :param hue: The hue to restrict the values used for computation, otherwise all values will be used. :return: The PR AUC score, or np.nan if no entries are available in the present object. """ if not self.has_prediction_entries(hue): return np.nan predictions = self.get_predictions(hue) labels = self.get_labels(hue) if predictions.shape[1] == 1 and labels.shape[1] == 1 and len(np.unique(labels)) == 1: # We are dealing with a binary classification problem, but there is only a single class present # in the data: This happens occasionaly in test data. Return 1.0 because in such cases we could # always get a classifier threshold that correctly classifies everything. return 1.0 precision, recall, _ = precision_recall_curve(labels, predictions) return auc(recall, precision) def get_cross_entropy(self, hue: str = DEFAULT_HUE_KEY) -> float: """ Computes the binary cross entropy from the entries that were supplied in the add_roc_entries method. :param hue: The hue to restrict the values used for computation, otherwise all values will be used. :return: The cross entropy score. """ predictions = self.get_predictions(hue) labels = self.get_labels(hue) return log_loss(labels, predictions, labels=[0, 1]) def get_mean_absolute_error(self, hue: str = DEFAULT_HUE_KEY) -> float: """ Get the mean absolute error. :param hue: The hue to restrict the values used for computation, otherwise all values will be used. :return: Mean absolute error. 
""" return mean_absolute_error(model_output=self.get_predictions(hue), label=self.get_labels(hue)) def get_mean_squared_error(self, hue: str = DEFAULT_HUE_KEY) -> float: """ Get the mean squared error. :param hue: The hue to restrict the values used for computation, otherwise all values will be used. :return: Mean squared error """ return mean_squared_error(model_output=self.get_predictions(hue), label=self.get_labels(hue)) def get_r2_score(self, hue: str = DEFAULT_HUE_KEY) -> float: """ Get the R2 score. :param hue: The hue to restrict the values used for computation, otherwise all values will be used. :return: R2 score """ return r2_score(model_output=self.get_predictions(hue), label=self.get_labels(hue)) def enumerate_single_values(self, hue: Optional[str] = None) -> Iterable[Tuple[str, str, float]]: """ Returns an iterator that contains all (hue name, metric name, metric values) tuples that are stored in the present object. This method assumes that for each hue/metric combination there is exactly 1 value, and it throws an exception if that is more than 1 value. :param hue: The hue to restrict the values, otherwise all values will be used if set to None. :return: An iterator with (hue name, metric name, metric values) pairs. """ for _hue, metric_name, values in self._enumerate_values(hue=hue, ensure_singleton_values_only=True): yield _hue, metric_name, values[0] def _enumerate_values(self, hue: Optional[str] = None, ensure_singleton_values_only: bool = False) \ -> Iterable[Tuple[str, str, List[float]]]: """ Returns an iterator that contains all (hue name, metric name, metric values) tuples that are stored in the present object. :param hue: The hue to restrict the values, otherwise all values will be used if set to None. :param ensure_singleton_values_only: Ensure that each of the values return is a singleton. :return: An iterator with (hue name, metric name, metric values) pairs. """ _hues_to_iterate = [hue] if hue is not None else self.get_hue_names() for _hue in _hues_to_iterate: _values = self._get_hue(_hue).values for metric_name, metric_value in _values.items(): if ensure_singleton_values_only and len(metric_value) != 1: raise ValueError(f"Expected that all metrics lists only hold 1 item, " f"but got this list for Hue {_hue} : metric " f"'{metric_name}': {metric_value}") yield _hue, metric_name, metric_value def enumerate_single_values_groupwise(self) -> Iterable[Tuple[str, Iterable[Tuple[str, float]]]]: """ Returns an iterator that contains (hue name, metric_name_and_value) tuples that are stored in the present object. The second tuple element is again an iterator that returns all metric name and value tuples that are stored for that specific hue. This method assumes that for each hue/metric combination there is exactly 1 value, and it throws an exception if that is more than 1 value. :return: An iterator with (hue name, metric_name_and_value) pairs. """ _hues_to_iterate = [MetricsDict.DEFAULT_HUE_KEY] + self.get_hue_names(include_default=False) for _hue in _hues_to_iterate: yield _hue, self._get_hue(_hue).enumerate_single_values() def get_predictions(self, hue: str = DEFAULT_HUE_KEY) -> np.ndarray: """ Return a concatenated copy of the roc predictions stored internally. :param hue: The hue to restrict the values, otherwise all values will be used. :return: concatenated roc predictions as np array """ return self._get_hue(hue).get_predictions() def get_labels(self, hue: str = DEFAULT_HUE_KEY) -> np.ndarray: """ Return a concatenated copy of the roc labels stored internally. 
:param hue: The hue to restrict the values, otherwise all values will be used. :return: roc labels as np array """ return self._get_hue(hue).get_labels() def get_predictions_and_labels_per_subject(self, hue: str = DEFAULT_HUE_KEY) \ -> List[PredictionEntry[float]]: """ Gets the per-subject labels and predictions that are stored in the present object. :param hue: The hue to restrict the values, otherwise the default hue will be used. :return: List of per-subject labels and predictions """ return self._get_hue(hue).get_predictions_and_labels_per_subject() def to_string(self, tabulate: bool = True) -> str: """ Creates a multi-line human readable string from the given metrics. :param tabulate: If True then create a pretty printable table string. :return: Formatted metrics string """ from InnerEye.ML.utils.io_util import tabulate_dataframe df = self.to_data_frame() return tabulate_dataframe(df) if tabulate else df.to_string(index=False) def to_data_frame(self) -> pd.DataFrame: """ Creates a data frame representation of the metrics dict in the format with the Hue name as a column and a string representation of all metrics for that hue as a second column. """ def _format_metric_values(x: Union[List[float], float]) -> str: x = [x] if isinstance(x, float) else x _x = [f"{y:0.4f}" for y in x] return str(_x[0] if len(_x) == 1 else _x) info_df = pd.DataFrame(columns=MetricsDict.DATAFRAME_COLUMNS) for hue in self.get_hue_names(): info_list = [f"{metric_name}: {_format_metric_values(metric_values)}" for _, metric_name, metric_values in self._enumerate_values(hue=hue)] if info_list: info_list_str = ", ".join(info_list) info_df = info_df.append({MetricsDict.DATAFRAME_COLUMNS[0]: hue, MetricsDict.DATAFRAME_COLUMNS[1]: info_list_str}, ignore_index=True) return info_df def _get_hue(self, hue: str = DEFAULT_HUE_KEY) -> Hue: """ Get the hue record for the provided key. Raises a KeyError if the provided hue key does not exist. :param hue: The hue to retrieve record for """ if hue not in self.hues: raise KeyError(f"Unknown hue '{hue}' provided, key value must be one of {self.hues.keys()}") else: return self.hues[hue] class ScalarMetricsDict(MetricsDict): """ Specialization of the MetricsDict with Classification related functions. """ def __init__(self, hues: Optional[List[str]] = None, is_classification_metrics: bool = True) -> None: super().__init__(hues, is_classification_metrics=is_classification_metrics) def binary_classification_accuracy(self, hue: str = MetricsDict.DEFAULT_HUE_KEY) -> float: """ :param hue: The hue to restrict the values, otherwise all values will be used. :return: binary classification accuracy """ return binary_classification_accuracy(model_output=self.get_predictions(hue=hue), label=self.get_labels(hue=hue)) def store_metrics_per_subject(self, df_logger: DataframeLogger, mode: ModelExecutionMode, epoch: Union[int, str], cross_validation_split_index: int = DEFAULT_CROSS_VALIDATION_SPLIT_INDEX) -> None: """ Store metrics using the provided df_logger at subject level for classification models. :param df_logger: A data frame logger to use to write the metrics to disk. :param mode: Model execution mode these metrics belong to. 
:param cross_validation_split_index: cross validation split index for the epoch if performing cross val :return: """ for hue in self.get_hue_names(): for prediction_entry in self.get_predictions_and_labels_per_subject(hue=hue): df_logger.add_record({ LoggingColumns.Hue.value: hue, LoggingColumns.Patient.value: prediction_entry.subject_id, LoggingColumns.ModelOutput.value: prediction_entry.predictions, LoggingColumns.Label.value: prediction_entry.labels, LoggingColumns.Epoch.value: epoch, LoggingColumns.CrossValidationSplitIndex.value: cross_validation_split_index, LoggingColumns.DataSplit.value: mode.value }) @staticmethod def load_execution_mode_metrics_from_df(df: pd.DataFrame, is_classification_metrics: bool) -> MetricsPerExecutionModeAndEpoch: """ Helper function to create BinaryClassificationMetricsDict grouped by ModelExecutionMode and epoch from a given dataframe. The following columns must exist in the provided data frame: >>> LoggingColumns.DataSplit >>> LoggingColumns.Epoch :param df: DataFrame to use for creating the metrics dict. :param is_classification_metrics: If the current metrics are for classification or not. """ has_hue_column = LoggingColumns.Hue.value in df group_columns = [LoggingColumns.DataSplit.value, LoggingColumns.Epoch.value] if has_hue_column: group_columns.append(LoggingColumns.Hue.value) grouped = df.groupby(group_columns) result: MetricsPerExecutionModeAndEpoch = dict() hues = [] if has_hue_column: hues = [h for h in df[LoggingColumns.Hue.value].unique() if h] for name, group in grouped: if has_hue_column: mode_str, epoch, hue = name else: mode_str, epoch = name hue = MetricsDict.DEFAULT_HUE_KEY mode = ModelExecutionMode(mode_str) if mode not in result: result[mode] = dict() if epoch not in result[mode]: result[mode][epoch] = ScalarMetricsDict(is_classification_metrics=is_classification_metrics, hues=hues) subjects = list(group[LoggingColumns.Patient.value].values) predictions = group[LoggingColumns.ModelOutput.value].to_numpy(dtype=np.float) labels = group[LoggingColumns.Label.value].to_numpy(dtype=np.float) result[mode][epoch].add_predictions(subjects, predictions, labels, hue=hue) return result @staticmethod def aggregate_and_save_execution_mode_metrics( metrics: MetricsPerExecutionModeAndEpoch, data_frame_logger: DataframeLogger, log_info: bool = True) -> None: """ Given metrics dicts for execution modes and epochs, compute the aggregate metrics that are computed from the per-subject predictions. The metrics are written to the dataframe logger with the string labels (column names) taken from the `MetricType` enum. :param metrics: Mapping between epoch and subject level metrics :param data_frame_logger: DataFrame logger to write to and flush :param log_info: If True then log results as an INFO string to the default logger also. :return: """ for mode, epoch_metrics in metrics.items(): for epoch, metrics_dict in epoch_metrics.items(): # Compute the aggregate metrics using the .average method of the dictionary, # to ensure that we are averaging over the same metrics that would be written in training. 
averaged = metrics_dict.average(add_metrics_from_entries=True, across_hues=False) for hue, values_within_hue in averaged.enumerate_single_values_groupwise(): record: Dict[str, Any] = { LoggingColumns.Hue.value: hue, } has_any_values = False for key, value in values_within_hue: has_any_values = True value_str = str(value) if isinstance(value, int) else f"{value:0.5f}" metric_name = get_column_name_for_logging(key) record[metric_name] = value_str # Do not create a row at all if there are no metrics in a particular hue. This could happen # for example when using multi-step RNN, where no data is in the default hue. if has_any_values: # Add epoch last to more easily navigate visually record[LoggingColumns.DataSplit.value] = mode.value record[LoggingColumns.Epoch.value] = epoch data_frame_logger.add_record(record) # save results to disk data_frame_logger.flush(log_info=log_info) class SequenceMetricsDict(ScalarMetricsDict): """ Specialization of the MetricsDict with Sequence related functions. """ def __init__(self, hues: Optional[List[str]] = None, is_classification_metrics: bool = True) -> None: super().__init__(hues, is_classification_metrics=is_classification_metrics) @staticmethod def create(is_classification_model: bool, sequence_target_positions: List[int]) -> SequenceMetricsDict: # Create labels for the different prediction target positions that give numerically increasing positions # when using string sorting hues = [SequenceMetricsDict.get_hue_name_from_target_index(p) for p in sequence_target_positions] return SequenceMetricsDict(hues=hues, is_classification_metrics=is_classification_model) @staticmethod def get_hue_name_from_target_index(target_index: int) -> str: """ Creates a metrics hue name for sequence models, from a target index. For a sequence model that predicts at index 7, the hue name would be "Seq_pos 07" """ return f"{SEQUENCE_POSITION_HUE_NAME_PREFIX} {target_index:02}" @staticmethod def get_target_index_from_hue_name(hue_name: str) -> int: """ Extracts a sequence target index from a metrics hue name. For example, from metrics hue "Seq_pos 07", it would return 7. :param hue_name: hue name containing sequence target index """ if hue_name.startswith(SEQUENCE_POSITION_HUE_NAME_PREFIX): try: return int(hue_name[len(SEQUENCE_POSITION_HUE_NAME_PREFIX):]) except: pass raise ValueError(f"Unable to extract target index from this string: {hue_name}") class DataframeLogger: """ Single DataFrame logger for logging to CSV file """ def __init__(self, csv_path: FilePathOrBuffer, fixed_columns: Optional[Dict[str, Any]] = None): self.csv_path = csv_path self.fixed_columns = fixed_columns or {} self.records: List[Dict[str, Any]] = [] def add_record(self, record: Dict[str, Any]) -> None: self.records.append({**record, **self.fixed_columns}) def flush(self, log_info: bool = False) -> None: """ Save the internal records to a csv file. :param log_info: If true, write the final dataframe also to logging.info. """ import pandas as pd if isinstance(self.csv_path, Path): self.csv_path.parent.mkdir(parents=True, exist_ok=True) # Specifying columns such that the order in which columns appear matches the order in which # columns were added in the code. 
columns = self.records[0].keys() if len(self.records) > 0 else None df = pd.DataFrame.from_records(self.records, columns=columns) special_formatting = { MetricType.LEARNING_RATE.value: ".6e", } for column, column_format in special_formatting.items(): if column in df: column_format = "{0:" + column_format + "}" df[column] = df[column].map(lambda x: column_format.format(x)) df.to_csv(self.csv_path, sep=',', mode='w', index=False, float_format="%.6f") if log_info: s = df.to_string(index=False, float_format="%.6f") logging.info(f"\n{s}")
17,813
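The get_metrics_at_optimal_cutoff logic above selects the threshold that maximises TPR - FPR (Youden's J, as in get_optimal_idx), then reports FPR, FNR and accuracy at that point. A minimal standalone sketch of the same selection with scikit-learn, using made-up labels and scores purely for illustration:

import numpy as np
from sklearn.metrics import roc_curve

labels = np.array([0, 0, 1, 1, 1, 0, 1, 0])
scores = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.9, 0.6])

fpr, tpr, thresholds = roc_curve(labels, scores)
optimal_idx = np.argmax(tpr - fpr)              # same criterion as get_optimal_idx
optimal_threshold = thresholds[optimal_idx]
false_positive_rate = fpr[optimal_idx]
false_negative_rate = 1 - tpr[optimal_idx]
accuracy = np.mean((scores > optimal_threshold).astype(int) == labels)
print(optimal_threshold, false_positive_rate, false_negative_rate, accuracy)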
7,581
<reponame>dinhtuyen/PRML01<filename>test/nn/math/divide.py
import unittest

import numpy as np

from prml import nn


class TestDivide(unittest.TestCase):

    def test_divide(self):
        x = nn.Parameter(10.)
        z = x / 2
        self.assertEqual(z.value, 5)
        z.backward()
        self.assertEqual(x.grad, 0.5)

        x = np.random.rand(5, 10, 3)
        y = np.random.rand(10, 1)
        p = nn.Parameter(y)
        z = x / p
        self.assertTrue((z.value == x / y).all())
        z.backward(np.ones((5, 10, 3)))
        d = np.sum(-x / y ** 2, axis=0).sum(axis=1, keepdims=True)
        self.assertTrue((p.grad == d).all())


if __name__ == '__main__':
    unittest.main()
353
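The expected gradient in the divide test above follows from the quotient rule: for z = x / y, dz/dy = -x / y**2, which the test then sums over the broadcast axes. A plain-numpy sketch, independent of the prml package, that checks the scalar case numerically:

import numpy as np

x, y, eps = 3.0, 2.0, 1e-6
analytic = -x / y ** 2                                   # -0.75
numeric = ((x / (y + eps)) - (x / (y - eps))) / (2 * eps)  # central difference
assert np.isclose(analytic, numeric)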
5,857
<gh_stars>1000+ import json import scrapy import re from datetime import datetime import pandas as pd import time from common.util import get_13_time t = get_13_time() from liepinSpecialComJob.items import LiepinspecialcomjobItem class LiepinSpdier(scrapy.Spider): name = 'liepin' start_urls = ['https://vip.liepin.com/883905/1405577359643.shtml', 'https://vip.liepin.com/8161070/joblist.shtml', # 'http://maker.haier.net/custompage/socialchannel/index.html?platformcode=lp', 'https://vip.liepin.com/7855333/joblist.shtml', 'https://vip.liepin.com/8090130/1409730340536.shtml', 'https://vip.liepin.com/8399212/joblist.shtml', 'https://vip.liepin.com/1198424/joblist2.shtml', 'https://vip.liepin.com/8787971/joblist.shtml', 'https://vip.liepin.com/8796178/joblist2.shtml', 'https://vip.liepin.com/8091337/1426475303042.shtml', 'https://vip.liepin.com/7904788/job.shtml', ] def parse(self, response): text = response.text company_name = re.search(r'<title>(.*?) - 猎聘网招聘官网',text).group(1) companyId=re.search(r'CONFIG={"companyId":"([0-9]+)"}',text).group(1) next_meta = response.meta data = pd.read_csv('G:\workspace\y2019m01\/first_lagou\company300.csv', encoding='gbk') try: for i in range(len(data)): n = 0 for j in data.loc[i, '股票简称']: if j in company_name: n += 1 if n == len(data.loc[i, '股票简称']): next_meta['ticker'] = data.loc[i, '股票代码'] print(n, next_meta['ticker'], company_name) except BaseException as e: next_meta['ticker'] ='None' print('ticker匹配错误') next_meta['company_name'] = company_name next_meta['companyId'] = companyId url='https://www.liepin.com/ajaxproxy.html' # headers={ # 'Referer':'https://vip.liepin.com/8091337/1426475303042.shtml' # } yield scrapy.Request(url, callback=self.parse_list, meta=next_meta,dont_filter=True) def parse_list(self,response): next_meta = response.meta companyId = next_meta['companyId'].strip() # print(companyId,response.text) n=0 while n<95: # try: t = get_13_time() # 'https://www.liepin.com/company/sojob.json?pageSize=15&curPage=0&ecompIds=8091337&dq=&publishTime=&keywords=&_=1550383073951' url=f'https://www.liepin.com/company/sojob.json?pageSize=15&curPage={n}&ecompIds={companyId}&dq=&publishTime=&keywords=&_={t}' n+=1 headers={ 'referer':'https://www.liepin.com/ajaxproxy.html' } cookies={ '__uuid': '1550017147980.22', '_uuid': 'E4361B46FFA8441973EC46E6488BD983', 'is_lp_user': 'true', 'need_bind_tel': 'false', 'new_user': 'false', 'c_flag': 'f57e19ed294147b87179e4e6132477f5', 'imClientId': '<KEY>', 'imId': '45e417dd37f82ac6a36687782a0c1c67', 'imClientId_0': '<KEY>', 'imId_0': '45e417dd37f82ac6a36687782a0c1c67', 'gr_user_id': '374534ce-aa54-4880-88ca-7a7bb7adf340', 'bad1b2d9162fab1f80dde1897f7a2972_gr_last_sent_cs1': '463d81f04fd219c61a667e00ad0d9493', 'grwng_uid': 'f3fda8f8-0c2e-4f29-8507-f42f7a9671ec', 'fe_work_exp_add': 'true', 'ADHOC_MEMBERSHIP_CLIENT_ID1.0': 'fa804ff0-2a02-3f31-8dcb-8e13b527dfcb', 'bad1b2d9162fab1f80dde1897f7a2972_gr_cs1': '463d81f04fd219c61a667e00ad0d9493', '__tlog': '1550383052778.97%7C00000000%7C00000000%7C00000000%7C00000000', '_mscid': '00000000', 'Hm_lvt_a2647413544f5a04f00da7eee0d5e200': '1550233873,1550279247,1550281552,1550383053', 'abtest': '0', '_fecdn_': '0', '__session_seq': '2', '__uv_seq': '2', 'Hm_lpvt_a2647413544f5a04f00da7eee0d5e200': '1550383074' } next_meta['ticker'] = next_meta['ticker'] print(next_meta['ticker']) next_meta['company_name'] = next_meta['company_name'] print(next_meta['company_name']) yield scrapy.Request(url, callback=self.parse_job,meta=next_meta,headers=headers,cookies=cookies) # except BaseException as e: 
# print('已完成最后一页') # break def parse_job(self,response): meta = response.meta item = LiepinspecialcomjobItem() text = response.text print('****************************************') json_data = json.loads(text) as_of_date = datetime.now() job_infos=json_data['list'] for job_info in job_infos: origin_site=job_info['url'] job_name=job_info['title'] salary=job_info['salary'] city=job_info['city'] education=job_info['eduLevel'] work_year=job_info['workYear'] pub_time=job_info['time'] function=job_info['dept'] item['ticker'] = meta['ticker'].strip() item['company_name'] = meta['company_name'].strip() item['job_name']=job_name item['salary']=salary item['city']=city item['education']=education item['work_year']=work_year item['pub_time']=pub_time item['as_of_date']=as_of_date item['function']=function item['origin_site']=origin_site yield item #暂不深挖 # for url in origin_sites: # yield scrapy.Request(url, callback=self.parse_job)
3,393
778
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // license: HDF5Application/license.txt // // Main author: <NAME>, https://github.com/msandre // /** @file factor_elements_and_conditions_utility.h * @brief Methods for sorting elements and conditions into homogeneous containers by their type. */ #if !defined(KRATOS_FACTOR_ELEMENTS_AND_CONDITIONS_UTILITY_H_INCLUDED) #define KRATOS_FACTOR_ELEMENTS_AND_CONDITIONS_UTILITY_H_INCLUDED // System includes #include <vector> #include <string> // External includes // Project includes #include "includes/element.h" #include "includes/condition.h" #include "includes/indexed_object.h" #include "containers/pointer_vector_set.h" namespace Kratos { typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType; typedef PointerVectorSet<Condition, IndexedObject> ConditionsContainerType; /// Factor a collection of elements into uniform containers. /** * Each container consists of all elements of a single type. In MPI, * the sequence of element containers is global (i.e., the ith container on * each process corresponds to the same element type), but the contents of * each container on a process are local. If a process has no local elements * corresponding to the ith container, it is empty. */ std::vector<ElementsContainerType> FactorElements(ElementsContainerType const& rElements); void FactorElements(ElementsContainerType const& rElements, std::vector<std::string>& rNames, std::vector<ElementsContainerType>& rFactoredElements); std::vector<ConditionsContainerType> FactorConditions(ConditionsContainerType const& rConditions); void FactorConditions(ConditionsContainerType const& rConditions, std::vector<std::string>& rNames, std::vector<ConditionsContainerType>& rFactoredConditions); } // namespace Kratos. #endif // KRATOS_FACTOR_ELEMENTS_AND_CONDITIONS_UTILITY_H_INCLUDED defined
763
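FactorElements and FactorConditions above sort a mixed container into homogeneous containers keyed by element type. A rough Python analogue of that grouping idea, illustrative only and not the Kratos implementation:

from collections import defaultdict

def factor_by_type(elements):
    # Group a heterogeneous collection into per-type containers,
    # mirroring the intent of FactorElements/FactorConditions.
    factored = defaultdict(list)
    for element in elements:
        factored[type(element).__name__].append(element)
    return dict(factored)

print(factor_by_type([1, 2.5, "tri", 3, "quad"]))
# {'int': [1, 3], 'float': [2.5], 'str': ['tri', 'quad']}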
14,668
// Copyright (c) 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/test/chromedriver/chrome/devtools_endpoint.h" #include "chrome/test/chromedriver/chrome/devtools_http_client.h" #include "chrome/test/chromedriver/net/net_util.h" DevToolsEndpoint::DevToolsEndpoint(int port) : DevToolsEndpoint(NetAddress(port)) {} DevToolsEndpoint::DevToolsEndpoint(const NetAddress& address) : server_url_(std::string("http://") + address.ToString()) {} DevToolsEndpoint::DevToolsEndpoint(const std::string& url) : server_url_(url) {} bool DevToolsEndpoint::IsValid() const { return server_url_.is_valid(); } NetAddress DevToolsEndpoint::Address() const { return NetAddress(server_url_.host(), server_url_.EffectiveIntPort()); } std::string DevToolsEndpoint::GetBrowserDebuggerUrl() const { const std::string scheme = server_url_.SchemeIs("https") ? "wss" : "ws"; url::Replacements<char> replacements; replacements.SetScheme(scheme.c_str(), url::Component(0, scheme.length())); return server_url_.Resolve("devtools/browser/") .ReplaceComponents(replacements) .spec(); } std::string DevToolsEndpoint::GetDebuggerUrl(const std::string& id) const { const std::string scheme = server_url_.SchemeIs("https") ? "wss" : "ws"; url::Replacements<char> replacements; replacements.SetScheme(scheme.c_str(), url::Component(0, scheme.length())); return server_url_.Resolve("devtools/page/" + id) .ReplaceComponents(replacements) .spec(); } std::string DevToolsEndpoint::GetVersionUrl() const { return server_url_.Resolve("json/version").spec(); } std::string DevToolsEndpoint::GetListUrl() const { return server_url_.Resolve("json/list").spec(); } std::string DevToolsEndpoint::GetCloseUrl(const std::string& id) const { return server_url_.Resolve("json/close/" + id).spec(); } std::string DevToolsEndpoint::GetActivateUrl(const std::string& id) const { return server_url_.Resolve("json/activate/" + id).spec(); }
684
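GetBrowserDebuggerUrl and GetDebuggerUrl above resolve a path against the DevTools server URL and swap http(s) for ws(s). A small Python sketch of the same URL manipulation using only the standard library; the function name and sample values are illustrative:

from urllib.parse import urlsplit, urlunsplit, urljoin

def debugger_ws_url(server_url: str, page_id: str) -> str:
    # Resolve the page path against the server URL, then replace the scheme,
    # mirroring DevToolsEndpoint::GetDebuggerUrl.
    resolved = urljoin(server_url, "devtools/page/" + page_id)
    parts = urlsplit(resolved)
    scheme = "wss" if parts.scheme == "https" else "ws"
    return urlunsplit((scheme, parts.netloc, parts.path, parts.query, parts.fragment))

print(debugger_ws_url("http://localhost:9222/", "ABC123"))
# -> ws://localhost:9222/devtools/page/ABC123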
335
{ "word": "Regional", "definitions": [ "A stamp, newspaper, or other thing produced or used in a particular region.", "A sporting contest involving competitors from a particular region." ], "parts-of-speech": "Noun" }
87
1,788
package com.fingerchar.api.config;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.springframework.web.servlet.HandlerInterceptor;

import com.alibaba.fastjson.JSON;
import com.fingerchar.api.utils.JwtHelper;
import com.fingerchar.core.util.ResponseUtil;

/**
 * Token interceptor
 */
@Component
public class TokenInterceptor implements HandlerInterceptor {

    private Logger logger = LoggerFactory.getLogger(this.getClass());

    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
        logger.info("Request path: {}", request.getRequestURL());
        String token = request.getHeader("Finger-Nft-Token");
        logger.info("token:{}", token);
        String userAddress = null;
        if (StringUtils.isEmpty(token)
                || StringUtils.isEmpty((userAddress = JwtHelper.verifyTokenAndGetUserAddress(token)))) {
            this.unLogin(request, response);
            return false;
        }
        request.setAttribute("userAddress", userAddress);
        return true;
    }

    private void unLogin(HttpServletRequest request, HttpServletResponse response) throws Exception {
        Object obj = ResponseUtil.unlogin();
        response.setCharacterEncoding("utf-8");
        response.setContentType("application/json");
        response.getWriter().write(JSON.toJSONString(obj));
    }
}
571
980
package org.jcodec.containers.mkv.util; import java.lang.StringBuilder; import java.nio.ByteBuffer; /** * This class is part of JCodec ( www.jcodec.org ) This software is distributed * under FreeBSD License * * EBML IO implementation * * @author The JCodec project * */ public class EbmlUtil { /** * Encodes unsigned integer with given length * * @param value unsigned integer to be encoded * @param length ebml sequence length * @return */ public static byte[] ebmlEncodeLen(long value, int length) { byte[] b = new byte[length]; for (int idx = 0; idx < length; idx++) { // Rightmost bytes should go to end of array to preserve big-endian notation b[length - idx - 1] = (byte) ((value >>> (8 * idx)) & 0xFFL); } b[0] |= 0x80 >>> (length - 1); return b; } /** * Encodes unsigned integer value according to ebml convention * * @param value unsigned integer to be encoded * @return */ public static byte[] ebmlEncode(long value) { return ebmlEncodeLen(value, ebmlLength(value)); } public static final byte[] lengthOptions = { 0, (byte) 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 }; /** * This method is used mostly during reading EBML bitstream. It asnwers the * question "What is the length of an integer (signed/unsigned) encountered in * the bitstream" * * @param b * @return */ static public int computeLength(byte b) { if (b == 0x00) throw new RuntimeException("Invalid head element for ebml sequence"); int i = 1; while ((b & lengthOptions[i]) == 0) i++; return i; } public static final long one = 0x7F; // 0x3F 0x80 public static final long two = 0x3F80; // 0x1F 0xC0 0x00 public static final long three = 0x1FC000; // 0x0F 0xE0 0x00 0x00 public static final long four = 0x0FE00000; // 0x07 0xF0 0x00 0x00 0x00 public static final long five = 0x07F0000000L; // 0x03 0xF8 0x00 0x00 0x00 0x00 public static final long six = 0x03F800000000L; // 0x01 0xFC 0x00 0x00 0x00 0x00 0x00 public static final long seven = 0x01FC0000000000L; // 0x00 0xFE 0x00 0x00 0x00 0x00 0x00 0x00 public static final long eight = 0xFE000000000000L; public static final long[] ebmlLengthMasks = new long[] { 0, one, two, three, four, five, six, seven, eight }; /** * This method is used mostly during writing EBML bitstream. It answers the * following question "How many bytes should be used to encode unsigned integer * value" * * @param v unsigned integer to be encoded * @return */ public static int ebmlLength(long v) { if (v == 0) return 1; int length = 8; while (length > 0 && (v & ebmlLengthMasks[length]) == 0) length--; return length; } public static String toHexString(byte[] a) { StringBuilder sb = new StringBuilder(); for (byte b : a) sb.append(String.format("0x%02x ", b & 0xff)); return sb.toString(); } public static class VarIntDetail { public final int length; public final long value; public VarIntDetail(int l, long v) { length = l; value = v; } } public static VarIntDetail parseVarInt(ByteBuffer varInt) { // read the first byte byte firstByte = varInt.get(); final int length = EbmlUtil.computeLength(firstByte); if (length == 0) throw new RuntimeException("Invalid ebml integer size."); // use the first byte long value = firstByte & (0xFF >>> length); // use the rest for (int i = 1; i < length; i++) { value = (value << 8) | (varInt.get() & 0xff); } return new VarIntDetail(length, value); } }
1,680
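The EBML variable-size integers handled above store their own length in the leading byte: the position of the first set bit gives the byte count, and the remaining bits plus any following bytes carry the value. A rough Python counterpart of ebmlEncodeLen and parseVarInt, included only as an illustration of the scheme:

def ebml_encode(value: int, length: int) -> bytes:
    # Big-endian payload with the length marker OR'ed into the first byte,
    # mirroring ebmlEncodeLen in the Java code above.
    b = bytearray((value >> (8 * (length - 1 - i))) & 0xFF for i in range(length))
    b[0] |= 0x80 >> (length - 1)
    return bytes(b)

def ebml_parse(data: bytes):
    # Length = 1-based index of the first set bit in the head byte.
    first = data[0]
    length = next(i for i in range(1, 9) if first & (0x80 >> (i - 1)))
    value = first & (0xFF >> length)
    for i in range(1, length):
        value = (value << 8) | data[i]
    return length, value

assert ebml_parse(ebml_encode(500, 2)) == (2, 500)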
4,708
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# ------------------------------------------------------------------------------
# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/evaluation/semantic.py
# Modified by <NAME>
# ------------------------------------------------------------------------------

from collections import OrderedDict

import numpy as np


class SemanticEvaluator:
    """
    Evaluate semantic segmentation

    Args:
        num_classes (int): number of classes
        ignore_index (int, optional): value in semantic segmentation ground truth.
            Predictions for the corresponding pixels should be ignored. Default: 255.
    """

    def __init__(self, num_classes, ignore_index=255):
        self._num_classes = num_classes
        self._ignore_index = ignore_index
        self._N = num_classes + 1  # store ignore label in the last class

        self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64)

    def update(self, pred, gt):
        pred = pred.astype(np.int)
        gt = gt.astype(np.int)
        gt[gt == self._ignore_index] = self._num_classes

        # row: pred, column: gt
        self._conf_matrix += np.bincount(
            self._N * pred.reshape(-1) + gt.reshape(-1),
            minlength=self._N**2).reshape(self._N, self._N)

    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):

        * Mean intersection-over-union averaged across classes (mIoU)
        * Frequency Weighted IoU (fwIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)
        """
        acc = np.zeros(self._num_classes, dtype=np.float)
        iou = np.zeros(self._num_classes, dtype=np.float)
        tp = self._conf_matrix.diagonal()[:-1].astype(np.float)
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float)
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float)
        acc_valid = pos_pred > 0
        acc[acc_valid] = tp[acc_valid] / pos_pred[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
        macc = np.sum(acc) / np.sum(acc_valid)
        miou = np.sum(iou) / np.sum(iou_valid)
        fiou = np.sum(iou * class_weights)
        pacc = np.sum(tp) / np.sum(pos_gt)

        res = {}
        res["mIoU"] = 100 * miou
        res["fwIoU"] = 100 * fiou
        res["mACC"] = 100 * macc
        res["pACC"] = 100 * pacc
        results = OrderedDict({"sem_seg": res})
        return results
1,302
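The evaluator above accumulates an (N+1) x (N+1) confusion matrix, with rows indexed by prediction and columns by ground truth and the extra slot absorbing ignored pixels, then derives per-class IoU as TP / (GT + Pred - TP). A compact numpy illustration on a toy 2-class matrix (values are made up):

import numpy as np

# Toy 2-class confusion matrix, rows = prediction, columns = ground truth.
conf = np.array([[50, 10],
                 [ 5, 35]], dtype=np.float64)

tp = np.diag(conf)                    # [50, 35]
pos_gt = conf.sum(axis=0)             # ground-truth pixels per class: [55, 45]
pos_pred = conf.sum(axis=1)           # predicted pixels per class:    [60, 40]
iou = tp / (pos_gt + pos_pred - tp)   # [50/65, 35/50] ~= [0.769, 0.700]
miou = iou.mean()
pacc = tp.sum() / conf.sum()          # 85 / 100 = 0.85
print(iou, miou, pacc)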
13,648
<reponame>rxchen/micropython /* * This file is part of the MicroPython project, http://micropython.org/ * * The MIT License (MIT) * * Copyright (c) 2021 <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "py/runtime.h" #include "py/mphal.h" #include "usb_serial_jtag.h" #if CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG #include "hal/usb_serial_jtag_ll.h" #include "esp_intr_alloc.h" #include "soc/periph_defs.h" #define USB_SERIAL_JTAG_BUF_SIZE (64) static uint8_t rx_buf[USB_SERIAL_JTAG_BUF_SIZE]; static volatile bool terminal_connected = false; static void usb_serial_jtag_isr_handler(void *arg) { uint32_t flags = usb_serial_jtag_ll_get_intsts_mask(); if (flags & USB_SERIAL_JTAG_INTR_SOF) { usb_serial_jtag_ll_clr_intsts_mask(USB_SERIAL_JTAG_INTR_SOF); } if (flags & USB_SERIAL_JTAG_INTR_SERIAL_OUT_RECV_PKT) { usb_serial_jtag_ll_clr_intsts_mask(USB_SERIAL_JTAG_INTR_SERIAL_OUT_RECV_PKT); size_t req_len = ringbuf_free(&stdin_ringbuf); if (req_len > USB_SERIAL_JTAG_BUF_SIZE) { req_len = USB_SERIAL_JTAG_BUF_SIZE; } size_t len = usb_serial_jtag_ll_read_rxfifo(rx_buf, req_len); for (size_t i = 0; i < len; ++i) { if (rx_buf[i] == mp_interrupt_char) { mp_sched_keyboard_interrupt(); } else { ringbuf_put(&stdin_ringbuf, rx_buf[i]); } } mp_hal_wake_main_task_from_isr(); } } void usb_serial_jtag_init(void) { usb_serial_jtag_ll_clr_intsts_mask(USB_SERIAL_JTAG_INTR_SERIAL_OUT_RECV_PKT | USB_SERIAL_JTAG_INTR_SOF); usb_serial_jtag_ll_ena_intr_mask(USB_SERIAL_JTAG_INTR_SERIAL_OUT_RECV_PKT | USB_SERIAL_JTAG_INTR_SOF); ESP_ERROR_CHECK(esp_intr_alloc(ETS_USB_SERIAL_JTAG_INTR_SOURCE, ESP_INTR_FLAG_LEVEL1, usb_serial_jtag_isr_handler, NULL, NULL)); } void usb_serial_jtag_tx_strn(const char *str, size_t len) { while (len) { size_t l = len; if (l > USB_SERIAL_JTAG_PACKET_SZ_BYTES) { l = USB_SERIAL_JTAG_PACKET_SZ_BYTES; } portTickType start_tick = xTaskGetTickCount(); while (!usb_serial_jtag_ll_txfifo_writable()) { portTickType now_tick = xTaskGetTickCount(); if (!terminal_connected || now_tick > (start_tick + pdMS_TO_TICKS(200))) { terminal_connected = false; return; } } terminal_connected = true; l = usb_serial_jtag_ll_write_txfifo((const uint8_t *)str, l); usb_serial_jtag_ll_txfifo_flush(); str += l; len -= l; } } #endif // CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG
1,628
666
<reponame>18668031200/spring-cloud-config-admin<filename>scca-rest/src/main/java/com/didispace/scca/rest/SccaRestProperties.java
package com.didispace.scca.rest;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

@Data
@ConfigurationProperties("scca.rest")
public class SccaRestProperties {

    /**
     * The context path of the scca-rest module.
     */
    private String contextPath = "";
}
153
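A minimal consumption sketch for the `SccaRestProperties` class above (illustrative only, not part of the repository). It assumes `scca.rest.context-path=/scca` is set in `application.properties` and that the properties class is registered with Spring Boot via `@EnableConfigurationProperties`; Lombok's `@Data` supplies the generated `getContextPath()` accessor.

```java
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.stereotype.Component;

import com.didispace.scca.rest.SccaRestProperties;

// Registers the @ConfigurationProperties bean so Spring binds scca.rest.* values to it.
@Configuration
@EnableConfigurationProperties(SccaRestProperties.class)
class SccaRestPropertiesConfig {
}

// Any bean can then inject the bound properties.
@Component
class ContextPathPrinter {
    ContextPathPrinter(SccaRestProperties props) {
        // With scca.rest.context-path=/scca configured this prints "/scca";
        // otherwise it falls back to the empty-string default declared in the class.
        System.out.println("scca-rest context path: " + props.getContextPath());
    }
}
```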
709
<reponame>security-geeks/jackhammer package com.olacabs.jackhammer.db; import com.olacabs.jackhammer.models.GroupUser; import com.olacabs.jackhammer.models.mapper.GroupUserMapper; import org.skife.jdbi.v2.sqlobject.Bind; import org.skife.jdbi.v2.sqlobject.BindBean; import org.skife.jdbi.v2.sqlobject.SqlQuery; import org.skife.jdbi.v2.sqlobject.SqlUpdate; import org.skife.jdbi.v2.sqlobject.customizers.RegisterMapper; import java.util.List; @RegisterMapper(GroupUserMapper.class) public interface GroupUserDAO { @SqlUpdate("insert into groupsUsers (groupId, userId) values (:gu.groupId, :gu.userId)") long insert(@BindBean("gu") GroupUser groupUser); @SqlQuery("select * from groupsUsers where userId=:userId and isDeleted=false") List<GroupUser> findByUserId(@Bind("userId") long userId); @SqlUpdate("update groupsUsers set isDeleted=true where groupId = :gu.groupId and userId = :gu.userId") void delete(@BindBean("gu") GroupUser groupUser); }
365
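A usage sketch for the JDBI v2 SQL-object DAO above (not part of the original project). Only the DAO methods come from the record; the JDBC URL, credentials, and the `GroupUser` setter names are assumptions made for illustration.

```java
import org.skife.jdbi.v2.DBI;

import com.olacabs.jackhammer.db.GroupUserDAO;
import com.olacabs.jackhammer.models.GroupUser;

public class GroupUserDaoExample {
    public static void main(String[] args) {
        // JDBI v2 builds an implementation of the annotated interface on demand.
        DBI dbi = new DBI("jdbc:mysql://localhost:3306/jackhammer", "dbUser", "dbPassword");
        GroupUserDAO dao = dbi.onDemand(GroupUserDAO.class);

        GroupUser link = new GroupUser();
        link.setGroupId(1L);   // assumed POJO setters matching the :gu.groupId / :gu.userId bindings
        link.setUserId(42L);

        dao.insert(link);                                  // insert into groupsUsers ...
        System.out.println(dao.findByUserId(42L).size());  // rows where userId=42 and isDeleted=false
        dao.delete(link);                                  // soft delete: sets isDeleted=true
    }
}
```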
5,788
<gh_stars>1000+ // Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. // Full license terms provided in LICENSE.md file. #include "internal_utils.h" #include <iomanip> #include <sstream> #include "conv_utils.h" namespace redtail { namespace tensorrt { using namespace nvinfer1; // ----------------------------------------------------------------- // 3D convolution plugin. // For more information on how 3D convolution is implemented, see // comments in conv_utils.h // ----------------------------------------------------------------- class Conv3DPlugin: public IPlugin { public: Conv3DPlugin(Conv3DType conv_type, Dims kernel_dims, Dims stride_dims, Dims pad_start_dims, Dims pad_end_dims, Weights kernel_weights, Weights bias_weights, ILogger& log, std::string name): conv_type_(conv_type), w_dims_(kernel_dims), stride_dims_(stride_dims), pad_start_dims_(pad_start_dims), pad_end_dims_(pad_end_dims), kernel_weights_(kernel_weights), bias_weights_(bias_weights), log_(log), name_(name) { // REVIEW alexeyk: TRT currently does not support FP16 data tensors so we // use weights tensor data type for all descriptors. In case weights // are in FP16 we'll do the conversion on the fly. This should be changed // when TRT adds full support for FP16. // For FP16 we support only TRUE_HALF_CONFIG mode: // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionForward data_type_ = CUDNN_DATA_FLOAT; // Expecting kernel to be 5D tensor in KVCRS format. assert(w_dims_.nbDims == 5); // Expecting stride to be 3D tensor in DHW format. assert(stride_dims.nbDims == 3); // Expecting padding to be 3D tensors in DHW format. assert(pad_start_dims.nbDims == 3); assert(pad_end_dims.nbDims == 3); // Currently only symmetric padding is supported for H,W dims. assert(pad_start_dims_.d[1] == pad_end_dims_.d[1]); assert(pad_start_dims_.d[2] == pad_end_dims_.d[2]); // Special case (TF-compatible) of asymmetric padding is supported for D dim. assert(pad_start_dims_.d[0] == pad_end_dims_.d[0] || pad_start_dims_.d[0] == pad_end_dims_.d[0] - 1); // TRT supprots FP32/FP16 weights. assert(kernel_weights_.type == DataType::kFLOAT || kernel_weights_.type == DataType::kHALF); assert(kernel_weights_.count > 0 && kernel_weights_.values != nullptr); // TRT supprots FP32/FP16 weights. assert(bias_weights_.type == DataType::kFLOAT || bias_weights_.type == DataType::kHALF); assert((bias_weights_.count > 0 && bias_weights_.values != nullptr) || (bias_weights_.count == 0 && bias_weights_.values == nullptr)); // Assume same type for simplicity. assert(bias_weights_.type == kernel_weights_.type); weights_type_ = trtToCudnnDataType(kernel_weights_.type); } Conv3DPlugin(Conv3DPlugin&&) = delete; int getNbOutputs() const override { return 1; } Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) override { assert(index == 0); assert(nbInputDims == 1); assert(inputs[0].nbDims == 4); x_dims_ = DimsNCHW(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2], inputs[0].d[3]); createDescriptors(); // Can use batch_size == 1 to set tensor descriptors initially. // Set input descriptor. ConvUtils::setConv3DTensorDescriptor(conv_type_, x_dims_, 1, weights_type_, x_desc_, log_); // Set conv operation descriptors. ConvUtils::setConv3DOperationDescriptors(conv_type_, w_dims_, stride_dims_, pad_start_dims_, weights_type_, w_desc_, c_desc_, log_); // Compute output dims. auto y_d = ConvUtils::getConv3DOutputDims(c_desc_, x_desc_, w_desc_, log_); // Remove batch index dim. 
y_dims_ = DimsNCHW(y_d.d[1], y_d.d[2], y_d.d[3], y_d.d[4]); // Output tensor is always in cuDNN format. ConvUtils::setConv3DTensorDescriptor(Conv3DType::kCuDnn, y_dims_, 1, weights_type_, y_desc_, log_); // Set bias descriptor. // REVIEW alexeyk: see the comment in tensorrt_model_builder.py re: the stride issue in Conv3D. ConvUtils::setConv3DBiasDescriptor(Dims{5, {1, y_dims_.d[0], 1, 1, 1}}, weights_type_, b_desc_, log_); return y_dims_; } void configure(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, int maxBatchSize) override { assert(isValid()); assert(nbInputs == 1); assert(nbOutputs == 1); assert(DimsUtils::areEqual(inputDims[0], x_dims_)); assert(DimsUtils::areEqual(outputDims[0], y_dims_)); max_batch_size_ = maxBatchSize; // Update in/out descriptors and run auto-tuner to find best (fastest) algo. ConvUtils::setConv3DTensorDescriptor(conv_type_, x_dims_, maxBatchSize, weights_type_, x_desc_, log_); ConvUtils::setConv3DTensorDescriptor(Conv3DType::kCuDnn, y_dims_, maxBatchSize, weights_type_, y_desc_, log_); findBestAlgo(); const size_t elt_size = getWeightsDataTypeSize(); // Need workspace for FP32 -> FP16 conversion. if (isFP16()) workspace_bytes_ += max_batch_size_ * std::max(DimsUtils::getTensorSize(x_dims_), DimsUtils::getTensorSize(y_dims_)) * elt_size; // Allocate memory and copy weights. CHECK(cudaMalloc(&kernel_weights_d_, kernel_weights_.count * elt_size)); CHECK(cudaMemcpy(kernel_weights_d_, kernel_weights_.values, kernel_weights_.count * elt_size, cudaMemcpyHostToDevice)); if (bias_weights_.count > 0) { CHECK(cudaMalloc(&bias_weights_d_, bias_weights_.count * elt_size)); CHECK(cudaMemcpy(bias_weights_d_, bias_weights_.values, bias_weights_.count * elt_size, cudaMemcpyHostToDevice)); } log_.log(ILogger::Severity::kINFO, (name_ + ": InDims : " + DimsUtils::toString(x_dims_)).c_str()); log_.log(ILogger::Severity::kINFO, (name_ + ": OutDims : " + DimsUtils::toString(y_dims_)).c_str()); } int initialize() override { assert(isValid()); return 0; } void terminate() override { assert(isValid()); if (c_desc_ != nullptr) CHECK(cudnnDestroyConvolutionDescriptor(c_desc_)); if (w_desc_ != nullptr) CHECK(cudnnDestroyFilterDescriptor(w_desc_)); if (x_desc_ != nullptr) CHECK(cudnnDestroyTensorDescriptor(x_desc_)); if (y_desc_ != nullptr) CHECK(cudnnDestroyTensorDescriptor(y_desc_)); if (b_desc_ != nullptr) CHECK(cudnnDestroyTensorDescriptor(b_desc_)); if (cudnn_ != nullptr) CHECK(cudnnDestroy(cudnn_)); if (kernel_weights_d_ != nullptr) CHECK(cudaFree(kernel_weights_d_)); if (bias_weights_d_ != nullptr) CHECK(cudaFree(bias_weights_d_)); c_desc_ = nullptr; w_desc_ = nullptr; x_desc_ = nullptr; y_desc_ = nullptr; b_desc_ = nullptr; cudnn_ = nullptr; kernel_weights_d_ = nullptr; bias_weights_d_ = nullptr; assert(!isValid()); } size_t getWorkspaceSize(int maxBatchSize) const { assert(isValid()); assert(max_batch_size_ == maxBatchSize); return workspace_bytes_; } int enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) override { assert(isValid()); // REVIEW alexeyk: for now assuming batch size always equals max batch size. // That's pretty strict as it disables dynamic batch sizes but fine for now. assert(batchSize == max_batch_size_); cudnnStatus_t status; CHECK(status = cudnnSetStream(cudnn_, stream)); size_t workspace_used_bytes = 0; // Convert to FP16 first if needed. 
auto px = preprocessInput(batchSize, inputs[0], workspace, stream, workspace_used_bytes); assert(px != nullptr); assert(workspace_used_bytes <= workspace_bytes_); CHECK(status = cudnnConvolutionForward(cudnn_, &Consts::kOne, x_desc_, px, w_desc_, kernel_weights_d_, c_desc_, best_algo_, static_cast<uint8_t*>(workspace) + workspace_used_bytes, workspace_bytes_ - workspace_used_bytes, &Consts::kZero, y_desc_, outputs[0])); if (bias_weights_.count > 0) CHECK(status = cudnnAddTensor(cudnn_, &Consts::kOne, b_desc_, bias_weights_d_, &Consts::kOne, y_desc_, outputs[0])); // Convert back to FP32 if needed. postprocessOutput(batchSize, outputs[0], workspace, stream); return status == CUDNN_STATUS_SUCCESS ? 0 : -1; } size_t getSerializationSize() override { assert(isValid()); return 0; } void serialize(void* buffer) override { assert(isValid()); // REVIEW alexeyk: implement. assert(false); } private: bool isValid() const { return cudnn_ != nullptr; } bool isFP16() const { return weights_type_ == CUDNN_DATA_HALF; } size_t getWeightsDataTypeSize() const { return (isFP16() ? sizeof(uint16_t) : sizeof(float)); } const void* preprocessInput(int batchSize, const void* x, void* workspace, cudaStream_t stream, size_t& workspace_used_bytes) { if (!isFP16()) return x; assert(data_type_ == CUDNN_DATA_FLOAT); // Convert to FP16 using workspace. size_t x_size = batchSize * DimsUtils::getTensorSize(x_dims_); CHECK(CudaKernels::fp32Tofp16((const float*)x, (uint16_t*)workspace, x_size, stream)); workspace_used_bytes = x_size * sizeof(uint16_t); return workspace; } void postprocessOutput(int batchSize, void* y, void* workspace, cudaStream_t stream) { if (!isFP16()) return; assert(data_type_ == CUDNN_DATA_FLOAT); size_t y_size = batchSize * DimsUtils::getTensorSize(y_dims_); // Copy to workspace first. CHECK(cudaMemcpyAsync(workspace, y, y_size * sizeof(uint16_t), cudaMemcpyDeviceToDevice, stream)); // Convert to FP32 from workspace. CHECK(CudaKernels::fp16Tofp32((const uint16_t*)workspace, (float*)y, y_size, stream)); } void createDescriptors() { if (cudnn_ == nullptr) CHECK(cudnnCreate(&cudnn_)); if (x_desc_ == nullptr) CHECK(cudnnCreateTensorDescriptor(&x_desc_)); if (y_desc_ == nullptr) CHECK(cudnnCreateTensorDescriptor(&y_desc_)); if (w_desc_ == nullptr) CHECK(cudnnCreateFilterDescriptor(&w_desc_)); if (c_desc_ == nullptr) CHECK(cudnnCreateConvolutionDescriptor(&c_desc_)); if (b_desc_ == nullptr) CHECK(cudnnCreateTensorDescriptor(&b_desc_)); } void findBestAlgo() { // Let's hope cuDNN team will not come up with more than that number of algos (8 in cuDNN 7). const int algo_count = 20; int res_algo_count; cudnnConvolutionFwdAlgoPerf_t algos[algo_count]; auto err = cudnnFindConvolutionForwardAlgorithm(cudnn_, x_desc_, w_desc_, c_desc_, y_desc_, algo_count, &res_algo_count, algos); // Currently (v7.1) cuDNN fails with CUDNN_STATUS_ALLOC_FAILED/CUDNN_STATUS_BAD_PARAM // apparently while trying to allocate workspace when enumerating algos. // Handle this case separately and use algo that does not require workspace. // This does not affect correctness as the actual computation will be done later // and will fail in case of a genuine error. // REVIEW alexeyk: fix this when cuDNN is fixed. 
if (err == CUDNN_STATUS_ALLOC_FAILED || algos[0].status == CUDNN_STATUS_BAD_PARAM) { res_algo_count = 1; algos[0].algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; algos[0].status = CUDNN_STATUS_SUCCESS; algos[0].memory = 0; algos[0].time = -1; } assert(res_algo_count > 0); assert(algos[0].status == CUDNN_STATUS_SUCCESS); // Best algo is the first. best_algo_ = algos[0].algo; workspace_bytes_ = algos[0].memory; // Log results. log_.log(ILogger::Severity::kINFO, (name_ + ": --> Conv3D layer tuning results:").c_str()); for (auto& a: algos) { if (a.status != CUDNN_STATUS_SUCCESS) break; std::ostringstream str; str << a.algo << ": " << std::fixed << std::setw(8) << std::setprecision(1) << a.time << "ms, " << std::fixed << std::setw(8) << a.memory << "B"; log_.log(ILogger::Severity::kINFO, str.str().c_str()); } log_.log(ILogger::Severity::kINFO, (name_ + ": <-- Conv3D layer tuning results.").c_str()); } private: Conv3DType conv_type_; cudnnDataType_t data_type_; cudnnDataType_t weights_type_; // Using DimsNCHW to represent 3D convos input/output is an ugly workaround // of TRT limitations which currently result in assert in the guts of TRT. DimsNCHW x_dims_; DimsNCHW y_dims_; Dims w_dims_; Dims stride_dims_; Dims pad_start_dims_; Dims pad_end_dims_; int max_batch_size_ = 0; // Kernel weights on the host. Weights kernel_weights_; // Kernel weights on the device. float* kernel_weights_d_ = nullptr; // Bias weights on the host. Weights bias_weights_; // Bias weights on the device. float* bias_weights_d_ = nullptr; cudnnHandle_t cudnn_ = nullptr; cudnnTensorDescriptor_t x_desc_ = nullptr; cudnnTensorDescriptor_t y_desc_ = nullptr; cudnnFilterDescriptor_t w_desc_ = nullptr; cudnnConvolutionDescriptor_t c_desc_ = nullptr; cudnnTensorDescriptor_t b_desc_ = nullptr; cudnnConvolutionFwdAlgo_t best_algo_ = (cudnnConvolutionFwdAlgo_t)-1; size_t workspace_bytes_ = 0; ILogger& log_; std::string name_; }; // Factory method. IPlugin* PluginContainer::createConv3DPlugin(Conv3DType conv_type, Dims kernel_dims, Dims stride_dims, Dims pad_start_dims, Dims pad_end_dims, Weights kernel_weights, Weights bias_weights, std::string name) { std::lock_guard<std::mutex> lock(lock_); plugins_.push_back(new Conv3DPlugin(conv_type, kernel_dims, stride_dims, pad_start_dims, pad_end_dims, kernel_weights, bias_weights, log_, name)); return plugins_.back(); } } }
7,265
575
<gh_stars>100-1000 // // DemoCameraHelper.h // DJISdkDemo // // Created by <NAME> on 7/5/18. // Copyright © 2018 DJI. All rights reserved. // #import <Foundation/Foundation.h> #import <DJISDK/DJISDK.h> @interface DemoCameraHelper : NSObject + (BOOL)isXT2Camera; + (nullable DJICamera *)connectedThermalCamera; + (nullable DJICamera *)connectedXT2VisionCamera; + (nullable DJICameraKey *)thermalCameraKeyWithParam:(nonnull NSString *)param; + (nullable DJICamera *)cameraAtComponentIndex:(NSInteger)componentIndex; + (BOOL)isMultilensCamera:(nonnull NSString *)cameraName; @end
205
3,200
<reponame>mindspore-ai/mindspore<gh_stars>1000+ /** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MINDSPORE_CCSRC_PS_CORE_COMMUNICATOR_TCP_COMMUNICATOR_H_ #define MINDSPORE_CCSRC_PS_CORE_COMMUNICATOR_TCP_COMMUNICATOR_H_ #include <map> #include <vector> #include <string> #include <memory> #include <unordered_map> #include "proto/ps.pb.h" #include "ps/core/server_node.h" #include "ps/core/cluster_metadata.h" #include "ps/core/cluster_config.h" #include "ps/ps_context.h" #include "ps/core/communicator/task_executor.h" #include "ps/core/communicator/communicator_base.h" #include "ps/core/communicator/tcp_msg_handler.h" #include "ps/core/comm_util.h" #include "ps/constants.h" namespace mindspore { namespace ps { namespace core { const std::unordered_map<TcpUserCommand, std::string> kUserCommandToMsgType = { {TcpUserCommand::kPush, "push"}, {TcpUserCommand::kPull, "pull"}, {TcpUserCommand::kCount, "count"}, {TcpUserCommand::kReachThreshold, "countReachThreshold"}, {TcpUserCommand::kResetCount, "resetCnt"}, {TcpUserCommand::kGetMetadata, "getMetadata"}, {TcpUserCommand::kUpdateMetadata, "updateMetadata"}, {TcpUserCommand::kCounterEvent, "counterEvent"}, {TcpUserCommand::kPullWeight, "pullWeight"}, {TcpUserCommand::kPushWeight, "pushWeight"}, {TcpUserCommand::kSyncIteration, "syncIteration"}, {TcpUserCommand::kNotifyLeaderToNextIter, "notifyLeaderToNextIter"}, {TcpUserCommand::kPrepareForNextIter, "prepareForNextIter"}, {TcpUserCommand::kProceedToNextIter, "proceedToNextIter"}, {TcpUserCommand::kEndLastIter, "endLastIter"}, {TcpUserCommand::kStartFLJob, "startFLJob"}, {TcpUserCommand::kUpdateModel, "updateModel"}, {TcpUserCommand::kGetModel, "getModel"}, {TcpUserCommand::kPushMetrics, "pushMetrics"}, {TcpUserCommand::kNewInstance, "newInstance"}, {TcpUserCommand::kQueryInstance, "queryInstance"}, {TcpUserCommand::kEnableFLS, "enableFLS"}, {TcpUserCommand::kDisableFLS, "disableFLS"}}; class TcpCommunicator : public CommunicatorBase { public: explicit TcpCommunicator(const std::shared_ptr<TaskExecutor> &task_executor, AbstractNode *node) : task_executor_(task_executor), server_num_(0), worker_num_(0), scheduler_ip_(""), scheduler_port_(0), abstrace_node_(node) {} ~TcpCommunicator() = default; bool Start() override; bool Stop() override; void RegisterMsgCallBack(const std::string &msg_type, const MessageCallback &cb) override; void RegisterEventCallback(const core::ClusterEvent &event, const EventCallback &event_cb); template <class T> bool SendPbRequest(const T &pb_msg, const uint32_t &rank_id, TcpUserCommand command, std::shared_ptr<std::vector<unsigned char>> *output = nullptr) { const std::string &msg_str = pb_msg.SerializeAsString(); std::shared_ptr<unsigned char[]> msg(new unsigned char[msg_str.size()]); MS_ERROR_IF_NULL_W_RET_VAL(msg, false); size_t dest_size = msg_str.size(); size_t src_size = msg_str.size(); if (memcpy_s(msg.get(), dest_size, msg_str.c_str(), src_size) != EOK) { MS_LOG(EXCEPTION) << "Memcpy_s error"; } if (output != nullptr) { if 
(!abstrace_node_->Send(NodeRole::SERVER, rank_id, msg, msg_str.size(), static_cast<int>(command), output)) { MS_LOG(ERROR) << "Sending protobuffer message to server " << rank_id << " failed."; return false; } } else { if (!abstrace_node_->Send(NodeRole::SERVER, rank_id, msg, msg_str.size(), static_cast<int>(command))) { MS_LOG(ERROR) << "Sending protobuffer message to server " << rank_id << " failed."; return false; } } return true; } private: std::shared_ptr<TaskExecutor> task_executor_; TcpMsgCallback tcp_msg_callback_; OnNodeEventCallback event_callback_; uint32_t server_num_; uint32_t worker_num_; std::string scheduler_ip_; uint16_t scheduler_port_; AbstractNode *abstrace_node_; }; } // namespace core } // namespace ps } // namespace mindspore #endif // MINDSPORE_CCSRC_PS_CORE_COMMUNICATOR_TCP_COMMUNICATOR_H_
1,729
462
<gh_stars>100-1000 from .account import AWSAccountCollector from .region import AWSRegionCollector
29
5,169
<gh_stars>1000+ { "name": "PebbleKit-Static", "version": "3.1.1", "summary": "Embed PebbleKit iOS into your app to communicate with Pebble", "homepage": "https://github.com/pebble/pebble-ios-sdk", "license": { "type": "Commercial", "text": " © 2015 Pebble Technology Corp. All rights reserved.\n" }, "authors": { "Pebble Technology Corp.": "<EMAIL>" }, "source": { "git": "https://github.com/pebble/pebble-ios-sdk.git", "tag": "3.1.1" }, "platforms": { "ios": "7.0" }, "requires_arc": true, "header_dir": "PebbleKit", "preserve_paths": "PebbleKit-Static.framework", "vendored_frameworks": "PebbleKit-Static.framework", "public_header_files": "PebbleKit-Static.framework/Versions/A/Headers/*.h", "frameworks": [ "Foundation", "UIKit", "CoreGraphics" ], "weak_frameworks": [ "ExternalAccessory", "CoreBluetooth" ] }
387
782
/*
 * Copyright (c) 2021, <NAME>. All Rights Reserved.
 *
 * This file is part of BoofCV (http://boofcv.org).
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package boofcv.app.fiducials;

import boofcv.alg.drawing.FiducialImageEngine;
import boofcv.alg.fiducial.square.FiducialSquareHammingGenerator;
import boofcv.factory.fiducial.ConfigHammingMarker;
import org.ddogleg.struct.DogArray_I32;

/**
 * Generates images of square hamming fiducial markers
 *
 * @author <NAME>
 */
public class CreateSquareHammingDocumentImage extends CreateFiducialDocumentImage {

    private FiducialSquareHammingGenerator g;
    FiducialImageEngine render = new FiducialImageEngine();
    public ConfigHammingMarker config = new ConfigHammingMarker();

    int whiteBorderPixels;

    public CreateSquareHammingDocumentImage( String documentName ) {
        super(documentName);
        g = new FiducialSquareHammingGenerator(config);
        g.setRenderer(render);
    }

    public FiducialSquareHammingGenerator getGenerator() {
        return g;
    }

    public void render( DogArray_I32 markerIDs ) {
        if (markerHeight > 0)
            throw new IllegalArgumentException("markerHeight must be < 0 since only square is supported");

        g.setMarkerWidth(markerWidth);
        render.configure(whiteBorderPixels, (int)g.getMarkerWidth());
        for (int i = 0; i < markerIDs.size; i++) {
            g.generate(markerIDs.get(i));
            save(render.getGray(), markerIDs.get(i)+"");
        }
    }

    public void setWhiteBorder( int pixels ) {
        whiteBorderPixels = pixels;
    }
}
632
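A hypothetical driver for the marker generator above, shown only to illustrate the call sequence; it is not part of BoofCV. The `HammingMarkerExample` class is invented, and it assumes the inherited `markerWidth`/`markerHeight` members of `CreateFiducialDocumentImage` are directly assignable and that `documentName` is used as the output file prefix by `save()`.

```java
import org.ddogleg.struct.DogArray_I32;

import boofcv.app.fiducials.CreateSquareHammingDocumentImage;

public class HammingMarkerExample {
    public static void main(String[] args) {
        CreateSquareHammingDocumentImage doc = new CreateSquareHammingDocumentImage("hamming");

        doc.markerWidth = 400;   // marker edge length in pixels (field access assumed)
        doc.markerHeight = -1;   // render() rejects positive heights: only square markers are supported
        doc.setWhiteBorder(20);  // quiet-zone width in pixels

        DogArray_I32 ids = new DogArray_I32();
        ids.add(0);
        ids.add(1);
        ids.add(2);

        doc.render(ids);         // encodes each ID and saves one image per marker
    }
}
```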
14,425
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.router.webapp; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServiceProtocol; import org.apache.hadoop.yarn.server.webapp.WebServices; import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; /** * Defines the contract to be implemented by the request intercepter classes, * that can be used to intercept and inspect messages sent from the client to * the resource manager server. * * This class includes 4 methods getAppAttempts, getAppAttempt, getContainers * and getContainer that belong to {@link WebServices}. They are in this class * to make sure that RouterWebServices implements the same REST methods of * {@code RMWebServices}. */ public interface RESTRequestInterceptor extends RMWebServiceProtocol, Configurable { /** * This method is called for initializing the intercepter. This is guaranteed * to be called only once in the lifetime of this instance. * * @param user the name of the client */ void init(String user); /** * This method is called to release the resources held by the intercepter. * This will be called when the application pipeline is being destroyed. The * concrete implementations should dispose the resources and forward the * request to the next intercepter, if any. */ void shutdown(); /** * Sets the next intercepter in the pipeline. The concrete implementation of * this interface should always pass the request to the nextInterceptor after * inspecting the message. The last intercepter in the chain is responsible to * send the messages to the resource manager service and so the last * intercepter will not receive this method call. * * @param nextInterceptor the RESTRequestInterceptor to set in the pipeline */ void setNextInterceptor(RESTRequestInterceptor nextInterceptor); /** * Returns the next intercepter in the chain. * * @return the next intercepter in the chain */ RESTRequestInterceptor getNextInterceptor(); /** * * @see WebServices#getAppAttempt(HttpServletRequest, HttpServletResponse, * String, String) * @param req the servlet request * @param res the servlet response * @param appId the application we want to get the appAttempt. It is a * PathParam. * @param appAttemptId the AppAttempt we want to get the info. It is a * PathParam. 
* @return AppAttemptInfo of the specific AppAttempt */ AppAttemptInfo getAppAttempt(HttpServletRequest req, HttpServletResponse res, String appId, String appAttemptId); /** * * @see WebServices#getContainers(HttpServletRequest, HttpServletResponse, * String, String) * @param req the servlet request * @param res the servlet response * @param appId the application we want to get the containers info. It is a * PathParam. * @param appAttemptId the AppAttempt we want to get the info. It is a * PathParam. * @return ContainersInfo of all the containers that belong to the specific * AppAttempt */ ContainersInfo getContainers(HttpServletRequest req, HttpServletResponse res, String appId, String appAttemptId); /** * * @see WebServices#getContainer(HttpServletRequest, HttpServletResponse, * String, String, String) * @param req the servlet request * @param res the servlet response * @param appId the application we want to get the containers info. It is a * PathParam. * @param appAttemptId the AppAttempt we want to get the info. It is a * PathParam. * @param containerId the container we want to get the info. It is a * PathParam. * @return ContainerInfo of the specific ContainerId */ ContainerInfo getContainer(HttpServletRequest req, HttpServletResponse res, String appId, String appAttemptId, String containerId); }
1,485
550
package play.db.jpa; import javassist.CtClass; import javassist.CtMethod; import play.classloading.ApplicationClasses.ApplicationClass; import play.classloading.enhancers.Enhancer; /** * Enhance JPABase entities classes */ public class JPAEnhancer extends Enhancer { public void enhanceThisClass(ApplicationClass applicationClass) throws Exception { CtClass ctClass = makeClass(applicationClass); if (!ctClass.subtypeOf(classPool.get("play.db.jpa.JPABase"))) { return; } // Enhance only JPA entities if (!hasAnnotation(ctClass, "javax.persistence.Entity")) { return; } String entityName = ctClass.getName(); // count CtMethod count = CtMethod.make("public static long count() { return play.db.jpa.JPQL.instance.count(\"" + entityName + "\"); }", ctClass); ctClass.addMethod(count); // count2 CtMethod count2 = CtMethod.make("public static long count(String query, Object[] params) { return play.db.jpa.JPQL.instance.count(\"" + entityName + "\", query, params); }", ctClass); ctClass.addMethod(count2); // findAll CtMethod findAll = CtMethod.make("public static java.util.List findAll() { return play.db.jpa.JPQL.instance.findAll(\"" + entityName + "\"); }", ctClass); ctClass.addMethod(findAll); // findById CtMethod findById = CtMethod.make("public static play.db.jpa.JPABase findById(Object id) { return play.db.jpa.JPQL.instance.findById(\"" + entityName + "\", id); }", ctClass); ctClass.addMethod(findById); // find CtMethod find = CtMethod.make("public static play.db.jpa.GenericModel.JPAQuery find(String query, Object[] params) { return play.db.jpa.JPQL.instance.find(\"" + entityName + "\", query, params); }", ctClass); ctClass.addMethod(find); // find CtMethod find2 = CtMethod.make("public static play.db.jpa.GenericModel.JPAQuery find() { return play.db.jpa.JPQL.instance.find(\"" + entityName + "\"); }", ctClass); ctClass.addMethod(find2); // all CtMethod all = CtMethod.make("public static play.db.jpa.GenericModel.JPAQuery all() { return play.db.jpa.JPQL.instance.all(\"" + entityName + "\"); }", ctClass); ctClass.addMethod(all); // delete CtMethod delete = CtMethod.make("public static int delete(String query, Object[] params) { return play.db.jpa.JPQL.instance.delete(\"" + entityName + "\", query, params); }", ctClass); ctClass.addMethod(delete); // deleteAll CtMethod deleteAll = CtMethod.make("public static int deleteAll() { return play.db.jpa.JPQL.instance.deleteAll(\"" + entityName + "\"); }", ctClass); ctClass.addMethod(deleteAll); // findOneBy CtMethod findOneBy = CtMethod.make("public static play.db.jpa.JPABase findOneBy(String query, Object[] params) { return play.db.jpa.JPQL.instance.findOneBy(\"" + entityName + "\", query, params); }", ctClass); ctClass.addMethod(findOneBy); // create CtMethod create = CtMethod.make("public static play.db.jpa.JPABase create(String name, play.mvc.Scope.Params params) { return play.db.jpa.JPQL.instance.create(\"" + entityName + "\", name, params); }", ctClass); ctClass.addMethod(create); // Done. applicationClass.enhancedByteCode = ctClass.toBytecode(); ctClass.defrost(); } }
1,357
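To make the effect of the enhancer above concrete, here is a hypothetical Play 1.x entity and the injected static finders it gains at load time. The `Post` class and its field are invented for illustration, and the calls only behave this way inside a running Play application where the enhancer and JPA plugin are active.

```java
import java.util.List;

import javax.persistence.Entity;

import play.db.jpa.Model;

// A plain Play 1.x model; the bytecode enhancer rewrites this class when it is loaded.
@Entity
public class Post extends Model {
    public String title;

    public static void usageSketch() {
        long total = Post.count();           // injected: count()
        List<Post> all = Post.findAll();     // injected: findAll()
        Post first = (Post) Post.findById(1L); // injected: findById(Object)
        Post.deleteAll();                    // injected: deleteAll()
        System.out.println("had " + total + " posts, loaded " + all.size());
    }
}
```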
190,993
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/platform/intrusive_ptr.h" #include "tensorflow/core/platform/refcount.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace core { namespace { TEST(IntrusivePtr, ConstructorAddRefFalse) { auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); // This is needed so that the compiler does not optimize away dead code. ASSERT_TRUE(ptr->RefCountIsOne()); // Test that there is no leak. } TEST(IntrusivePtr, ConstructorAddRefTrue) { auto raw = new RefCounted(); auto ptr = IntrusivePtr<RefCounted>(raw, /*add_ref=*/true); ASSERT_FALSE(raw->RefCountIsOne()); raw->Unref(); ASSERT_TRUE(raw->RefCountIsOne()); } TEST(IntrusivePtr, CopyConstructor) { auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); auto ptr2 = IntrusivePtr<RefCounted>(ptr1); ASSERT_FALSE(ptr2->RefCountIsOne()); } TEST(IntrusivePtr, CopyAssignment) { auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); auto raw = new RefCounted(); auto ptr2 = IntrusivePtr<RefCounted>(raw, /*add_ref=*/true); ptr2 = ptr1; ASSERT_EQ(ptr1.get(), ptr2.get()); ASSERT_FALSE(ptr2->RefCountIsOne()); ASSERT_TRUE(raw->RefCountIsOne()); raw->Unref(); } TEST(IntrusivePtr, CopyAssignmentIntoEmpty) { auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); auto ptr2 = IntrusivePtr<RefCounted>(); ptr2 = ptr1; ASSERT_FALSE(ptr2->RefCountIsOne()); } TEST(IntrusivePtr, MoveConstructor) { auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); auto ptr2 = IntrusivePtr<RefCounted>(std::move(ptr1)); ASSERT_TRUE(ptr2->RefCountIsOne()); ASSERT_EQ(ptr1.get(), nullptr); // NOLINT(bugprone-use-after-move) } TEST(IntrusivePtr, MoveAssignment) { auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); auto ptr2 = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); ptr2 = std::move(ptr1); ASSERT_TRUE(ptr2->RefCountIsOne()); ASSERT_EQ(ptr1.get(), nullptr); // NOLINT(bugprone-use-after-move) } TEST(IntrusivePtr, MoveAssignmentIntoEmpty) { auto ptr1 = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); auto ptr2 = IntrusivePtr<RefCounted>(); ptr2 = std::move(ptr1); ASSERT_TRUE(ptr2->RefCountIsOne()); ASSERT_EQ(ptr1.get(), nullptr); // NOLINT(bugprone-use-after-move) } TEST(IntrusivePtr, MoveAssignmentAlias) { auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); auto& ptr_alias = ptr; ptr = std::move(ptr_alias); ASSERT_TRUE(ptr->RefCountIsOne()); } TEST(IntrusivePtr, Reset) { auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); ptr.reset(new RefCounted(), /*add_ref=*/false); ASSERT_TRUE(ptr->RefCountIsOne()); // Test no leak. } TEST(IntrusivePtr, ResetIntoEmpty) { auto ptr = IntrusivePtr<RefCounted>(); ptr.reset(new RefCounted(), /*add_ref=*/false); ASSERT_TRUE(ptr->RefCountIsOne()); // Test no leak. 
} TEST(IntrusivePtr, ResetAlias) { auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); ASSERT_TRUE(ptr->RefCountIsOne()); ptr.reset(ptr.get(), /*add_ref=*/false); // No-op. ASSERT_TRUE(ptr->RefCountIsOne()); } TEST(IntrusivePtr, ResetRefBeforeUnref) { class Foo : public RefCounted { public: explicit Foo(char label, Foo* ptr = nullptr) : label_(label), ptr_(ptr, false) {} char label_; IntrusivePtr<Foo> ptr_; }; IntrusivePtr<Foo> x(new Foo{'a', new Foo{'b', new Foo{'c'}}}, false); // This test ensures that reset calls Ref on the new handle before unreffing // the current handle to avoid subtle use-after-delete bugs. // Here if we were to call Unref first, we will Unref the "Foo" with the // label 'b', thereby destroying it. This will in turn Unref 'c' and destroy // that. So reset would try to Ref a deleted object. Calling // x->ptr_->ptr_.Ref() before x->ptr_.Unref() avoids this. x->ptr_ = x->ptr_->ptr_; } TEST(IntrusivePtr, ResetStealPtrBeforeUnref) { class Foo : public RefCounted { public: explicit Foo(char label, Foo* ptr = nullptr) : label_(label), ptr_(ptr, false) {} char label_; IntrusivePtr<Foo> ptr_; }; IntrusivePtr<Foo> x(new Foo{'a', new Foo{'b', new Foo{'c'}}}, false); // This test ensures that move assignment clears the handle_ of the moved // object before Unreffing the current handle_. x->ptr_ = std::move(x->ptr_->ptr_); } TEST(IntrusivePtr, Detach) { auto ptr = IntrusivePtr<RefCounted>(new RefCounted(), /*add_ref=*/false); ASSERT_TRUE(ptr->RefCountIsOne()); auto raw = ptr.detach(); ASSERT_TRUE(raw->RefCountIsOne()); raw->Unref(); } } // namespace } // namespace core } // namespace tensorflow
1,974
1,840
/** * Copyright Pravega Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.pravega.segmentstore.storage.cache; import io.pravega.common.Exceptions; import io.pravega.common.Timer; import io.pravega.common.util.BufferView; import io.pravega.common.util.ByteArraySegment; import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayList; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import lombok.Cleanup; import lombok.Data; import lombok.val; import org.junit.Ignore; import org.junit.Test; /** * Benchmark tests for {@link CacheStorage} and its implementations. This should be used to compare the runtime performance * of various changes in the implementation. Since the results may vary significantly based on the hardware used, outcomes * are not comparable across environments. The way to use this is to establish a benchmark using the base code and then * re-run it with the modifications already implemented and tested. * * The number of iterations {@link #ITERATION_COUNT} should always be greater than 1 since the first iteration will incur * the costs of the initial memory allocation, while the remaining ones will benefit from the memory already being allocated. * * This is marked as @Ignore since these are not real unit tests (no correctness checking) and they take a long time to execute. */ @Ignore public class BenchmarkTests { private static final long MAX_CACHE_SIZE = 16 * 1024 * 1024 * 1024L; private static final int ENTRY_SIZE = 10 * 1024; private static final int ENTRY_COUNT = 1000 * 1000; private static final int ITERATION_COUNT = 5; private static final int RANDOM_OPERATIONS_THREAD_COUNT = 1; private static final int RANDOM_OPERATIONS_INSERT_PERCENTAGE = 60; // Must be 0-100. private final Random random = new Random(0); /** * Tests sequential operations: * - {@link #ENTRY_COUNT} calls to {@link CacheStorage#insert} (with an entry of size {@link #ENTRY_SIZE}). * - {@link #ENTRY_COUNT} calls to {@link CacheStorage#append} (with data of size {@link CacheStorage#getAppendableLength}). * - {@link #ENTRY_COUNT} calls to {@link CacheStorage#replace} (with an entry of equal size). * - {@link #ENTRY_COUNT} calls to {@link CacheStorage#get}. * - {@link #ENTRY_COUNT} calls to {@link CacheStorage#delete}. 
*/ @Test public void testSequentialOperations() { test(this::testSequentialOperations); } private SequentialResult testSequentialOperations(CacheStorage s) { val writeBuffer = new ByteArraySegment(new byte[ENTRY_SIZE]); val appendBuffer = new ByteArraySegment(writeBuffer.array(), 0, s.getAppendableLength(ENTRY_SIZE)); val readBuffer = new byte[ENTRY_SIZE]; this.random.nextBytes(writeBuffer.array()); int[] ids = new int[ENTRY_COUNT]; val insert = measure(() -> { for (int i = 0; i < ENTRY_COUNT; i++) { ids[i] = s.insert(writeBuffer); } }); val append = measure(() -> { for (int i = 0; i < ENTRY_COUNT; i++) { s.append(ids[i], writeBuffer.getLength(), appendBuffer); } }); val replace = measure(() -> { for (int i = 0; i < ENTRY_COUNT; i++) { ids[i] = s.replace(ids[i], writeBuffer); } }); val get = measure(() -> { for (int i = 0; i < ENTRY_COUNT; i++) { BufferView result = s.get(ids[i]); result.copyTo(ByteBuffer.wrap(readBuffer)); } }); val delete = measure(() -> { for (int i = 0; i < ENTRY_COUNT; i++) { s.delete(ids[i]); } }); return new SequentialResult(insert, replace, append, get, delete); } /** * Tests {@link #ENTRY_COUNT} random operations with {@link #RANDOM_OPERATIONS_INSERT_PERCENTAGE} chance of insertions * (and 100%-{@link #RANDOM_OPERATIONS_INSERT_PERCENTAGE} chance of deletions). */ @Test public void testRandomOperations() { test(this::testRandomOperations); } private RandomResult testRandomOperations(CacheStorage s) { val writeBuffer = new ByteArraySegment(new byte[ENTRY_SIZE]); this.random.nextBytes(writeBuffer.array()); val ids = new ArrayList<Integer>(); val timer = new Timer(); val threads = new ArrayList<Thread>(); val iterations = new AtomicInteger(0); val insertCount = new AtomicInteger(0); val getCount = new AtomicInteger(0); val deleteCount = new AtomicInteger(0); for (int threadId = 0; threadId < RANDOM_OPERATIONS_THREAD_COUNT; threadId++) { val t = new Thread(() -> { val readBuffer = new byte[ENTRY_SIZE * 2]; int i; while ((i = iterations.incrementAndGet()) <= ENTRY_COUNT) { boolean insert; int length; synchronized (ids) { insert = (this.random.nextInt(100) < RANDOM_OPERATIONS_INSERT_PERCENTAGE) || ids.isEmpty(); length = insert ? this.random.nextInt(writeBuffer.getLength()) : 0; } if (insert) { int insertedId = s.insert(writeBuffer.slice(0, length)); synchronized (ids) { ids.add(insertedId); } insertCount.incrementAndGet(); } else { // delete int toRemove; synchronized (ids) { toRemove = ids.remove(this.random.nextInt(ids.size())); } s.delete(toRemove); deleteCount.incrementAndGet(); } int toRead = -1; synchronized (ids) { if (!ids.isEmpty()) { toRead = ids.get(this.random.nextInt(ids.size())); } } if (toRead >= 0) { BufferView result = s.get(toRead); if (result != null) { result.copyTo(ByteBuffer.wrap(readBuffer)); } getCount.incrementAndGet(); } } }); t.start(); threads.add(t); } for (val t : threads) { Exceptions.handleInterrupted(t::join); } Duration elapsed = timer.getElapsed(); ids.forEach(s::delete); // do not count this. 
return new RandomResult(elapsed, insertCount.get(), getCount.get(), deleteCount.get()); } private <T> void test(Function<CacheStorage, T> toTest) { @Cleanup val s = new DirectMemoryCache(MAX_CACHE_SIZE); for (int i = 0; i < ITERATION_COUNT; i++) { val r = toTest.apply(s); System.out.println(String.format("#%d: %s", i + 1, r)); } } private Duration measure(Runnable toRun) { System.gc(); val timer = new Timer(); toRun.run(); return timer.getElapsed(); } @Data private static class RandomResult { final Duration elapsed; final int insertCount; final int getCount; final int deleteCount; @Override public String toString() { return String.format("Elapsed: %dms, InsertCount: %d, GetCount: %d, DeleteCount: %d", elapsed.toMillis(), this.insertCount, this.getCount, this.deleteCount); } } @Data private static class SequentialResult { final Duration insert; final Duration replace; final Duration append; final Duration get; final Duration delete; @Override public String toString() { return String.format("Insert: %dms, Replace: %dms, Append: %dms, Get: %dms, Delete: %dms", insert.toMillis(), replace.toMillis(), append.toMillis(), get.toMillis(), delete.toMillis()); } } }
3,912
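A condensed sketch of the `CacheStorage` call pattern exercised by the benchmark above (illustrative, not part of the Pravega test suite). The cache size and payload are arbitrary; the `insert`/`get`/`delete` signatures are taken from the calls visible in the benchmark itself.

```java
import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;
import io.pravega.segmentstore.storage.cache.DirectMemoryCache;

public class CacheStorageExample {
    public static void main(String[] args) {
        DirectMemoryCache cache = new DirectMemoryCache(64 * 1024 * 1024L); // 64 MB cap
        try {
            byte[] payload = new byte[1024];

            int id = cache.insert(new ByteArraySegment(payload)); // returns the new entry's id
            BufferView readBack = cache.get(id);                  // null if the entry no longer exists
            System.out.println("stored " + readBack.getLength() + " bytes");

            cache.delete(id);                                     // frees the entry
        } finally {
            cache.close();
        }
    }
}
```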
903
/**************************************************************************** ** ** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). ** All rights reserved. ** Contact: Nokia Corporation (<EMAIL>) ** ** This file is part of the QtCore module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** GNU Lesser General Public License Usage ** This file may be used under the terms of the GNU Lesser General Public ** License version 2.1 as published by the Free Software Foundation and ** appearing in the file LICENSE.LGPL included in the packaging of this ** file. Please review the following information to ensure the GNU Lesser ** General Public License version 2.1 requirements will be met: ** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ** ** In addition, as a special exception, Nokia gives you certain additional ** rights. These rights are described in the Nokia Qt LGPL Exception ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU General ** Public License version 3.0 as published by the Free Software Foundation ** and appearing in the file LICENSE.GPL included in the packaging of this ** file. Please review the following information to ensure the GNU General ** Public License version 3.0 requirements will be met: ** http://www.gnu.org/copyleft/gpl.html. ** ** Other Usage ** Alternatively, this file may be used in accordance with the terms and ** conditions contained in a signed written agreement between you and Nokia. ** ** ** ** ** ** $QT_END_LICENSE$ ** ****************************************************************************/ /* Data structures */ #ifndef QT_NO_QUUID_STRING # define QT_NO_QUUID_STRING #endif #ifndef QT_NO_STL # define QT_NO_STL #endif /* Dialogs */ #ifndef QT_NO_COLORDIALOG # define QT_NO_COLORDIALOG #endif #ifndef QT_NO_ERRORMESSAGE # define QT_NO_ERRORMESSAGE #endif #ifndef QT_NO_FILEDIALOG # define QT_NO_FILEDIALOG #endif #ifndef QT_NO_FONTDIALOG # define QT_NO_FONTDIALOG #endif #ifndef QT_NO_INPUTDIALOG # define QT_NO_INPUTDIALOG #endif #ifndef QT_NO_MESSAGEBOX # define QT_NO_MESSAGEBOX #endif #ifndef QT_NO_PRINTDIALOG # define QT_NO_PRINTDIALOG #endif #ifndef QT_NO_PROGRESSDIALOG # define QT_NO_PROGRESSDIALOG #endif #ifndef QT_NO_TABDIALOG # define QT_NO_TABDIALOG #endif /* File I/O */ #ifndef QT_NO_TEXTSTREAM # define QT_NO_TEXTSTREAM #endif #ifndef QT_NO_SETTINGS # define QT_NO_SETTINGS #endif #ifndef QT_NO_LIBRARY # define QT_NO_LIBRARY #endif /* Fonts */ #ifndef QT_NO_FREETYPE # define QT_NO_FREETYPE #endif /* Images */ #ifndef QT_NO_IMAGEFORMATPLUGIN # define QT_NO_IMAGEFORMATPLUGIN #endif #ifndef QT_NO_IMAGEFORMAT_BMP # define QT_NO_IMAGEFORMAT_BMP #endif #ifndef QT_NO_IMAGEFORMAT_JPEG # define QT_NO_IMAGEFORMAT_JPEG #endif #ifndef QT_NO_IMAGEFORMAT_PPM # define QT_NO_IMAGEFORMAT_PPM #endif #ifndef QT_NO_IMAGEFORMAT_XBM # define QT_NO_IMAGEFORMAT_XBM #endif #ifndef QT_NO_IMAGEFORMAT_XPM # define QT_NO_IMAGEFORMAT_XPM #endif #ifndef QT_NO_IMAGE_TEXT # define QT_NO_IMAGE_TEXT #endif #ifndef QT_NO_MOVIE # define QT_NO_MOVIE #endif /* Internationalization */ #ifndef QT_NO_QWS_INPUTMETHODS # define QT_NO_QWS_INPUTMETHODS #endif #ifndef QT_NO_TEXTCODEC # define QT_NO_TEXTCODEC #endif #ifndef QT_NO_CODECS # define QT_NO_CODECS #endif #ifndef QT_NO_TEXTCODECPLUGIN # define QT_NO_TEXTCODECPLUGIN #endif #ifndef QT_NO_TRANSLATION # define QT_NO_TRANSLATION #endif #ifndef QT_NO_TRANSLATION_UTF8 # define 
QT_NO_TRANSLATION_UTF8 #endif /* ItemViews */ #ifndef QT_NO_DIRMODEL # define QT_NO_DIRMODEL #endif #ifndef QT_NO_PROXYMODEL # define QT_NO_PROXYMODEL #endif #ifndef QT_NO_SORTFILTERPROXYMODEL # define QT_NO_SORTFILTERPROXYMODEL #endif #ifndef QT_NO_STRINGLISTMODEL # define QT_NO_STRINGLISTMODEL #endif #ifndef QT_NO_TABLEVIEW # define QT_NO_TABLEVIEW #endif #ifndef QT_NO_TREEVIEW # define QT_NO_TREEVIEW #endif /* Kernel */ #ifndef QT_NO_ACTION # define QT_NO_ACTION #endif #ifndef QT_NO_CLIPBOARD # define QT_NO_CLIPBOARD #endif #ifndef QT_NO_DRAGANDDROP # define QT_NO_DRAGANDDROP #endif #ifndef QT_NO_EFFECTS # define QT_NO_EFFECTS #endif #ifndef QT_NO_PROPERTIES # define QT_NO_PROPERTIES #endif #ifndef QT_NO_SESSIONMANAGER # define QT_NO_SESSIONMANAGER #endif #ifndef QT_NO_SHORTCUT # define QT_NO_SHORTCUT #endif #ifndef QT_NO_SOUND # define QT_NO_SOUND #endif #ifndef QT_NO_WHEELEVENT # define QT_NO_WHEELEVENT #endif /* Networking */ #ifndef QT_NO_COP # define QT_NO_COP #endif #ifndef QT_NO_HOSTINFO # define QT_NO_HOSTINFO #endif #ifndef QT_NO_HTTP # define QT_NO_HTTP #endif /* Painting */ #ifndef QT_NO_COLORNAMES # define QT_NO_COLORNAMES #endif #ifndef QT_NO_PICTURE # define QT_NO_PICTURE #endif #ifndef QT_NO_PRINTER # define QT_NO_PRINTER #endif #ifndef QT_NO_CUPS # define QT_NO_CUPS #endif /* Qt for Embedded Linux */ #ifndef QT_NO_QWS_SOUNDSERVER # define QT_NO_QWS_SOUNDSERVER #endif #ifndef QT_NO_QWS_PROPERTIES # define QT_NO_QWS_PROPERTIES #endif /* Styles */ #ifndef QT_NO_STYLE_MOTIF # define QT_NO_STYLE_MOTIF #endif #ifndef QT_NO_STYLE_CDE # define QT_NO_STYLE_CDE #endif #ifndef QT_NO_STYLE_CLEANLOOKS # define QT_NO_STYLE_CLEANLOOKS #endif #ifndef QT_NO_STYLE_PLASTIQUE # define QT_NO_STYLE_PLASTIQUE #endif #ifndef QT_NO_STYLE_STYLESHEET # define QT_NO_STYLE_STYLESHEET #endif #ifndef QT_NO_STYLE_WINDOWSXP # define QT_NO_STYLE_WINDOWSXP #endif /* Utilities */ #ifndef QT_NO_COMPLETER # define QT_NO_COMPLETER #endif #ifndef QT_NO_DESKTOPSERVICES # define QT_NO_DESKTOPSERVICES #endif #ifndef QT_NO_SYSTEMTRAYICON # define QT_NO_SYSTEMTRAYICON #endif #ifndef QT_NO_GESTURES # define QT_NO_GESTURES #endif /* Widgets */ #ifndef QT_NO_LCDNUMBER # define QT_NO_LCDNUMBER #endif #ifndef QT_NO_FONTCOMBOBOX # define QT_NO_FONTCOMBOBOX #endif #ifndef QT_NO_SPINBOX # define QT_NO_SPINBOX #endif #ifndef QT_NO_CALENDARWIDGET # define QT_NO_CALENDARWIDGET #endif #ifndef QT_NO_DATETIMEEDIT # define QT_NO_DATETIMEEDIT #endif #ifndef QT_NO_MENU # define QT_NO_MENU #endif #ifndef QT_NO_CONTEXTMENU # define QT_NO_CONTEXTMENU #endif #ifndef QT_NO_MAINWINDOW # define QT_NO_MAINWINDOW #endif #ifndef QT_NO_DOCKWIDGET # define QT_NO_DOCKWIDGET #endif #ifndef QT_NO_TOOLBAR # define QT_NO_TOOLBAR #endif #ifndef QT_NO_MENUBAR # define QT_NO_MENUBAR #endif #ifndef QT_NO_WORKSPACE # define QT_NO_WORKSPACE #endif #ifndef QT_NO_PROGRESSBAR # define QT_NO_PROGRESSBAR #endif #ifndef QT_NO_SPLITTER # define QT_NO_SPLITTER #endif #ifndef QT_NO_SIZEGRIP # define QT_NO_SIZEGRIP #endif #ifndef QT_NO_STACKEDWIDGET # define QT_NO_STACKEDWIDGET #endif #ifndef QT_NO_TABWIDGET # define QT_NO_TABWIDGET #endif #ifndef QT_NO_STATUSBAR # define QT_NO_STATUSBAR #endif #ifndef QT_NO_TABLEWIDGET # define QT_NO_TABLEWIDGET #endif #ifndef QT_NO_TOOLBUTTON # define QT_NO_TOOLBUTTON #endif #ifndef QT_NO_TABBAR # define QT_NO_TABBAR #endif #ifndef QT_NO_TOOLBOX # define QT_NO_TOOLBOX #endif #ifndef QT_NO_WHATSTHIS # define QT_NO_WHATSTHIS #endif #ifndef QT_NO_TOOLTIP # define QT_NO_TOOLTIP #endif #ifndef QT_NO_TREEWIDGET # define 
QT_NO_TREEWIDGET #endif #ifndef QT_NO_VALIDATOR # define QT_NO_VALIDATOR #endif
3,603
930
<gh_stars>100-1000 from django.contrib.auth.models import Group from django.utils import timezone from django.test import TestCase from django.core import mail from django.urls import reverse from boxes.tests import create_and_login_user from hawkpost import celery_app from .models import Notification, User from .forms import UpdateUserInfoForm from .tasks import enqueue_email_notifications from .utils import key_state, with_gpg_obj from .test_constants import VALID_KEY_FINGERPRINT, VALID_KEYSERVER_URL from .test_constants import EXPIRED_KEY_FINGERPRINT from .test_constants import REVOKED_KEY, EXPIRED_KEY, VALID_KEY from copy import copy from shutil import rmtree import gnupg import tempfile import random import string from unittest import mock from collections import namedtuple DEFAULT_USER_DATA = { "first_name": "<NAME>", "last_name": "<NAME>", "company": "some company", "fingerprint": VALID_KEY_FINGERPRINT, "timezone": "UTC", "language": "en-us", "public_key": VALID_KEY } def create_notification(sent=False, group=None): sent_at = timezone.now() if sent else None return Notification.objects.create(subject="Test subject", body="Test Body", sent_at=sent_at, send_to=group) @with_gpg_obj def create_expiring_key(days_to_expire, gpg): days_to_expire = str(days_to_expire) + "d" # Example values for expire_date: “2009-12-31”, “365d”, “3m”, “6w”, “5y”, “seconds=<epoch>”, 0 input_data = gpg.gen_key_input(key_type="RSA", key_length=1024, expire_date=days_to_expire, passphrase="<PASSWORD>") key_id = gpg.gen_key(input_data) # retrieve the key key_ascii = gpg.export_keys(key_id) # remove the keyring return key_ascii def create_and_login_user(client): username = ''.join(random.choice(string.ascii_uppercase) for _ in range(5)) user = User.objects.create_user(username=username, email="{}<EMAIL>(username)) client.force_login(user) return user class UpdateUserFormTests(TestCase): def setUp(self): self.default_data = DEFAULT_USER_DATA def test_empty_fingerprint(self): data = copy(self.default_data) data["fingerprint"] = "" form = UpdateUserInfoForm(data) self.assertEqual(form.is_valid(), False) def test_fingerprint_plus_public_key(self): data = copy(self.default_data) data["fingerprint"] = VALID_KEY_FINGERPRINT data["public_key"] = VALID_KEY form = UpdateUserInfoForm(data) self.assertEqual(form.is_valid(), True) @mock.patch("humans.forms.requests.get") def test_fingerprint_plus_keyserver_url(self, get_mock): Response = namedtuple("Response", "status_code,text") get_mock.return_value = Response(200, VALID_KEY) data = copy(self.default_data) data["keyserver_url"] = VALID_KEYSERVER_URL form = UpdateUserInfoForm(data) self.assertEqual(form.is_valid(), True) def test_fingerprint_mismatch(self): data = copy(self.default_data) data["fingerprint"] = EXPIRED_KEY_FINGERPRINT form = UpdateUserInfoForm(data) self.assertEqual(form.is_valid(), False) def test_empty_language(self): data = copy(self.default_data) data["language"] = "" form = UpdateUserInfoForm(data) self.assertEqual(form.is_valid(), False) def test_non_valid_language(self): data = copy(self.default_data) data["language"] = "invalid" form = UpdateUserInfoForm(data) self.assertEqual(form.is_valid(), False) def test_wrong_current_password(self): """ Tests if the form is invalidated because the wrong password was sent """ data = { 'current_password': '<PASSWORD>', 'timezone': 'UTC', 'language': 'en-us' } user = create_and_login_user(self.client) form = UpdateUserInfoForm(data, instance=user) self.assertEqual(form.is_valid(), False) 
        self.assertTrue('current_password' in form.errors)

    def test_invalid_password(self):
        """
        Tests that Django password validation constraints are enforced
        """
        data = {
            'current_password': '<PASSWORD>',
            'new_password1': 'a',
            'new_password2': 'a',
            'timezone': 'UTC',
            'language': 'en-us'
        }
        user = create_and_login_user(self.client)
        user.set_password('<PASSWORD>')
        user.save()
        form = UpdateUserInfoForm(data, instance=user)
        self.assertEqual(form.is_valid(), False)
        self.assertTrue('new_password2' in form.errors)

    def test_non_matching_passwords(self):
        """
        Tests that the form is invalid when the passwords are valid but different
        """
        data = {
            'current_password': '<PASSWORD>',
            'new_password1': '<PASSWORD>',
            'new_password2': '<PASSWORD>',
            'timezone': 'UTC',
            'language': 'en-us'
        }
        user = create_and_login_user(self.client)
        user.set_password('<PASSWORD>')
        user.save()
        form = UpdateUserInfoForm(data, instance=user)
        self.assertEqual(form.is_valid(), False)
        self.assertTrue('new_password2' in form.errors)

    def test_change_password(self):
        """
        Tests if the password is actually changed
        """
        data = {
            'current_password': '<PASSWORD>',
            'new_password1': '<PASSWORD>',
            'new_password2': '<PASSWORD>23',
            'timezone': 'UTC',
            'language': 'en-us'
        }
        user = create_and_login_user(self.client)
        user.set_password('<PASSWORD>')
        user.save()
        form = UpdateUserInfoForm(data, instance=user)
        self.assertEqual(form.is_valid(), True)
        form.save()
        user.refresh_from_db()
        self.assertTrue(user.check_password(data["new_password1"]))


class UtilsTests(TestCase):

    def test_invalid_key_state(self):
        fingerprint, *state = key_state("invalid stuff")
        self.assertEqual(state[0], "invalid")

    def test_expired_key_state(self):
        fingerprint, *state = key_state(EXPIRED_KEY)
        self.assertEqual(state[0], "expired")

    def test_revoked_key_state(self):
        fingerprint, *state = key_state(REVOKED_KEY)
        self.assertEqual(state[0], "revoked")

    def test_valid_key_state(self):
        fingerprint, *state = key_state(VALID_KEY)
        self.assertEqual(state[0], "valid")

    def test_key_days_to_expire(self):
        key = create_expiring_key(7)
        fingerprint, *state = key_state(key)
        self.assertEqual(state[0], "valid")
        self.assertGreaterEqual(state[1], 6)
        self.assertLess(state[1], 8)
        key = create_expiring_key(1)
        fingerprint, *state = key_state(key)
        self.assertEqual(state[0], "valid")
        self.assertGreaterEqual(state[1], 0)
        self.assertLess(state[1], 1)


class UserModelTests(TestCase):

    def test_no_setup_complete(self):
        user = create_and_login_user(self.client)
        self.assertEqual(user.has_setup_complete(), False)

    def test_setup_complete(self):
        user = create_and_login_user(self.client)
        user.public_key = VALID_KEY
        user.fingerprint = VALID_KEY_FINGERPRINT
        user.save()
        self.assertEqual(user.has_setup_complete(), True)


class NotificationsTests(TestCase):

    def setUp(self):
        celery_app.conf.update(task_always_eager=True)

    def test_delete_sent_notifications(self):
        notification = create_notification(sent=True)
        notification_id = notification.id
        self.assertEqual(notification.delete(), False)
        queryset = Notification.objects.filter(id=notification_id)
        self.assertEqual(len(queryset), 1)

    def test_delete_unsent_notification(self):
        notification = create_notification(sent=False)
        notification_id = notification.id
        self.assertNotEqual(notification.delete(), False)
        queryset = Notification.objects.filter(id=notification_id)
        self.assertEqual(len(queryset), 0)

    def test_send_when_group_is_defined(self):
        for i in range(4):
            create_and_login_user(self.client)
        last_user = create_and_login_user(self.client)
        group = Group.objects.create(name="Test Group")
        group.user_set.add(last_user)
        notification = create_notification(sent=False, group=group)
        enqueue_email_notifications(notification.id, notification.send_to.id)
        self.assertEqual(len(mail.outbox), 1)

    def test_send_when_group_is_not_defined(self):
        for i in range(4):
            create_and_login_user(self.client)
        notification = create_notification(sent=False)
        enqueue_email_notifications(notification.id, None)
        self.assertEqual(len(mail.outbox), User.objects.count())


class KeyChangeRecordsTests(TestCase):

    def setUp(self):
        self.user = create_and_login_user(self.client)
        self.data = {
            'public_key': VALID_KEY,
            'fingerprint': VALID_KEY_FINGERPRINT
        }

    def test_if_no_key_change_no_record(self):
        form = UpdateUserInfoForm({}, instance=self.user)
        form.is_valid()
        form.save()
        self.assertEqual(self.user.keychanges.count(), 0)

    def test_key_changes_are_recorded(self):
        form = UpdateUserInfoForm(self.data, instance=self.user)
        form.is_valid()
        form.save()
        self.assertEqual(self.user.keychanges.count(), 1)
        keychangerecord = self.user.keychanges.last()
        self.assertEqual(keychangerecord.ip_address, None)
        self.assertEqual(keychangerecord.agent, '')

    def test_ip_address_and_user_agent_are_recorded_when_available(self):
        form = UpdateUserInfoForm(self.data, instance=self.user)
        form.is_valid()
        form.save(ip='127.0.0.1', agent='test_agent')
        self.assertEqual(self.user.keychanges.count(), 1)
        keychangerecord = self.user.keychanges.last()
        self.assertEqual(keychangerecord.ip_address, '127.0.0.1')
        self.assertEqual(keychangerecord.agent, 'test_agent')


class UpdateSettingsTests(TestCase):

    def test_unauthenticated_get_request(self):
        response = self.client.get(reverse("humans_update"))
        self.assertEqual(response.status_code, 302)

    def test_unauthenticated_post_request(self):
        response = self.client.post(reverse("humans_update"))
        self.assertEqual(response.status_code, 302)

    def test_authenticated_get_request(self):
        create_and_login_user(self.client)
        response = self.client.get(reverse("humans_update"))
        self.assertEqual(response.status_code, 200)

    def test_update_user_name(self):
        user = create_and_login_user(self.client)
        response = self.client.post(reverse("humans_update"),
                                    DEFAULT_USER_DATA,
                                    HTTP_USER_AGENT="testagent")
        self.assertEqual(response.status_code, 302)
        user.refresh_from_db()
        self.assertEqual(user.first_name, "some name")


class DeleteUserTests(TestCase):

    def test_unauthenticated_get_request(self):
        response = self.client.get(reverse("humans_delete"))
        self.assertEqual(response.status_code, 302)

    def test_authenticated_get_request(self):
        create_and_login_user(self.client)
        response = self.client.get(reverse("humans_delete"))
        self.assertEqual(response.status_code, 200)

    def test_delete_without_password(self):
        user = create_and_login_user(self.client)
        user.set_password("<PASSWORD>")
        user.save()
        self.client.force_login(user)
        response = self.client.post(reverse("humans_delete"))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.all().exists())
        self.assertIn(
            "In order to delete the account you must provide the current password.",
            [str(msg) for msg in response.context["messages"]])

    def test_delete_with_wrong_password(self):
        user = create_and_login_user(self.client)
        user.set_password("<PASSWORD>")
        user.save()
        self.client.force_login(user)
        response = self.client.post(reverse("humans_delete"), {
            "current_password": "wrong"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            "In order to delete the account you must provide the current password.",
            [str(msg) for msg in response.context["messages"]])
        self.assertTrue(User.objects.all().exists())

    def test_delete_with_correct_password(self):
        password = "<PASSWORD>"
        user = create_and_login_user(self.client)
        user.set_password(password)
        user.save()
        self.client.force_login(user)
        response = self.client.post(reverse("humans_delete"), {
            "current_password": password})
        self.assertEqual(response.status_code, 302)
        self.assertFalse(User.objects.all().exists())

    def test_delete_passwordless_account(self):
        user = create_and_login_user(self.client)
        self.client.force_login(user)
        response = self.client.post(reverse("humans_delete"))
        self.assertEqual(response.status_code, 302)
        self.assertFalse(User.objects.all().exists())
6,063
20,995
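The tests above exercise a key_state helper without showing it. A minimal sketch of the contract they imply, inferred purely from the assertions and not taken from the project's code:

# Inferred (hypothetical) contract of key_state(), based only on the tests above:
# it returns the key fingerprint followed by a state string ("invalid", "expired",
# "revoked" or "valid") and, for valid keys, the remaining days until expiry.
fingerprint, state, *rest = key_state(VALID_KEY)
assert state == "valid"
days_to_expire = rest[0] if rest else None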
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_
#define V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_

#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

template <typename T>
class TestWithBitmap : public ::testing::Test {
 public:
  TestWithBitmap() : memory_(new uint8_t[Bitmap::kSize]) {
    memset(memory_, 0, Bitmap::kSize);
  }

  ~TestWithBitmap() override { delete[] memory_; }

  T* bitmap() { return reinterpret_cast<T*>(memory_); }
  uint8_t* raw_bitmap() { return memory_; }

 private:
  uint8_t* memory_;
};

using BitmapTypes = ::testing::Types<ConcurrentBitmap<AccessMode::NON_ATOMIC>,
                                     ConcurrentBitmap<AccessMode::ATOMIC>>;

}  // namespace internal
}  // namespace v8

#endif  // V8_UNITTESTS_HEAP_BITMAP_TEST_UTILS_H_
385
879
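A hedged sketch of how a typed fixture like this is usually instantiated with gtest; the suite registration and test body below are illustrative and are not part of the header above.

// Hypothetical usage (assumes the header above and gtest are included):
TYPED_TEST_SUITE(TestWithBitmap, BitmapTypes);

TYPED_TEST(TestWithBitmap, StartsZeroed) {
  // memory_ is zeroed in the fixture constructor, so the backing store reads as 0.
  EXPECT_EQ(0u, this->raw_bitmap()[0]);
}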
<gh_stars>100-1000
package org.zstack.test.kvm;

import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import org.zstack.compute.host.HostGlobalConfig;
import org.zstack.compute.vm.VmInstanceManagerImpl;
import org.zstack.core.cloudbus.CloudBus;
import org.zstack.core.componentloader.ComponentLoader;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.header.identity.SessionInventory;
import org.zstack.header.rest.RESTFacade;
import org.zstack.header.vm.VmInstanceInventory;
import org.zstack.header.vm.VmInstanceState;
import org.zstack.header.vm.VmInstanceVO;
import org.zstack.kvm.KVMConstant.KvmVmState;
import org.zstack.simulator.kvm.KVMSimulatorConfig;
import org.zstack.test.Api;
import org.zstack.test.ApiSenderException;
import org.zstack.test.DBUtil;
import org.zstack.test.WebBeanConstructor;
import org.zstack.test.deployer.Deployer;
import org.zstack.test.storage.backup.sftp.TestSftpBackupStorageDeleteImage2;
import org.zstack.utils.Utils;
import org.zstack.utils.logging.CLogger;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * 1. create a vm
 * 2. disconnect the host
 * <p>
 * confirm the vm's state becomes unknown
 * <p>
 * 3. reconnect the host
 * <p>
 * confirm the vm's state becomes running
 */
@Deprecated
public class TestKvmVmTracer1 {
    CLogger logger = Utils.getLogger(TestSftpBackupStorageDeleteImage2.class);
    Deployer deployer;
    Api api;
    ComponentLoader loader;
    CloudBus bus;
    DatabaseFacade dbf;
    SessionInventory session;
    RESTFacade restf;
    KVMSimulatorConfig config;
    VmInstanceManagerImpl vmMgr;

    @Before
    public void setUp() throws Exception {
        DBUtil.reDeployDB();
        WebBeanConstructor con = new WebBeanConstructor();
        deployer = new Deployer("deployerXml/kvm/TestKvmVmTracer1.xml", con);
        deployer.addSpringConfig("KVMRelated.xml");
        deployer.build();
        api = deployer.getApi();
        loader = deployer.getComponentLoader();
        bus = loader.getComponent(CloudBus.class);
        dbf = loader.getComponent(DatabaseFacade.class);
        restf = loader.getComponent(RESTFacade.class);
        config = loader.getComponent(KVMSimulatorConfig.class);
        vmMgr = loader.getComponent(VmInstanceManagerImpl.class);
        session = api.loginAsAdmin();
        HostGlobalConfig.PING_HOST_INTERVAL.updateValue(1);
    }

    @Test
    public void test() throws InterruptedException, ApiSenderException {
        HostGlobalConfig.AUTO_RECONNECT_ON_ERROR.updateValue(false);
        VmInstanceInventory vm = deployer.vms.get("TestVm");
        config.pingSuccess = false;
        TimeUnit.SECONDS.sleep(3);
        VmInstanceVO vmvo = dbf.findByUuid(vm.getUuid(), VmInstanceVO.class);
        Assert.assertEquals(VmInstanceState.Unknown, vmvo.getState());

        Map<String, String> m = new HashMap<String, String>();
        m.put(vm.getUuid(), KvmVmState.Running.toString());
        config.checkVmStatesConfig.put(vm.getHostUuid(), m);
        vmMgr.managementNodeReady();
        TimeUnit.SECONDS.sleep(5);
        Assert.assertEquals(1, config.checkVmStateCmds.size());
        vmvo = dbf.findByUuid(vm.getUuid(), VmInstanceVO.class);
        Assert.assertEquals(VmInstanceState.Running, vmvo.getState());
    }
}
1,312
2,603
<filename>FreeRTOS/Demo/RISC-V_RV64_PolarFire_SoftConsole/polarfire_hal/platform/mpfs_hal/common/mss_peripherals.c
/*******************************************************************************
 * Copyright 2019-2021 Microchip FPGA Embedded Systems Solutions.
 *
 * SPDX-License-Identifier: MIT
 *
 * MPFS HAL Embedded Software
 *
 */

/*******************************************************************************
 * @file mss_peripherals.c
 * @author Microchip-FPGA Embedded Systems Solutions
 * @brief PolarFire SoC MSS functions related to peripherals.
 *
 */
/*=========================================================================*//**
 *//*=========================================================================*/
#include <stdio.h>
#include <string.h>
#include "mpfs_hal/mss_hal.h"

const uint32_t LIBERO_SETTING_CONTEXT_EN[][2U] = {
    {LIBERO_SETTING_CONTEXT_A_EN, LIBERO_SETTING_CONTEXT_B_EN},
    {LIBERO_SETTING_CONTEXT_A_EN_FIC, LIBERO_SETTING_CONTEXT_B_EN_FIC},
};

/* offsets used in PERIPHERAL_SETUP array */
#define PERIPHERAL_INDEX_OFFSET     0U  /* used for sanity check */
#define CONTEXT_EN_INDEX_OFFSET     1U
#define CONTEXT_MASK_INDEX_OFFSET   2U
#define CONTEXT_SUBCLK_INDEX_OFFSET 3U

const uint32_t PERIPHERAL_SETUP[][4U] = {
    {MSS_PERIPH_MMUART0, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MMUART0, SUBBLK_CLOCK_CR_MMUART0_MASK},
    {MSS_PERIPH_MMUART1, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MMUART1, SUBBLK_CLOCK_CR_MMUART1_MASK},
    {MSS_PERIPH_MMUART2, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MMUART2, SUBBLK_CLOCK_CR_MMUART2_MASK},
    {MSS_PERIPH_MMUART3, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MMUART3, SUBBLK_CLOCK_CR_MMUART3_MASK},
    {MSS_PERIPH_MMUART4, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MMUART4, SUBBLK_CLOCK_CR_MMUART4_MASK},
    {MSS_PERIPH_WDOG0, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_WDOG0, SUBBLK_CLOCK_NA_MASK},
    {MSS_PERIPH_WDOG1, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_WDOG1, SUBBLK_CLOCK_NA_MASK},
    {MSS_PERIPH_WDOG2, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_WDOG2, SUBBLK_CLOCK_NA_MASK},
    {MSS_PERIPH_WDOG3, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_WDOG3, SUBBLK_CLOCK_NA_MASK},
    {MSS_PERIPH_WDOG4, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_WDOG4, SUBBLK_CLOCK_NA_MASK},
    {MSS_PERIPH_SPI0, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_SPI0, SUBBLK_CLOCK_CR_SPI0_MASK},
    {MSS_PERIPH_SPI1, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_SPI1, SUBBLK_CLOCK_CR_SPI1_MASK},
    {MSS_PERIPH_I2C0, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_I2C0, SUBBLK_CLOCK_CR_I2C0_MASK},
    {MSS_PERIPH_I2C1, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_I2C1, SUBBLK_CLOCK_CR_I2C1_MASK},
    {MSS_PERIPH_CAN0, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_CAN0, SUBBLK_CLOCK_CR_CAN0_MASK},
    {MSS_PERIPH_CAN1, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_CAN1, SUBBLK_CLOCK_CR_CAN1_MASK},
    {MSS_PERIPH_MAC0, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MAC0, SUBBLK_CLOCK_CR_MAC0_MASK},
    {MSS_PERIPH_MAC1, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MAC1, SUBBLK_CLOCK_CR_MAC1_MASK},
    {MSS_PERIPH_TIMER, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_TIMER, SUBBLK_CLOCK_CR_TIMER_MASK},
    {MSS_PERIPH_GPIO0, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_GPIO0, SUBBLK_CLOCK_CR_GPIO0_MASK},
    {MSS_PERIPH_GPIO1, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_GPIO1, SUBBLK_CLOCK_CR_GPIO1_MASK},
    {MSS_PERIPH_GPIO2, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_GPIO2, SUBBLK_CLOCK_CR_GPIO2_MASK},
    {MSS_PERIPH_RTC, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_RTC, SUBBLK_CLOCK_CR_RTC_MASK},
    {MSS_PERIPH_H2FINT, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_H2FINT, SUBBLK_CLOCK_NA_MASK},
    {MSS_PERIPH_CRYPTO, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_CRYPTO, SUBBLK_CLOCK_CR_ATHENA_MASK},
    {MSS_PERIPH_USB, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_USB, SUBBLK_CLOCK_CR_USB_MASK},
    {MSS_PERIPH_QSPIXIP, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_QSPIXIP, SUBBLK_CLOCK_CR_QSPI_MASK},
    {MSS_PERIPH_ATHENA, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_ATHENA, SUBBLK_CLOCK_CR_ATHENA_MASK},
    {MSS_PERIPH_TRACE, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MMC, SUBBLK_CLOCK_CR_MMC_MASK},
    {MSS_PERIPH_MAILBOX_SC, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MMC, SUBBLK_CLOCK_CR_MMC_MASK},
    {MSS_PERIPH_EMMC, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_MMC, SUBBLK_CLOCK_CR_MMC_MASK},
    {MSS_PERIPH_CFM, CONTEXT_EN_INDEX, CONTEXT_EN_MASK_CFM, SUBBLK_CLOCK_CR_CFM_MASK},
    {MSS_PERIPH_FIC0, CONTEXT_EN_INDEX_FIC, CONTEXT_EN_MASK_FIC0, SUBBLK_CLOCK_CR_FIC0_MASK},
    {MSS_PERIPH_FIC1, CONTEXT_EN_INDEX_FIC, CONTEXT_EN_MASK_FIC1, SUBBLK_CLOCK_CR_FIC1_MASK},
    {MSS_PERIPH_FIC2, CONTEXT_EN_INDEX_FIC, CONTEXT_EN_MASK_FIC2, SUBBLK_CLOCK_CR_FIC2_MASK},
    {MSS_PERIPH_FIC3, CONTEXT_EN_INDEX_FIC, CONTEXT_EN_MASK_FIC3, SUBBLK_CLOCK_CR_FIC3_MASK}
};

/**
 * If contexts are set up, verify that the requester is allowed access to the peripheral.
 * @param option Two options, as FIC enables are set separately: CONTEXT_EN_INDEX_FIC or CONTEXT_EN_INDEX
 * @param periph_context_mask See the CONTEXT_EN_MASK_ defines for options
 * @param hart The hart ID of the origin of the request.
 * @return
 */
static inline uint8_t verify_context_enable(uint8_t option, uint32_t periph_context_mask, uint32_t hart)
{
    uint8_t result = 1U;
#if ((LIBERO_SETTING_MEM_CONFIGS_ENABLED & PMP_ENABLED_MASK) == PMP_ENABLED_MASK)
    if (hart != (uint8_t) 0U)
    {
        if (LIBERO_SETTING_CONTEXT_A_HART_EN & hart)
        {
            if (LIBERO_SETTING_CONTEXT_EN[option][0U] & periph_context_mask)
            {
                result = 0U;
            }
        }
        if (LIBERO_SETTING_CONTEXT_B_HART_EN & hart)
        {
            if (LIBERO_SETTING_CONTEXT_EN[option][1U] & periph_context_mask)
            {
                result = 0U;
            }
        }
    }
    else
    {
        hart = 0U;
    }
#else
    (void)hart;
    (void)periph_context_mask;
    (void)option;
    result = 0U;
#endif
    return result;
}

/**
 * Turn on/off mss peripheral as required
 * @param peripheral_mask
 * @param req_state
 */
static inline void peripheral_on_off(uint32_t peripheral_mask, PERIPH_RESET_STATE req_state)
{
    if (req_state == PERIPHERAL_OFF)
    {
        /* Turn off clock */
        SYSREG->SUBBLK_CLOCK_CR &= (uint32_t)~(peripheral_mask);
        /* Hold in reset */
        SYSREG->SOFT_RESET_CR |= (uint32_t)(peripheral_mask);
    }
    else
    {
        /* Turn on clock */
        SYSREG->SUBBLK_CLOCK_CR |= (peripheral_mask);
        /* Remove soft reset */
        SYSREG->SOFT_RESET_CR &= (uint32_t)~(peripheral_mask);
    }
}

/***************************************************************************//**
 * See mss_peripherals.h for details of how to use this function.
 */
__attribute__((weak)) uint8_t mss_config_clk_rst(mss_peripherals peripheral, uint8_t hart, PERIPH_RESET_STATE req_state)
{
    uint8_t result = 1U;
    ASSERT(PERIPHERAL_SETUP[peripheral][PERIPHERAL_INDEX_OFFSET] == peripheral);

    result = verify_context_enable(PERIPHERAL_SETUP[peripheral][CONTEXT_EN_INDEX_OFFSET],
                                   PERIPHERAL_SETUP[peripheral][CONTEXT_MASK_INDEX_OFFSET], hart);

    if (result == 0U)
    {
        peripheral_on_off(PERIPHERAL_SETUP[peripheral][CONTEXT_SUBCLK_INDEX_OFFSET], req_state);
    }
    return result;
}
3,645
31,928
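A short usage sketch under stated assumptions: PERIPHERAL_ON as the counterpart of the PERIPHERAL_OFF value used above, and hart 1 as the requesting hart; neither literal appears in the file itself.

#include "mpfs_hal/mss_hal.h"

/* Hypothetical caller: enable the MMUART0 clock and release its reset from hart 1.
 * A non-zero return means the Libero context settings deny this hart access. */
static void example_enable_mmuart0(void)
{
    uint8_t denied = mss_config_clk_rst(MSS_PERIPH_MMUART0, 1u, PERIPHERAL_ON);
    (void)denied;
}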
import logging
import threading
import time

from localstack import config
from localstack.constants import MODULE_MAIN_PATH
from localstack.services import install
from localstack.services.infra import do_run, log_startup_message, start_proxy_for_service
from localstack.services.kinesis import kinesis_listener
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
    chmod_r,
    get_free_tcp_port,
    mkdir,
    replace_in_file,
    start_thread,
    wait_for_port_open,
)

LOG = logging.getLogger(__name__)

# event to indicate that the kinesis backend service has stopped (the terminal command has returned)
kinesis_stopped = threading.Event()

# todo: will be replaced with plugin mechanism
PROCESS_THREAD = None
PORT_KINESIS_BACKEND = None


def apply_patches_kinesalite():
    files = [
        "%s/kinesalite/validations/decreaseStreamRetentionPeriod.js",
        "%s/kinesalite/validations/increaseStreamRetentionPeriod.js",
    ]
    for file_path in files:
        file_path = file_path % install.INSTALL_DIR_NPM
        replace_in_file("lessThanOrEqual: 168", "lessThanOrEqual: 8760", file_path)


def start_kinesis(port=None, asynchronous=False, update_listener=None):
    port = port or config.PORT_KINESIS
    if config.KINESIS_PROVIDER == "kinesis-mock":
        return start_kinesis_mock(
            port=port, asynchronous=asynchronous, update_listener=update_listener
        )
    if config.KINESIS_PROVIDER == "kinesalite":
        return start_kinesalite(
            port=port, asynchronous=asynchronous, update_listener=update_listener
        )
    raise Exception('Unsupported Kinesis provider "%s"' % config.KINESIS_PROVIDER)


def _run_proxy_and_command(cmd, port, backend_port, update_listener, asynchronous):
    global PROCESS_THREAD
    log_startup_message("Kinesis")
    start_proxy_for_service("kinesis", port, backend_port, update_listener)
    # TODO: generalize into service manager once it is introduced
    LOG.debug("Running Kinesis startup command: %s", cmd)
    try:
        PROCESS_THREAD = do_run(cmd, asynchronous)
    finally:
        if asynchronous:

            def _return_listener(*_):
                try:
                    ret_code = PROCESS_THREAD.result_future.result()
                    if ret_code not in [0, None]:
                        LOG.error("kinesis terminated with return code %s", ret_code)
                finally:
                    kinesis_stopped.set()

            start_thread(_return_listener)
        else:
            kinesis_stopped.set()
    return PROCESS_THREAD


def start_kinesis_mock(port=None, asynchronous=False, update_listener=None):
    kinesis_mock_bin = install.install_kinesis_mock()
    backend_port = get_free_tcp_port()
    global PORT_KINESIS_BACKEND
    PORT_KINESIS_BACKEND = backend_port
    kinesis_data_dir_param = ""
    if config.dirs.data:
        kinesis_data_dir = "%s/kinesis" % config.dirs.data
        mkdir(kinesis_data_dir)
        kinesis_data_dir_param = "SHOULD_PERSIST_DATA=true PERSIST_PATH=%s" % kinesis_data_dir
    if not config.LS_LOG:
        log_level = "INFO"
    elif config.LS_LOG == "warning":
        log_level = "WARN"
    else:
        log_level = config.LS_LOG.upper()
    log_level_param = "LOG_LEVEL=%s" % log_level
    latency = config.KINESIS_LATENCY + "ms"
    latency_param = (
        "CREATE_STREAM_DURATION={l} DELETE_STREAM_DURATION={l} REGISTER_STREAM_CONSUMER_DURATION={l} "
        "START_STREAM_ENCRYPTION_DURATION={l} STOP_STREAM_ENCRYPTION_DURATION={l} "
        "DEREGISTER_STREAM_CONSUMER_DURATION={l} MERGE_SHARDS_DURATION={l} SPLIT_SHARD_DURATION={l} "
        "UPDATE_SHARD_COUNT_DURATION={l}"
    ).format(l=latency)
    if config.KINESIS_INITIALIZE_STREAMS != "":
        initialize_streams_param = "INITIALIZE_STREAMS=%s" % config.KINESIS_INITIALIZE_STREAMS
    else:
        initialize_streams_param = ""

    if kinesis_mock_bin.endswith(".jar"):
        cmd = "KINESIS_MOCK_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s java -XX:+UseG1GC -jar %s" % (
            backend_port,
            config.KINESIS_SHARD_LIMIT,
            latency_param,
            kinesis_data_dir_param,
            log_level_param,
            initialize_streams_param,
            kinesis_mock_bin,
        )
    else:
        chmod_r(kinesis_mock_bin, 0o777)
        cmd = "KINESIS_MOCK_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s %s --gc=G1" % (
            backend_port,
            config.KINESIS_SHARD_LIMIT,
            latency_param,
            kinesis_data_dir_param,
            log_level_param,
            initialize_streams_param,
            kinesis_mock_bin,
        )
    return _run_proxy_and_command(
        cmd=cmd,
        port=port,
        backend_port=backend_port,
        update_listener=update_listener,
        asynchronous=asynchronous,
    )


def start_kinesalite(port=None, asynchronous=False, update_listener=None):
    # install and apply patches
    install.install_kinesalite()
    apply_patches_kinesalite()
    # start up process
    backend_port = get_free_tcp_port()
    global PORT_KINESIS_BACKEND
    PORT_KINESIS_BACKEND = backend_port
    latency = config.KINESIS_LATENCY
    kinesis_data_dir_param = ""
    if config.dirs.data:
        kinesis_data_dir = "%s/kinesis" % config.dirs.data
        mkdir(kinesis_data_dir)
        kinesis_data_dir_param = "--path %s" % kinesis_data_dir
    cmd = (
        "%s/node_modules/kinesalite/cli.js --shardLimit %s --port %s"
        " --createStreamMs %s --deleteStreamMs %s --updateStreamMs %s %s"
    ) % (
        MODULE_MAIN_PATH,
        config.KINESIS_SHARD_LIMIT,
        backend_port,
        latency,
        latency,
        latency,
        kinesis_data_dir_param,
    )
    return _run_proxy_and_command(
        cmd=cmd,
        port=port,
        backend_port=backend_port,
        update_listener=update_listener,
        asynchronous=asynchronous,
    )


def check_kinesis(expect_shutdown=False, print_error=False):
    if expect_shutdown is False and kinesis_stopped.is_set():
        raise AssertionError("kinesis backend has stopped")
    out = None
    try:
        # check Kinesis
        wait_for_port_open(PORT_KINESIS_BACKEND, http_path="/", expect_success=False, sleep_time=1)
        endpoint_url = f"http://127.0.0.1:{PORT_KINESIS_BACKEND}"
        out = aws_stack.connect_to_service(
            service_name="kinesis", endpoint_url=endpoint_url
        ).list_streams()
    except Exception:
        if print_error:
            LOG.exception("Kinesis health check failed")
    if expect_shutdown:
        assert out is None or kinesis_stopped.is_set()
    else:
        assert not kinesis_stopped.is_set()
        assert out and isinstance(out.get("StreamNames"), list)


def kinesis_running():
    return PROCESS_THREAD is not None


def restart_kinesis():
    if PROCESS_THREAD:
        LOG.debug("Restarting Kinesis process ...")
        PROCESS_THREAD.stop()
        kinesis_stopped.wait()
        kinesis_stopped.clear()
        start_kinesis(asynchronous=True, update_listener=kinesis_listener.UPDATE_KINESIS)
        # giving the process some time to startup; TODO: to be replaced with service lifecycle plugin
        time.sleep(1)
3,300
4,283
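A hedged sketch of how these helpers are typically driven; the module import path below is a guess and is not shown in the file above.

# Hypothetical driver code (module path assumed):
from localstack.services.kinesis import kinesis_listener, kinesis_starter

kinesis_starter.start_kinesis(asynchronous=True, update_listener=kinesis_listener.UPDATE_KINESIS)
kinesis_starter.check_kinesis()  # raises AssertionError if the backend is not healthy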
/*
 * Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.topic.impl;

import com.hazelcast.cluster.Address;
import com.hazelcast.cluster.impl.MemberImpl;
import com.hazelcast.config.Config;
import com.hazelcast.config.TopicConfig;
import com.hazelcast.internal.cluster.ClusterService;
import com.hazelcast.internal.metrics.DynamicMetricsProvider;
import com.hazelcast.internal.metrics.MetricDescriptor;
import com.hazelcast.internal.metrics.MetricsCollectionContext;
import com.hazelcast.internal.monitor.impl.LocalTopicStatsImpl;
import com.hazelcast.internal.serialization.Data;
import com.hazelcast.internal.services.ManagedService;
import com.hazelcast.internal.services.RemoteService;
import com.hazelcast.internal.services.StatisticsAwareService;
import com.hazelcast.internal.util.ConstructorFunction;
import com.hazelcast.internal.util.HashUtil;
import com.hazelcast.internal.util.MapUtil;
import com.hazelcast.spi.impl.NodeEngine;
import com.hazelcast.spi.impl.NodeEngineImpl;
import com.hazelcast.spi.impl.eventservice.EventPublishingService;
import com.hazelcast.spi.impl.eventservice.EventRegistration;
import com.hazelcast.spi.impl.eventservice.EventService;
import com.hazelcast.spi.properties.ClusterProperty;
import com.hazelcast.topic.ITopic;
import com.hazelcast.topic.LocalTopicStats;
import com.hazelcast.topic.Message;
import com.hazelcast.topic.MessageListener;

import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import static com.hazelcast.internal.metrics.MetricDescriptorConstants.TOPIC_PREFIX;
import static com.hazelcast.internal.metrics.impl.ProviderHelper.provide;
import static com.hazelcast.internal.util.ConcurrencyUtil.CALLER_RUNS;
import static com.hazelcast.internal.util.ConcurrencyUtil.getOrPutSynchronized;

public class TopicService implements ManagedService, RemoteService, EventPublishingService,
        StatisticsAwareService<LocalTopicStats>, DynamicMetricsProvider {

    public static final String SERVICE_NAME = "hz:impl:topicService";
    public static final int ORDERING_LOCKS_LENGTH = 1000;

    private final ConcurrentMap<String, LocalTopicStatsImpl> statsMap = new ConcurrentHashMap<>();
    private final Lock[] orderingLocks = new Lock[ORDERING_LOCKS_LENGTH];
    private NodeEngine nodeEngine;

    private final ConstructorFunction<String, LocalTopicStatsImpl> localTopicStatsConstructorFunction =
            mapName -> new LocalTopicStatsImpl();

    private EventService eventService;
    private final AtomicInteger counter = new AtomicInteger(0);
    private Address localAddress;

    @Override
    public void init(NodeEngine nodeEngine, Properties properties) {
        this.nodeEngine = nodeEngine;
        this.localAddress = nodeEngine.getThisAddress();
        for (int i = 0; i < orderingLocks.length; i++) {
            orderingLocks[i] = new ReentrantLock();
        }
        eventService = nodeEngine.getEventService();

        boolean dsMetricsEnabled = nodeEngine.getProperties().getBoolean(ClusterProperty.METRICS_DATASTRUCTURES);
        if (dsMetricsEnabled) {
            ((NodeEngineImpl) nodeEngine).getMetricsRegistry().registerDynamicMetricsProvider(this);
        }
    }

    // only for testing
    public ConcurrentMap<String, LocalTopicStatsImpl> getStatsMap() {
        return statsMap;
    }

    @Override
    public void reset() {
        statsMap.clear();
    }

    @Override
    public void shutdown(boolean terminate) {
        reset();
    }

    public Lock getOrderLock(String key) {
        int index = getOrderLockIndex(key);
        return orderingLocks[index];
    }

    private int getOrderLockIndex(String key) {
        int hash = key.hashCode();
        return HashUtil.hashToIndex(hash, orderingLocks.length);
    }

    @Override
    public ITopic createDistributedObject(String name, UUID source, boolean local) {
        TopicConfig topicConfig = nodeEngine.getConfig().findTopicConfig(name);

        if (topicConfig.isGlobalOrderingEnabled()) {
            return new TotalOrderedTopicProxy(name, nodeEngine, this);
        } else {
            return new TopicProxy(name, nodeEngine, this);
        }
    }

    @Override
    public void destroyDistributedObject(String objectId, boolean local) {
        statsMap.remove(objectId);
        nodeEngine.getEventService().deregisterAllListeners(SERVICE_NAME, objectId);
    }

    @Override
    public void dispatchEvent(Object event, Object listener) {
        TopicEvent topicEvent = (TopicEvent) event;
        ClusterService clusterService = nodeEngine.getClusterService();
        MemberImpl member = clusterService.getMember(topicEvent.publisherAddress);
        if (member == null) {
            member = new MemberImpl.Builder(topicEvent.publisherAddress)
                    .version(nodeEngine.getVersion())
                    .build();
        }
        Message message = new DataAwareMessage(topicEvent.name, topicEvent.data, topicEvent.publishTime, member,
                nodeEngine.getSerializationService());
        incrementReceivedMessages(topicEvent.name);
        MessageListener messageListener = (MessageListener) listener;
        messageListener.onMessage(message);
    }

    public LocalTopicStatsImpl getLocalTopicStats(String name) {
        return getOrPutSynchronized(statsMap, name, statsMap, localTopicStatsConstructorFunction);
    }

    /**
     * Increments the number of published messages on the ITopic
     * with the name {@code topicName}.
     *
     * @param topicName the name of the {@link ITopic}
     */
    public void incrementPublishes(String topicName) {
        getLocalTopicStats(topicName).incrementPublishes();
    }

    /**
     * Increments the number of received messages on the ITopic
     * with the name {@code topicName}.
     *
     * @param topicName the name of the {@link ITopic}
     */
    public void incrementReceivedMessages(String topicName) {
        getLocalTopicStats(topicName).incrementReceives();
    }

    public void publishMessage(String topicName, Object payload, boolean multithreaded) {
        Collection<EventRegistration> registrations = eventService.getRegistrations(SERVICE_NAME, topicName);
        if (!registrations.isEmpty()) {
            Data payloadData = nodeEngine.toData(payload);
            TopicEvent topicEvent = new TopicEvent(topicName, payloadData, localAddress);
            int partitionId = multithreaded ? counter.incrementAndGet() : topicName.hashCode();
            eventService.publishEvent(SERVICE_NAME, registrations, topicEvent, partitionId);
        }
    }

    public UUID addLocalMessageListener(@Nonnull String name, @Nonnull MessageListener listener) {
        EventRegistration registration = eventService.registerLocalListener(TopicService.SERVICE_NAME, name, listener);
        if (registration == null) {
            return null;
        }
        return registration.getId();
    }

    public UUID addMessageListener(@Nonnull String name, @Nonnull MessageListener listener) {
        return eventService.registerListener(TopicService.SERVICE_NAME, name, listener).getId();
    }

    public Future<UUID> addMessageListenerAsync(@Nonnull String name, @Nonnull MessageListener listener) {
        return eventService.registerListenerAsync(TopicService.SERVICE_NAME, name, listener)
                .thenApplyAsync(EventRegistration::getId, CALLER_RUNS);
    }

    public boolean removeMessageListener(@Nonnull String name, @Nonnull UUID registrationId) {
        return eventService.deregisterListener(TopicService.SERVICE_NAME, name, registrationId);
    }

    public Future<Boolean> removeMessageListenerAsync(@Nonnull String name, @Nonnull UUID registrationId) {
        return eventService.deregisterListenerAsync(TopicService.SERVICE_NAME, name, registrationId);
    }

    @Override
    public Map<String, LocalTopicStats> getStats() {
        Map<String, LocalTopicStats> topicStats = MapUtil.createHashMap(statsMap.size());
        Config config = nodeEngine.getConfig();
        for (Map.Entry<String, LocalTopicStatsImpl> statEntry : statsMap.entrySet()) {
            String name = statEntry.getKey();
            if (config.getTopicConfig(name).isStatisticsEnabled()) {
                topicStats.put(name, statEntry.getValue());
            }
        }
        return topicStats;
    }

    @Override
    public void provideDynamicMetrics(MetricDescriptor descriptor, MetricsCollectionContext context) {
        provide(descriptor, context, TOPIC_PREFIX, getStats());
    }
}
3,271
521
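A brief, hedged illustration of the service-level flow (register a listener, publish, deregister); the TopicService instance is assumed to come from the NodeEngine, and the topic name is made up.

import java.util.UUID;
import com.hazelcast.topic.MessageListener;

class TopicServiceUsageSketch {
    // Hypothetical usage, not part of the Hazelcast sources above.
    static void demo(TopicService topicService) {
        // register a listener, publish one payload to the same topic name, then clean up
        MessageListener<Object> listener = message -> System.out.println(message.getMessageObject());
        UUID id = topicService.addMessageListener("orders", listener);
        topicService.publishMessage("orders", "hello", false);
        topicService.removeMessageListener("orders", id);
    }
}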
# -*- coding: utf-8 -*-
# $Id: wuiadminbuild.py $

"""
Test Manager WUI - Builds.
"""

__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation

This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.

The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.

You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 118412 $"

# Validation Kit imports.
from testmanager.webui.wuicontentbase import WuiFormContentBase, WuiListContentBase, WuiTmLink, WuiBuildLogLink, \
                                             WuiSvnLinkWithTooltip;
from testmanager.core.build import BuildData, BuildCategoryLogic;
from testmanager.core.buildblacklist import BuildBlacklistData;
from testmanager.core.db import isDbTimestampInfinity;


class WuiAdminBuild(WuiFormContentBase):
    """
    WUI Build HTML content generator.
    """

    def __init__(self, oData, sMode, oDisp):
        if sMode == WuiFormContentBase.ksMode_Add:
            sTitle = 'Add Build'
        elif sMode == WuiFormContentBase.ksMode_Edit:
            sTitle = 'Modify Build - #%s' % (oData.idBuild,);
        else:
            assert sMode == WuiFormContentBase.ksMode_Show;
            sTitle = 'Build - #%s' % (oData.idBuild,);
        WuiFormContentBase.__init__(self, oData, sMode, 'Build', oDisp, sTitle);

    def _populateForm(self, oForm, oData):
        oForm.addIntRO (BuildData.ksParam_idBuild, oData.idBuild, 'Build ID')
        oForm.addTimestampRO(BuildData.ksParam_tsCreated, oData.tsCreated, 'Created')
        oForm.addTimestampRO(BuildData.ksParam_tsEffective, oData.tsEffective, 'Last changed')
        oForm.addTimestampRO(BuildData.ksParam_tsExpire, oData.tsExpire, 'Expires (excl)')
        oForm.addIntRO (BuildData.ksParam_uidAuthor, oData.uidAuthor, 'Changed by UID')
        oForm.addComboBox (BuildData.ksParam_idBuildCategory, oData.idBuildCategory, 'Build category',
                           BuildCategoryLogic(self._oDisp.getDb()).fetchForCombo());
        oForm.addInt (BuildData.ksParam_iRevision, oData.iRevision, 'Revision')
        oForm.addText (BuildData.ksParam_sVersion, oData.sVersion, 'Version')
        oForm.addWideText (BuildData.ksParam_sLogUrl, oData.sLogUrl, 'Log URL')
        oForm.addWideText (BuildData.ksParam_sBinaries, oData.sBinaries, 'Binaries')
        oForm.addCheckBox (BuildData.ksParam_fBinariesDeleted, oData.fBinariesDeleted, 'Binaries deleted')
        oForm.addSubmit()
        return True;


class WuiAdminBuildList(WuiListContentBase):
    """
    WUI Admin Build List Content Generator.
    """

    def __init__(self, aoEntries, iPage, cItemsPerPage, tsEffective, fnDPrint, oDisp, aiSelectedSortColumns = None):
        WuiListContentBase.__init__(self, aoEntries, iPage, cItemsPerPage, tsEffective,
                                    sTitle = 'Builds', sId = 'builds',
                                    fnDPrint = fnDPrint, oDisp = oDisp,
                                    aiSelectedSortColumns = aiSelectedSortColumns);
        self._asColumnHeaders = ['ID', 'Product', 'Branch', 'Version', 'Type',
                                 'OS(es)', 'Author', 'Added', 'Files', 'Action' ];
        self._asColumnAttribs = ['align="right"', 'align="center"', 'align="center"', 'align="center"',
                                 'align="center"', 'align="center"', 'align="center"', 'align="center"',
                                 '', 'align="center"'];

    def _formatListEntry(self, iEntry):
        from testmanager.webui.wuiadmin import WuiAdmin
        oEntry = self._aoEntries[iEntry];

        aoActions = [];
        if oEntry.sLogUrl is not None:
            aoActions.append(WuiBuildLogLink(oEntry.sLogUrl, 'Build Log'));

        dParams = {
            WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildBlacklistAdd,
            BuildBlacklistData.ksParam_sProduct: oEntry.oCat.sProduct,
            BuildBlacklistData.ksParam_sBranch: oEntry.oCat.sBranch,
            BuildBlacklistData.ksParam_asTypes: oEntry.oCat.sType,
            BuildBlacklistData.ksParam_asOsArches: oEntry.oCat.asOsArches,
            BuildBlacklistData.ksParam_iFirstRevision: oEntry.iRevision,
            BuildBlacklistData.ksParam_iLastRevision: oEntry.iRevision
        }

        if self._oDisp is None or not self._oDisp.isReadOnlyUser():
            aoActions += [
                WuiTmLink('Blacklist', WuiAdmin.ksScriptName, dParams),
                WuiTmLink('Details', WuiAdmin.ksScriptName,
                          { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildDetails,
                            BuildData.ksParam_idBuild: oEntry.idBuild,
                            WuiAdmin.ksParamEffectiveDate: self._tsEffectiveDate, }),
                WuiTmLink('Clone', WuiAdmin.ksScriptName,
                          { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildClone,
                            BuildData.ksParam_idBuild: oEntry.idBuild,
                            WuiAdmin.ksParamEffectiveDate: self._tsEffectiveDate, }),
            ];
            if isDbTimestampInfinity(oEntry.tsExpire):
                aoActions += [
                    WuiTmLink('Modify', WuiAdmin.ksScriptName,
                              { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildEdit,
                                BuildData.ksParam_idBuild: oEntry.idBuild }),
                    WuiTmLink('Remove', WuiAdmin.ksScriptName,
                              { WuiAdmin.ksParamAction: WuiAdmin.ksActionBuildDoRemove,
                                BuildData.ksParam_idBuild: oEntry.idBuild },
                              sConfirm = 'Are you sure you want to remove build #%d?' % (oEntry.idBuild,) ),
                ];

        return [ oEntry.idBuild,
                 oEntry.oCat.sProduct,
                 oEntry.oCat.sBranch,
                 WuiSvnLinkWithTooltip(oEntry.iRevision, oEntry.oCat.sRepository,
                                       sName = '%s r%s' % (oEntry.sVersion, oEntry.iRevision,)),
                 oEntry.oCat.sType,
                 ' '.join(oEntry.oCat.asOsArches),
                 'batch' if oEntry.uidAuthor is None else oEntry.uidAuthor,
                 self.formatTsShort(oEntry.tsCreated),
                 oEntry.sBinaries if not oEntry.fBinariesDeleted else '<Deleted>',
                 aoActions,
        ];
3,584
445
# encoding: utf-8
"""
Timezone utilities

Just UTC-awareness right now
"""

# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

from datetime import tzinfo, timedelta, datetime

# constant for zero offset
ZERO = timedelta(0)


class tzUTC(tzinfo):
    """tzinfo object for UTC (zero offset)"""

    def utcoffset(self, d):
        return ZERO

    def dst(self, d):
        return ZERO


UTC = tzUTC()


def utc_aware(unaware):
    """decorator for adding UTC tzinfo to datetime's utcfoo methods"""
    def utc_method(*args, **kwargs):
        dt = unaware(*args, **kwargs)
        return dt.replace(tzinfo=UTC)
    return utc_method


utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)
utcnow = utc_aware(datetime.utcnow)


def isoformat(dt):
    """Return iso-formatted timestamp

    Like .isoformat(), but uses Z for UTC instead of +00:00
    """
    return dt.isoformat().replace('+00:00', 'Z')
369
2,329
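A quick, hedged usage sketch of the module above; importing it as tz is an assumption, since the real package path is not shown.

import tz  # hypothetical module name for the file above

now = tz.utcnow()                 # tz-aware: utcoffset() is the module's ZERO constant
assert now.utcoffset().total_seconds() == 0
print(tz.isoformat(now))          # ISO timestamp ending in 'Z' rather than '+00:00'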
<gh_stars>1000+
/*
 * Copyright 2010-2012 VMware and contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springsource.loaded.agent;

import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.security.ProtectionDomain;
import java.util.ArrayList;
import java.util.List;

import org.springsource.loaded.GlobalConfiguration;
import org.springsource.loaded.LoadtimeInstrumentationPlugin;
import org.springsource.loaded.ReloadEventProcessorPlugin;

/**
 *
 *
 * @author <NAME>
 * @since 0.7.3
 */
public class GrailsPlugin implements LoadtimeInstrumentationPlugin, ReloadEventProcessorPlugin {

    // private static Logger log = Logger.getLogger(GrailsPlugin.class.getName());

    private static final String DefaultClassPropertyFetcher = "org/codehaus/groovy/grails/commons/ClassPropertyFetcher";

    private static List<WeakReference<Object>> classPropertyFetcherInstances = new ArrayList<WeakReference<Object>>();

    private static ReferenceQueue<Object> rq = new ReferenceQueue<Object>();

    /**
     * @return true for types this plugin would like to change on startup
     */
    public boolean accept(String slashedTypeName, ClassLoader classLoader, ProtectionDomain protectionDomain, byte[] bytes) {
        // TODO take classloader into account?
        return false;//DefaultClassPropertyFetcher.equals(slashedTypeName);
    }

    public byte[] modify(String slashedClassName, ClassLoader classLoader, byte[] bytes) {
        return PluginUtils.addInstanceTracking(bytes, "org/springsource/loaded/agent/GrailsPlugin");
    }

    // called by the modified code
    public static void recordInstance(Object obj) {
        // obj will be a ClassPropertyFetcher instance
        System.err.println("new instance queued " + System.identityHashCode(obj));
        // TODO urgent - race condition here, can create Co-modification problem if adding whilst another thread is processing
        classPropertyFetcherInstances.add(new WeakReference<Object>(obj, rq));
    }

    private Field classPropertyFetcher_clazz;

    private Method classPropertyFetcher_init;

    public void reloadEvent(String typename, Class<?> reloadedClazz, String versionsuffix) {
        // Clear references to objects that have been GCd
        // Do they ever get cleared out??
        Reference<?> r = rq.poll();
        while (r != null) {
            classPropertyFetcherInstances.remove(r);
            r = rq.poll();
        }
        try {
            // Currently not needing to track classPropertyFetcherInstances
            for (WeakReference<Object> ref : classPropertyFetcherInstances) {
                Object instance = ref.get();
                if (instance != null) {
                    if (classPropertyFetcher_clazz == null) {
                        classPropertyFetcher_clazz = instance.getClass().getDeclaredField("clazz");
                    }
                    classPropertyFetcher_clazz.setAccessible(true);
                    Class<?> clazz = (Class<?>) classPropertyFetcher_clazz.get(instance);
                    if (clazz == reloadedClazz) {
                        if (classPropertyFetcher_init == null) {
                            classPropertyFetcher_init = instance.getClass().getDeclaredMethod("init");
                        }
                        classPropertyFetcher_init.setAccessible(true);
                        classPropertyFetcher_init.invoke(instance);
                        if (GlobalConfiguration.debugplugins) {
                            System.err.println("GrailsPlugin: re-initing classPropertyFetcher instance for "
                                    + clazz.getName() + " " + System.identityHashCode(instance));
                        }
                        // System.out.println("re-initing " + reloadedClazz.getName());
                    }
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public boolean shouldRerunStaticInitializer(String typename, Class<?> clazz, String encodedTimestamp) {
        return false;
    }

}
1,346
35,083
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hive.service.auth;

import javax.security.sasl.AuthenticationException;

/**
 * This class helps select a {@link PasswdAuthenticationProvider} for a given {@code AuthMethod}.
 */
public final class AuthenticationProviderFactory {

  public enum AuthMethods {
    LDAP("LDAP"),
    PAM("PAM"),
    CUSTOM("CUSTOM"),
    NONE("NONE");

    private final String authMethod;

    AuthMethods(String authMethod) {
      this.authMethod = authMethod;
    }

    public String getAuthMethod() {
      return authMethod;
    }

    public static AuthMethods getValidAuthMethod(String authMethodStr) throws AuthenticationException {
      for (AuthMethods auth : AuthMethods.values()) {
        if (authMethodStr.equals(auth.getAuthMethod())) {
          return auth;
        }
      }
      throw new AuthenticationException("Not a valid authentication method");
    }
  }

  private AuthenticationProviderFactory() {
  }

  public static PasswdAuthenticationProvider getAuthenticationProvider(AuthMethods authMethod)
      throws AuthenticationException {
    if (authMethod == AuthMethods.LDAP) {
      return new LdapAuthenticationProviderImpl();
    } else if (authMethod == AuthMethods.PAM) {
      return new PamAuthenticationProviderImpl();
    } else if (authMethod == AuthMethods.CUSTOM) {
      return new CustomAuthenticationProviderImpl();
    } else if (authMethod == AuthMethods.NONE) {
      return new AnonymousAuthenticationProviderImpl();
    } else {
      throw new AuthenticationException("Unsupported authentication method");
    }
  }
}
700
2,338
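A hedged usage sketch: resolving a provider from a configuration string and authenticating with it. The Authenticate call assumes the PasswdAuthenticationProvider interface referenced above, which is not defined in this file.

import javax.security.sasl.AuthenticationException;

class AuthProviderUsageSketch {
  // Hypothetical caller, not part of the Hive sources above.
  static void login(String method, String user, String password) throws AuthenticationException {
    AuthenticationProviderFactory.AuthMethods authMethod =
        AuthenticationProviderFactory.AuthMethods.getValidAuthMethod(method);
    PasswdAuthenticationProvider provider =
        AuthenticationProviderFactory.getAuthenticationProvider(authMethod);
    provider.Authenticate(user, password);  // assumed to throw AuthenticationException on bad credentials
  }
}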
//===-- ABIX86_64.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLDB_SOURCE_PLUGINS_ABI_X86_ABIX86_64_H
#define LLDB_SOURCE_PLUGINS_ABI_X86_ABIX86_64_H

#include "lldb/Target/ABI.h"
#include "lldb/lldb-private.h"

class ABIX86_64 : public lldb_private::MCBasedABI {
protected:
  std::string GetMCName(std::string name) override {
    MapRegisterName(name, "stmm", "st");
    return name;
  }

private:
  using lldb_private::MCBasedABI::MCBasedABI;
};

#endif // LLDB_SOURCE_PLUGINS_ABI_X86_ABIX86_64_H
300
1,085
<gh_stars>1000+
/*
 * Copyright (C) 2017-2019 Dremio Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.dremio.dac.service.flight;

import org.apache.arrow.flight.Action;
import org.apache.arrow.flight.ActionType;
import org.apache.arrow.flight.Criteria;
import org.apache.arrow.flight.FlightDescriptor;
import org.apache.arrow.flight.FlightInfo;
import org.apache.arrow.flight.FlightProducer;
import org.apache.arrow.flight.FlightStream;
import org.apache.arrow.flight.PutResult;
import org.apache.arrow.flight.Result;
import org.apache.arrow.flight.Ticket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.dremio.common.AutoCloseables;
import com.dremio.common.exceptions.GrpcExceptionUtil;
import com.dremio.common.exceptions.UserException;
import com.dremio.exec.proto.FlightProtos.CoordinatorFlightTicket;
import com.dremio.exec.proto.FlightProtos.CoordinatorFlightTicket.IdentifierCase;
import com.dremio.service.jobs.JobsFlightProducer;
import com.dremio.service.sysflight.SysFlightProducer;

import io.grpc.Status;

/**
 * Proxy Flight Producer that delegates to sys-flight or jobs producer based on the ticket,
 * as we can have only one flight producer on conduit
 */
public class CoordinatorFlightProducer implements FlightProducer, AutoCloseable {
  private static final Logger LOGGER = LoggerFactory.getLogger(CoordinatorFlightProducer.class);

  private final JobsFlightProducer jobsFlightProducer;
  private final SysFlightProducer sysFlightProducer;

  public CoordinatorFlightProducer(JobsFlightProducer jobsFlightProducer,
                                   SysFlightProducer sysFlightProducer) {
    this.jobsFlightProducer = jobsFlightProducer;
    this.sysFlightProducer = sysFlightProducer;
  }

  @Override
  public void getStream(CallContext callContext, Ticket ticket, ServerStreamListener listener) {
    try {
      final CoordinatorFlightTicket cticket = CoordinatorFlightTicket.parseFrom(ticket.getBytes());
      if (cticket.getIdentifierCase() == IdentifierCase.JOBS_FLIGHT_TICKET) {
        LOGGER.debug("Got getStream request for JOBS_FLIGHT_TICKET ticket: {}", ticket);
        jobsFlightProducer.getStream(callContext, ticket, listener);
      } else {
        LOGGER.debug("Got getStream request for SYS_FLIGHT_TICKET ticket: {}", ticket);
        sysFlightProducer.getStream(callContext, ticket, listener);
      }
    } catch (UserException ue) {
      LOGGER.error("Exception while getStream for ticket {}: ", ticket, ue);
      listener.error(GrpcExceptionUtil.toStatusRuntimeException(ue));
    } catch (Exception e) {
      LOGGER.error("Exception while getStream for ticket {}: ", ticket, e);
      listener.error(Status.UNKNOWN.withCause(e).withDescription(e.getMessage()).asException());
    }
  }

  @Override
  public void listFlights(CallContext callContext, Criteria criteria, StreamListener<FlightInfo> listener) {
    LOGGER.debug("Got listFlights request");
    try {
      sysFlightProducer.listFlights(callContext, criteria, listener);
    } catch (UserException ue) {
      LOGGER.error("Exception while listFlights: ", ue);
      listener.onError(GrpcExceptionUtil.toStatusRuntimeException(ue));
    } catch (Exception e) {
      LOGGER.error("Exception while listFlights: ", e);
      listener.onError(Status.UNKNOWN.withCause(e).withDescription(e.getMessage()).asException());
    }
  }

  @Override
  public FlightInfo getFlightInfo(CallContext callContext, FlightDescriptor desc) {
    LOGGER.debug("Got getFlightInfo request for descriptor: {}", desc);
    try {
      return sysFlightProducer.getFlightInfo(callContext, desc);
    } catch (UserException e) {
      LOGGER.error("Exception while getFlightInfo: ", e);
      throw e;
    }
  }

  @Override
  public Runnable acceptPut(CallContext callContext, FlightStream flightStream, StreamListener<PutResult> listener) {
    throw Status.UNIMPLEMENTED.asRuntimeException();
  }

  @Override
  public void doAction(CallContext callContext, Action action, StreamListener<Result> listener) {
    throw Status.UNIMPLEMENTED.asRuntimeException();
  }

  @Override
  public void listActions(CallContext callContext, StreamListener<ActionType> listener) {
    throw Status.UNIMPLEMENTED.asRuntimeException();
  }

  @Override
  public void close() throws Exception {
    AutoCloseables.close(jobsFlightProducer, sysFlightProducer);
  }
}
1,547