max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
5,169 | {
"name": "RMCircularAlert",
"version": "0.2.0",
"summary": "A circular AlertView with animations",
"description": "A circular AlertView with animations. Really easy to use!",
"homepage": "https://github.com/raulmo1337/RMCircularAlert",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/raulmo1337/RMCircularAlert.git",
"tag": "0.2.0"
},
"platforms": {
"ios": "8.0"
},
"source_files": "RMCircularAlert/Classes/**/*",
"resource_bundles": {
"RMCircularAlert": [
"RMCircularAlert/Assets/**/*.xcassets"
]
},
"frameworks": [
"UIKit",
"MapKit"
]
}
| 305 |
578 | <reponame>lllrrr2/bk-job
/*
* Tencent is pleased to support the open source community by making BK-JOB蓝鲸智云作业平台 available.
*
* Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
*
* BK-JOB蓝鲸智云作业平台 is licensed under the MIT License.
*
* License for BK-JOB蓝鲸智云作业平台:
* --------------------------------------------------------------------
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
* to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
* THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
package com.tencent.bk.job.manage.api.inner.impl;
import com.tencent.bk.job.common.model.InternalResponse;
import com.tencent.bk.job.manage.api.inner.ServiceCheckScriptResource;
import com.tencent.bk.job.manage.common.consts.script.ScriptTypeEnum;
import com.tencent.bk.job.manage.model.dto.ScriptCheckResultItemDTO;
import com.tencent.bk.job.manage.model.inner.ServiceScriptCheckResultItemDTO;
import com.tencent.bk.job.manage.model.inner.request.ServiceCheckScriptRequest;
import com.tencent.bk.job.manage.service.ScriptCheckService;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RestController;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
@Slf4j
@RestController
public class ServiceCheckScriptResourceImpl implements ServiceCheckScriptResource {

    private final ScriptCheckService scriptCheckService;

    @Autowired
    public ServiceCheckScriptResourceImpl(ScriptCheckService scriptCheckService) {
        this.scriptCheckService = scriptCheckService;
    }

    /**
     * Runs the dangerous-rule check against the submitted script.
     * <p>
     * Returns an empty result list when there is no script content or the
     * script type cannot be resolved; otherwise each internal check result is
     * mapped to its service-layer DTO.
     */
    @Override
    public InternalResponse<List<ServiceScriptCheckResultItemDTO>> check(ServiceCheckScriptRequest checkScriptRequest) {
        String scriptContent = checkScriptRequest.getScriptContent();
        if (StringUtils.isEmpty(scriptContent)) {
            // Nothing to scan.
            return InternalResponse.buildSuccessResp(Collections.emptyList());
        }
        // NOTE(review): assumes ScriptTypeEnum.valueOf returns null (rather
        // than throwing) for unknown type codes — confirm against the enum.
        ScriptTypeEnum scriptType = null;
        if (checkScriptRequest.getScriptType() != null) {
            scriptType = ScriptTypeEnum.valueOf(checkScriptRequest.getScriptType());
        }
        if (scriptType == null) {
            // Unresolvable script type: skip checking instead of failing.
            return InternalResponse.buildSuccessResp(Collections.emptyList());
        }
        List<ServiceScriptCheckResultItemDTO> resultItems =
            scriptCheckService.checkScriptWithDangerousRule(scriptType, scriptContent)
                .stream()
                .map(ScriptCheckResultItemDTO::toServiceScriptCheckResultDTO)
                .collect(Collectors.toList());
        return InternalResponse.buildSuccessResp(resultItems);
    }
}
| 1,136 |
345 | <filename>huntbugs/src/main/java/one/util/huntbugs/detect/BadMonitorObject.java
/*
* Copyright 2016 HuntBugs contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package one.util.huntbugs.detect;
import com.strobel.assembler.metadata.TypeReference;
import com.strobel.decompiler.ast.AstCode;
import com.strobel.decompiler.ast.Expression;
import com.strobel.decompiler.ast.Variable;
import one.util.huntbugs.flow.ValuesFlow;
import one.util.huntbugs.registry.MethodContext;
import one.util.huntbugs.registry.anno.AstNodes;
import one.util.huntbugs.registry.anno.AstVisitor;
import one.util.huntbugs.registry.anno.WarningDefinition;
import one.util.huntbugs.util.Types;
import one.util.huntbugs.warning.Role.TypeRole;
import one.util.huntbugs.warning.Roles;
/**
* @author <NAME>
*
*/
@WarningDefinition(category="Multithreading", name="SynchronizationOnBoolean", maxScore=70)
@WarningDefinition(category="Multithreading", name="SynchronizationOnBoxedNumber", maxScore=65)
@WarningDefinition(category="Multithreading", name="SynchronizationOnUnsharedBoxed", maxScore=40)
public class BadMonitorObject {
    private static final TypeRole MONITOR_TYPE = TypeRole.forName("MONITOR_TYPE");

    /**
     * Inspects every MonitorEnter expression and reports synchronization on a
     * boxed primitive: a freshly-created boxed object (never shared), a
     * Boolean (interned, effectively global), or any other boxed number.
     */
    @AstVisitor(nodes=AstNodes.EXPRESSIONS)
    public void visit(Expression expr, MethodContext mc) {
        if(expr.getCode() != AstCode.MonitorEnter) {
            return;
        }
        Expression monitor = expr.getArguments().get(0);
        // Follow compiler-generated loads back to the value's actual source.
        if(monitor.getCode() == AstCode.Load && ((Variable)monitor.getOperand()).isGenerated()) {
            monitor = ValuesFlow.getSource(monitor);
        }
        TypeReference monitorType = monitor.getInferredType();
        if(monitorType == null || !Types.isBoxed(monitorType)) {
            return;
        }
        final String warningType;
        if(monitor.getCode() == AstCode.InitObject) {
            // Synchronizing on an object allocated right here: never shared.
            warningType = "SynchronizationOnUnsharedBoxed";
        } else if(monitorType.getInternalName().equals("java/lang/Boolean")) {
            warningType = "SynchronizationOnBoolean";
        } else {
            warningType = "SynchronizationOnBoxedNumber";
        }
        mc.report(warningType, 0, monitor, MONITOR_TYPE.create(monitorType), Roles.EXPRESSION.create(monitor));
    }
}
| 1,017 |
#ifndef MLIBC_IN_ADDR_H
#define MLIBC_IN_ADDR_H

/* uint32_t requires <stdint.h>; without it this header does not compile
 * standalone. */
#include <stdint.h>

/* IPv4 address type, per POSIX <netinet/in.h>: an unsigned 32-bit integer. */
typedef uint32_t in_addr_t;

#endif // MLIBC_IN_ADDR_H
| 58 |
326 | package org.araymond.joal.web.config.obfuscation;
import org.apache.http.NoHttpResponseException;
import org.araymond.joal.TestConstant;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.web.client.TestRestTemplate;
import org.springframework.boot.web.server.LocalServerPort;
import org.springframework.context.annotation.Import;
import org.springframework.http.ResponseEntity;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.client.ResourceAccessException;
import javax.inject.Inject;
import static org.assertj.core.api.Assertions.*;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
@RunWith(SpringRunner.class)
@SpringBootTest(
    classes = {
        AbortNonPrefixedRequestFilter.class,
        org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration.class,
        org.springframework.boot.autoconfigure.context.ConfigurationPropertiesAutoConfiguration.class,
        org.springframework.boot.autoconfigure.context.MessageSourceAutoConfiguration.class,
        org.springframework.boot.autoconfigure.web.servlet.DispatcherServletAutoConfiguration.class,
        org.springframework.boot.autoconfigure.web.servlet.ServletWebServerFactoryAutoConfiguration.class,
        org.springframework.boot.autoconfigure.web.servlet.HttpEncodingAutoConfiguration.class,
        org.springframework.boot.autoconfigure.http.HttpMessageConvertersAutoConfiguration.class,
        org.springframework.boot.autoconfigure.web.servlet.WebMvcAutoConfiguration.class
    },
    webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT,
    properties = {
        "spring.main.web-environment=true",
        "joal.ui.path.prefix=" + TestConstant.UI_PATH_PREFIX
    }
)
@Import({ AbortNonPrefixedRequestFilterTest.UnprefixedController.class, AbortNonPrefixedRequestFilterTest.PrefixedController.class })
public class AbortNonPrefixedRequestFilterTest {

    @LocalServerPort
    private int port;

    @Inject
    private TestRestTemplate restTemplate;

    /** Endpoint registered WITHOUT the obfuscation prefix; the filter must block it. */
    @RestController
    public static class UnprefixedController {
        @RequestMapping(path = "/hello", method = RequestMethod.GET)
        public String hello() {
            return "this should not been reached :)";
        }
    }

    /** Endpoint registered WITH the obfuscation prefix; requests must pass through. */
    @RestController
    public static class PrefixedController {
        @RequestMapping(path = "/" + TestConstant.UI_PATH_PREFIX + "/hello", method = RequestMethod.GET)
        public String hello() {
            return "hello prefixed";
        }
    }

    @Test
    public void shouldHaveNoResponseFromUnprefixedRequest() {
        try {
            // The filter aborts the connection without writing any HTTP
            // response, so the client surfaces a ResourceAccessException
            // caused by NoHttpResponseException. The return value is
            // deliberately not assigned (it was an unused local before):
            // reaching the next line at all is the failure condition.
            this.restTemplate.getForEntity(
                "http://localhost:" + port + "/hello",
                String.class
            );
            fail("shouldn't have had a response");
        } catch (final ResourceAccessException e) {
            assertThat(e).hasCauseInstanceOf(NoHttpResponseException.class);
        }
    }

    @Test
    public void shouldHaveResponseFromPrefixedRequest() {
        final ResponseEntity<String> response = this.restTemplate.getForEntity(
            "http://localhost:" + port + "/" + TestConstant.UI_PATH_PREFIX + "/hello",
            String.class
        );
        assertThat(response.getStatusCodeValue()).isEqualTo(200);
        assertThat(response.getBody()).isEqualTo("hello prefixed");
    }
}
| 1,450 |
1,302 | <gh_stars>1000+
#include "src/dictionaries/node_webrtc/rtc_stats_response_init.h"
#include <node-addon-api/napi.h>
#include "src/functional/validation.h"
#include "src/interfaces/rtc_stats_response.h"
namespace node_webrtc {
// Converts an RTCStatsResponseInit into the underlying napi value of a new
// RTCStatsResponse wrapper. `pair.first` is the Napi::Env; `pair.second` is
// a nested pair forwarded as the two arguments of RTCStatsResponse::Create
// (presumably timestamp and stats reports — confirm against that factory).
TO_NAPI_IMPL(RTCStatsResponseInit, pair) {
  Napi::EscapableHandleScope scope(pair.first);
  // Escape() lets the created handle outlive this scope; Pure() wraps the
  // result in the project's Validation type (src/functional/validation.h).
  return Pure(scope.Escape(RTCStatsResponse::Create(pair.second.first, pair.second.second)->Value()));
}
} // namespace node_webrtc
| 174 |
348 | <filename>docs/data/leg-t2/027/02702602.json
{"nom":"Saint-Sébastien-de-Morsent","circ":"2ème circonscription","dpt":"Eure","inscrits":3761,"abs":2039,"votants":1722,"blancs":152,"nuls":32,"exp":1538,"res":[{"nuance":"REM","nom":"M. <NAME>","voix":1099},{"nuance":"FN","nom":"M. <NAME>","voix":439}]} | 127 |
438 | <reponame>stillmatic/plaitpy
import perf
from plaitpy.fakerb import decode
# Number of decode() calls per batch; also passed to timeit() as inner_loops
# so reported timings are per-decode rather than per-batch.
iter_count=1000

def get_names():
    """Decode `iter_count` fake names via fakerb and return them as a list."""
    return [decode("#{name.name}") for x in range(iter_count)]
def run_bench():
    """Benchmark get_names() with the `perf` runner.

    inner_loops=iter_count tells perf that each call of get_names() performs
    iter_count decode operations, so results are reported per decode.
    """
    bench_runner = perf.Runner()
    setup_stmt = "from __main__ import get_names"
    bench_runner.timeit(
        "get_names",
        "get_names()",
        setup_stmt,
        inner_loops=iter_count,
    )
# Run the benchmark when executed directly (perf re-imports __main__).
if __name__ == "__main__":
    run_bench()

# Sample output from earlier runs (CPython vs. PyPy), kept for reference:
"""
okay@chalk:~/tonka/src/plait.py$ python tests/perftest.py
.....................
get_names: Mean +- std dev: 9.81 us +- 0.45 us
okay@chalk:~/tonka/src/plait.py$ pypy tests/perftest.py
.........
get_names: Mean +- std dev: 2.24 us +- 0.12 us
"""
| 266 |
4,262 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.test.cdi;
import java.lang.reflect.Type;
import javax.enterprise.inject.spi.BeanManager;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.Statement;
/**
 * JUnit {@link Statement} that resolves each parameter of a test method from
 * the CDI {@link BeanManager} and then invokes the method with the resolved
 * references.
 */
final class FrameworkMethodCdiInjection extends Statement {

    private final FrameworkMethod method;
    private final Object test;
    private final CamelCdiContext context;

    FrameworkMethodCdiInjection(FrameworkMethod method, Object test, CamelCdiContext context) {
        this.method = method;
        this.test = test;
        this.context = context;
    }

    @Override
    public void evaluate() throws Throwable {
        BeanManager beanManager = context.getBeanManager();
        Type[] parameterTypes = method.getMethod().getGenericParameterTypes();
        Object[] arguments = new Object[parameterTypes.length];
        for (int index = 0; index < arguments.length; index++) {
            // TODO: use a proper CreationalContext...
            arguments[index] = beanManager.getInjectableReference(
                new FrameworkMethodInjectionPoint(method.getMethod(), index, beanManager),
                beanManager.createCreationalContext(null));
        }
        method.invokeExplosively(test, arguments);
    }
}
| 622 |
575 | <gh_stars>100-1000
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/platform/bindings/trace_wrapper_v8_reference.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_testing.h"
#include "third_party/blink/renderer/platform/heap/persistent.h"
namespace blink {
namespace {
using TraceWrapperV8ReferenceTest = BindingTestSupportingGC;
// Garbage-collected holder owning a single TraceWrapperV8Reference. Provides
// copy and move constructors so the tests below can exercise copy/move
// semantics of the reference.
class TraceWrapperV8ReferenceHolder final
    : public GarbageCollected<TraceWrapperV8ReferenceHolder> {
 public:
  TraceWrapperV8ReferenceHolder() = default;

  TraceWrapperV8ReferenceHolder(v8::Isolate* isolate,
                                v8::Local<v8::Value> value)
      : value_(isolate, value) {}

  // Move: leaves `other`'s reference empty.
  TraceWrapperV8ReferenceHolder(TraceWrapperV8ReferenceHolder&& other)
      : value_(std::move(other.value_)) {}

  // Copy: both holders reference the same V8 value afterwards.
  TraceWrapperV8ReferenceHolder(const TraceWrapperV8ReferenceHolder& other)
      : value_(other.value_) {}

  virtual void Trace(Visitor* visitor) const { visitor->Trace(value_); }

  TraceWrapperV8Reference<v8::Value>* ref() { return &value_; }

 private:
  TraceWrapperV8Reference<v8::Value> value_;
};
// Allocates a fresh V8 object, stores it in a new holder, and registers a
// weak v8::Persistent observer so tests can detect when V8 collects the
// object (the observer becomes empty) without keeping it alive.
void CreateObject(v8::Isolate* isolate,
                  Persistent<TraceWrapperV8ReferenceHolder>* holder,
                  v8::Persistent<v8::Value>* observer) {
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Value> value = v8::Object::New(isolate);
  *holder = MakeGarbageCollected<TraceWrapperV8ReferenceHolder>(isolate, value);
  observer->Reset(isolate, value);
  observer->SetWeak();  // weak: does not prevent collection
}
} // namespace
// A default-constructed holder starts with an empty reference.
TEST_F(TraceWrapperV8ReferenceTest, DefaultCtorIntializesAsEmpty) {
  Persistent<TraceWrapperV8ReferenceHolder> holder(
      MakeGarbageCollected<TraceWrapperV8ReferenceHolder>());
  CHECK(holder->ref()->IsEmpty());
}

// A reference constructed with a value keeps the V8 object alive across a
// full GC until the reference is explicitly cleared.
TEST_F(TraceWrapperV8ReferenceTest, CtorWithValue) {
  V8TestingScope testing_scope;
  SetIsolate(testing_scope.GetIsolate());
  Persistent<TraceWrapperV8ReferenceHolder> holder1;
  v8::Persistent<v8::Value> observer;
  CreateObject(GetIsolate(), &holder1, &observer);
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(!observer.IsEmpty());
  RunV8FullGC();
  // Still strongly referenced by holder1: survives GC.
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(!observer.IsEmpty());
  holder1->ref()->Clear();
  RunV8FullGC();
  // Last reference cleared: GC reclaims the object, weak observer empties.
  CHECK(holder1->ref()->IsEmpty());
  CHECK(observer.IsEmpty());
}
// Copy-constructing over an empty holder: both holders then keep the same
// V8 object alive; it survives until BOTH are cleared.
TEST_F(TraceWrapperV8ReferenceTest, CopyOverEmpty) {
  V8TestingScope testing_scope;
  SetIsolate(testing_scope.GetIsolate());
  Persistent<TraceWrapperV8ReferenceHolder> holder1;
  v8::Persistent<v8::Value> observer1;
  CreateObject(GetIsolate(), &holder1, &observer1);
  Persistent<TraceWrapperV8ReferenceHolder> holder2;
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(!holder2.Get());
  CHECK(!observer1.IsEmpty());
  holder2 = MakeGarbageCollected<TraceWrapperV8ReferenceHolder>(*holder1);
  // Copy: both refs point at the same value.
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(*holder1->ref() == *holder2->ref());
  CHECK(!observer1.IsEmpty());
  RunV8FullGC();
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(*holder1->ref() == *holder2->ref());
  CHECK(!observer1.IsEmpty());
  holder1.Clear();
  RunV8FullGC();
  // holder2 still keeps the object alive.
  CHECK(!holder2->ref()->IsEmpty());
  CHECK(!observer1.IsEmpty());
  holder2.Clear();
  RunV8FullGC();
  // No holders left: object is collected.
  CHECK(observer1.IsEmpty());
}

// Copy-constructing over a holder that already owns a different object: the
// overwritten object becomes unreachable and is collected; the copied object
// stays alive until both holders are cleared.
TEST_F(TraceWrapperV8ReferenceTest, CopyOverNonEmpty) {
  V8TestingScope testing_scope;
  SetIsolate(testing_scope.GetIsolate());
  Persistent<TraceWrapperV8ReferenceHolder> holder1;
  v8::Persistent<v8::Value> observer1;
  CreateObject(GetIsolate(), &holder1, &observer1);
  Persistent<TraceWrapperV8ReferenceHolder> holder2;
  v8::Persistent<v8::Value> observer2;
  CreateObject(GetIsolate(), &holder2, &observer2);
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(!observer1.IsEmpty());
  CHECK(!holder2->ref()->IsEmpty());
  CHECK(!observer2.IsEmpty());
  holder2 = MakeGarbageCollected<TraceWrapperV8ReferenceHolder>(*holder1);
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(*holder1->ref() == *holder2->ref());
  CHECK(!observer1.IsEmpty());
  CHECK(!observer2.IsEmpty());
  RunV8FullGC();
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(*holder1->ref() == *holder2->ref());
  CHECK(!observer1.IsEmpty());
  // Old object in holder2 already gone.
  CHECK(observer2.IsEmpty());
  holder1.Clear();
  RunV8FullGC();
  CHECK(!holder2->ref()->IsEmpty());
  CHECK(!observer1.IsEmpty());
  holder2.Clear();
  RunV8FullGC();
  CHECK(observer1.IsEmpty());
}
// Move-constructing over an empty holder: ownership transfers; the source
// reference becomes empty while the destination keeps the object alive.
TEST_F(TraceWrapperV8ReferenceTest, MoveOverEmpty) {
  V8TestingScope testing_scope;
  SetIsolate(testing_scope.GetIsolate());
  Persistent<TraceWrapperV8ReferenceHolder> holder1;
  v8::Persistent<v8::Value> observer1;
  CreateObject(GetIsolate(), &holder1, &observer1);
  Persistent<TraceWrapperV8ReferenceHolder> holder2;
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(!holder2.Get());
  CHECK(!observer1.IsEmpty());
  holder2 =
      MakeGarbageCollected<TraceWrapperV8ReferenceHolder>(std::move(*holder1));
  // Moved-from ref is empty; moved-to ref owns the object.
  CHECK(holder1->ref()->IsEmpty());
  CHECK(!holder2->ref()->IsEmpty());
  CHECK(!observer1.IsEmpty());
  RunV8FullGC();
  CHECK(holder1->ref()->IsEmpty());
  CHECK(!holder2->ref()->IsEmpty());
  CHECK(!observer1.IsEmpty());
  holder1.Clear();
  holder2.Clear();
  RunV8FullGC();
  CHECK(observer1.IsEmpty());
}

// Move-constructing over a holder that already owns a different object: the
// overwritten object is collected at the next GC; the moved object follows
// normal ownership-transfer semantics.
TEST_F(TraceWrapperV8ReferenceTest, MoveOverNonEmpty) {
  V8TestingScope testing_scope;
  SetIsolate(testing_scope.GetIsolate());
  Persistent<TraceWrapperV8ReferenceHolder> holder1;
  v8::Persistent<v8::Value> observer1;
  CreateObject(GetIsolate(), &holder1, &observer1);
  Persistent<TraceWrapperV8ReferenceHolder> holder2;
  v8::Persistent<v8::Value> observer2;
  CreateObject(GetIsolate(), &holder2, &observer2);
  CHECK(!holder1->ref()->IsEmpty());
  CHECK(!observer1.IsEmpty());
  CHECK(!holder2->ref()->IsEmpty());
  CHECK(!observer2.IsEmpty());
  holder2 =
      MakeGarbageCollected<TraceWrapperV8ReferenceHolder>(std::move(*holder1));
  CHECK(holder1->ref()->IsEmpty());
  CHECK(!holder2->ref()->IsEmpty());
  CHECK(!observer1.IsEmpty());
  CHECK(!observer2.IsEmpty());
  RunV8FullGC();
  CHECK(holder1->ref()->IsEmpty());
  CHECK(!holder2->ref()->IsEmpty());
  CHECK(!observer1.IsEmpty());
  // The object previously held by holder2 is no longer reachable.
  CHECK(observer2.IsEmpty());
  holder1.Clear();
  holder2.Clear();
  RunV8FullGC();
  CHECK(observer1.IsEmpty());
}
// References stored inside a HeapVector are traced: the V8 object stays
// alive while the vector is reachable and is collected once it is not.
TEST_F(TraceWrapperV8ReferenceTest, HeapVector) {
  V8TestingScope testing_scope;
  SetIsolate(testing_scope.GetIsolate());
  using VectorContainer = HeapVector<TraceWrapperV8Reference<v8::Value>>;
  Persistent<VectorContainer> holder(MakeGarbageCollected<VectorContainer>());
  v8::Persistent<v8::Value> observer;
  {
    v8::HandleScope handle_scope(GetIsolate());
    v8::Local<v8::Value> value = v8::Object::New(GetIsolate());
    observer.Reset(GetIsolate(), value);
    observer.SetWeak();
    holder->push_back(TraceWrapperV8Reference<v8::Value>(GetIsolate(), value));
  }
  RunV8FullGC();
  CHECK(!observer.IsEmpty());
  holder.Clear();
  RunV8FullGC();
  CHECK(observer.IsEmpty());
}

// A reference used as the value of an ephemeron (weak-keyed HeapHashMap)
// keeps the V8 object alive only while the map entry itself is reachable.
TEST_F(TraceWrapperV8ReferenceTest, Ephemeron) {
  V8TestingScope testing_scope;
  SetIsolate(testing_scope.GetIsolate());
  using EphemeronMap = HeapHashMap<WeakMember<TraceWrapperV8ReferenceHolder>,
                                   TraceWrapperV8Reference<v8::Value>>;
  Persistent<EphemeronMap> holder(MakeGarbageCollected<EphemeronMap>());
  v8::Persistent<v8::Value> observer;
  Persistent<TraceWrapperV8ReferenceHolder> object(
      MakeGarbageCollected<TraceWrapperV8ReferenceHolder>());
  {
    v8::HandleScope handle_scope(GetIsolate());
    v8::Local<v8::Value> value = v8::Object::New(GetIsolate());
    observer.Reset(GetIsolate(), value);
    observer.SetWeak();
    holder->insert(WeakMember<TraceWrapperV8ReferenceHolder>(object),
                   TraceWrapperV8Reference<v8::Value>(GetIsolate(), value));
  }
  RunV8FullGC();
  EXPECT_TRUE(!observer.IsEmpty());
  holder.Clear();
  RunV8FullGC();
  CHECK(observer.IsEmpty());
}
} // namespace blink
| 3,057 |
305 | <gh_stars>100-1000
package org.mamute.reputation.rules;
import org.mamute.model.ReputationEvent;
/**
 * Canonical table of karma (reputation) deltas awarded or deducted for each
 * kind of user action. The concrete value for a given event is carried by
 * the {@link ReputationEvent} itself; this class resolves an event to its
 * reward and defines the constants.
 */
public class KarmaCalculator {

    /** Delta for the question's author when the question is marked solved. */
    public static final int SOLVED_QUESTION_AUTHOR = 5;
    /** Delta for the author of the accepted solution. */
    public static final int SOLUTION_AUTHOR = 20;
    /** Delta when one of my answers is voted up. */
    public static final int MY_ANSWER_VOTED_UP = 10;
    /** Delta when one of my answers is voted down. */
    public static final int MY_ANSWER_VOTED_DOWN = -2;
    /** Question down-votes cost the same as answer down-votes. */
    public static final int MY_QUESTION_VOTED_DOWN = MY_ANSWER_VOTED_DOWN;
    /** Delta when one of my questions is voted up. */
    public static final int MY_QUESTION_VOTED_UP = 5;
    /** Delta for approved information (presumably an accepted edit — confirm against usage). */
    public static final int APPROVED_INFORMATION = 2;
    /** Delta when one of my comments is voted up. */
    public static final int COMMENT_VOTED_UP = 1;
    /** Delta for asking a question. */
    public static final int ASKED_QUESTION = 2;
    /** Delta for answering a question. */
    public static final int ANSWERED_QUESTION = 2;
    /** Delta applied on a question/answer down-vote (NOTE(review): which side pays — voter or author — is not visible here; confirm at call sites). */
    public static final int DOWNVOTED_QUESTION_OR_ANSWER = -2;
    /** Delta when one of my news posts is voted up. */
    public static final int MY_NEWS_VOTED_UP = 5;
    /** Delta when one of my news posts is voted down. */
    public static final int MY_NEWS_VOTED_DOWN = -2;

    /**
     * Returns the karma delta for the given event, as recorded on the event.
     */
    public int karmaFor(ReputationEvent voteEvent) {
        return voteEvent.getKarmaReward();
    }
}
| 349 |
5,535 | <reponame>bradfordb-vmware/gpdb
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2008 - 2010 Greenplum, Inc.
//
// @filename:
// CWorker.cpp
//
// @doc:
// Worker abstraction, e.g. thread
//---------------------------------------------------------------------------
#include "gpos/task/CWorker.h"
#include "gpos/common/syslibwrapper.h"
#include "gpos/memory/CMemoryPoolManager.h"
#include "gpos/string/CWStringStatic.h"
#include "gpos/task/CWorkerPoolManager.h"
using namespace gpos;
// host system callback function to report abort requests
bool (*CWorker::abort_requested_by_system)(void);
//---------------------------------------------------------------------------
// @function:
// CWorker::CWorker
//
// @doc:
// ctor
//
//---------------------------------------------------------------------------
// ctor: records stack geometry and registers this worker with the pool
// manager as the current thread's worker.
CWorker::CWorker(ULONG stack_size, ULONG_PTR stack_start)
	: m_task(nullptr), m_stack_size(stack_size), m_stack_start(stack_start)
{
	GPOS_ASSERT(stack_size >= 2 * 1024 &&
				"Worker has to have at least 2KB stack");

	// register worker; exactly one worker may be registered per thread
	GPOS_ASSERT(nullptr == Self() && "Found registered worker!");
	CWorkerPoolManager::WorkerPoolManager()->RegisterWorker(this);
	GPOS_ASSERT(this == CWorkerPoolManager::WorkerPoolManager()->Self());
}
//---------------------------------------------------------------------------
// @function:
// CWorker::~CWorker
//
// @doc:
// dtor
//
//---------------------------------------------------------------------------
// dtor: unregister worker from the pool manager; the assert verifies the
// worker is being destroyed on the thread it is registered for.
CWorker::~CWorker()
{
	// unregister worker
	GPOS_ASSERT(this == Self() && "Unidentified worker found.");
	CWorkerPoolManager::WorkerPoolManager()->RemoveWorker();
}
//---------------------------------------------------------------------------
// @function:
// CWorker::Execute
//
// @doc:
// Execute single task
//
//---------------------------------------------------------------------------
// Execute a single task on this worker. The worker holds at most one task at
// a time; the task slot is cleared on both success and failure so the worker
// can be reused.
void
CWorker::Execute(CTask *task)
{
	GPOS_ASSERT(task);
	GPOS_ASSERT(nullptr == m_task && "Another task is assigned to worker");

	m_task = task;

	GPOS_TRY
	{
		m_task->Execute();
		m_task = nullptr;
	}
	GPOS_CATCH_EX(ex)
	{
		// clear the task slot before re-raising so the worker stays usable
		m_task = nullptr;
		GPOS_RETHROW(ex);
	}
	GPOS_CATCH_END;
}
//---------------------------------------------------------------------------
// @function:
// CWorker::CheckForAbort
//
// @doc:
// Check pending abort flag; throw if abort is flagged
//
//---------------------------------------------------------------------------
// Check-for-abort: raises (via GPOS_ABORT) when the current task should stop.
// No-op when no task is assigned, the task is not running, or CFA is
// temporarily suspended. The two unnamed parameters (file/line per the
// declaration site — confirm in CWorker.h) are unused here.
void
CWorker::CheckForAbort(const CHAR *, ULONG)
{
	// check if there is a task assigned to worker,
	// task is still running and CFA is not suspended
	if (nullptr != m_task && m_task->IsRunning() && !m_task->IsAbortSuspended())
	{
		GPOS_ASSERT(!m_task->GetErrCtxt()->IsPending() &&
					"Check-For-Abort while an exception is pending");

		// abort when the host system requests it (optional callback) or the
		// task itself has been canceled
		if ((nullptr != abort_requested_by_system &&
			 abort_requested_by_system()) ||
			m_task->IsCanceled())
		{
			// raise exception
			GPOS_ABORT;
		}
	}
}
//---------------------------------------------------------------------------
// @function:
// CWorker::CheckStackSize
//
// @doc:
// Size of stack within context of this worker;
// effectively calculates distance of local variable to stack start;
// if stack space is exhausted we throw an exception;
// else we check if requested space can fit in stack
//
//---------------------------------------------------------------------------
// Returns true when `request` additional bytes fit within this worker's
// stack budget; raises ExmiOutOfStack when the budget is already exhausted.
BOOL
CWorker::CheckStackSize(ULONG request) const
{
	ULONG_PTR ptr = 0;

	// current usage = distance from recorded stack start to a fresh local's
	// address (assumes the stack grows downward from m_stack_start)
	ULONG_PTR size = m_stack_start - (ULONG_PTR) &ptr;

	// check if we have exceeded stack space
	if (size >= m_stack_size)
	{
		// raise stack overflow exception
		GPOS_RAISE(CException::ExmaSystem, CException::ExmiOutOfStack);
	}

	// check if there is enough stack space for request
	if (size + request >= m_stack_size)
	{
		return false;
	}

	return true;
}
// EOF
| 1,206 |
329 | <gh_stars>100-1000
/////////////////////////////////////////////////////////////////////
// = NMatrix
//
// A linear algebra library for scientific computation in Ruby.
// NMatrix is part of SciRuby.
//
// NMatrix was originally inspired by and derived from NArray, by
// <NAME>: http://narray.rubyforge.org
//
// == Copyright Information
//
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
// NMatrix is Copyright (c) 2012 - 2014, <NAME> and the Ruby Science Foundation
//
// Please see LICENSE.txt for additional copyright notices.
//
// == Contributing
//
// By contributing source code to SciRuby, you agree to be bound by
// our Contributor Agreement:
//
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
//
// == yale.c
//
// "new yale" storage format for 2D matrices (like yale, but with
// the diagonal pulled out for O(1) access).
//
// Specifications:
// * dtype and index dtype must necessarily differ
// * index dtype is defined by whatever unsigned type can store
// max(rows,cols)
// * that means vector ija stores only index dtype, but a stores
// dtype
// * vectors must be able to grow as necessary
// * maximum size is rows*cols+1
/*
* Standard Includes
*/
#include <ruby.h>
#include <algorithm> // std::min
#include <cstdio> // std::fprintf
#include <iostream>
#include <array>
#include <typeinfo>
#include <tuple>
#include <queue>
/*
* Project Includes
*/
// #include "types.h"
#include "../../data/data.h"
#include "../../math/math.h"
#include "../common.h"
#include "../../nmatrix.h"
#include "../../data/meta.h"
#include "iterators/base.h"
#include "iterators/stored_diagonal.h"
#include "iterators/row_stored_nd.h"
#include "iterators/row_stored.h"
#include "iterators/row.h"
#include "iterators/iterator.h"
#include "class.h"
#include "yale.h"
#include "../../ruby_constants.h"
/*
* Macros
*/
#ifndef NM_MAX
#define NM_MAX(a,b) (((a)>(b))?(a):(b))
#define NM_MIN(a,b) (((a)<(b))?(a):(b))
#endif
/*
* Forward Declarations
*/
extern "C" {
static YALE_STORAGE* alloc(nm::dtype_t dtype, size_t* shape, size_t dim);
static size_t yale_count_slice_copy_ndnz(const YALE_STORAGE* s, size_t*, size_t*);
static void* default_value_ptr(const YALE_STORAGE* s);
static VALUE default_value(const YALE_STORAGE* s);
static VALUE obj_at(YALE_STORAGE* s, size_t k);
/* Ruby-accessible functions */
static VALUE nm_size(VALUE self);
static VALUE nm_a(int argc, VALUE* argv, VALUE self);
static VALUE nm_d(int argc, VALUE* argv, VALUE self);
static VALUE nm_lu(VALUE self);
static VALUE nm_ia(VALUE self);
static VALUE nm_ja(VALUE self);
static VALUE nm_ija(int argc, VALUE* argv, VALUE self);
static VALUE nm_row_keys_intersection(VALUE m1, VALUE ii1, VALUE m2, VALUE ii2);
static VALUE nm_nd_row(int argc, VALUE* argv, VALUE self);
// Non-diagonal non-zero count of the storage's source (the matrix backing a
// possible slice view).
static inline size_t src_ndnz(const YALE_STORAGE* s) {
  return reinterpret_cast<YALE_STORAGE*>(s->src)->ndnz;
}
} // end extern "C" block
namespace nm { namespace yale_storage {
template <typename LD, typename RD>
static VALUE map_merged_stored(VALUE left, VALUE right, VALUE init);
template <typename DType>
static bool ndrow_is_empty(const YALE_STORAGE* s, IType ija, const IType ija_next);
template <typename LDType, typename RDType>
static bool ndrow_eqeq_ndrow(const YALE_STORAGE* l, const YALE_STORAGE* r, IType l_ija, const IType l_ija_next, IType r_ija, const IType r_ija_next);
template <typename LDType, typename RDType>
static bool eqeq(const YALE_STORAGE* left, const YALE_STORAGE* right);
template <typename LDType, typename RDType>
static bool eqeq_different_defaults(const YALE_STORAGE* s, const LDType& s_init, const YALE_STORAGE* t, const RDType& t_init);
static void increment_ia_after(YALE_STORAGE* s, IType ija_size, IType i, long n);
static IType insert_search(YALE_STORAGE* s, IType left, IType right, IType key, bool& found);
template <typename DType>
static char vector_insert(YALE_STORAGE* s, size_t pos, size_t* j, void* val_, size_t n, bool struct_only);
template <typename DType>
static char vector_insert_resize(YALE_STORAGE* s, size_t current_size, size_t pos, size_t* j, size_t n, bool struct_only);
template <typename DType>
static std::tuple<long,bool,std::queue<std::tuple<IType,IType,int> > > count_slice_set_ndnz_change(YALE_STORAGE* s, size_t* coords, size_t* lengths, DType* v, size_t v_size);
// IJA (row-pointer + column-index) vector of the source storage.
static inline IType* IJA(const YALE_STORAGE* s) {
  return reinterpret_cast<IType*>(IJA_ACCESS_PLACEHOLDER);
}
static inline IType IJA_SET(const YALE_STORAGE* s, size_t loc, IType val) {
  return IJA(s)[loc] = val;
}
template <typename DType>
static inline DType* A(const YALE_STORAGE* s) {
  return reinterpret_cast<DType*>(reinterpret_cast<YALE_STORAGE*>(s->src)->a);
}
template <typename DType>
static inline DType A_SET(const YALE_STORAGE* s, size_t loc, DType val) {
  return A<DType>(s)[loc] = val;
}
/*
* Functions
*/
/*
 * Copy a vector of `length` elements from one DType to another, recasting
 * each element (RType in -> LType out) via the implicit conversion.
 *
 * in_  - source buffer holding at least `length` RType elements
 * out_ - destination buffer with room for at least `length` LType elements
 */
template <typename LType, typename RType>
static inline void copy_recast_vector(const void* in_, void* out_, size_t length) {
  const RType* in = reinterpret_cast<const RType*>(in_);
  LType* out = reinterpret_cast<LType*>(out_);
  for (size_t i = 0; i < length; ++i) {
    out[i] = in[i];
  }
  // (a stray no-op `out;` expression statement was removed; the function is
  // void and the statement had no effect)
}
/*
* Create Yale storage from IA, JA, and A vectors given in Old Yale format (probably from a file, since NMatrix only uses
* new Yale for its storage).
*
* This function is needed for Matlab .MAT v5 IO.
*/
// r_ia / r_ja: Old Yale IA and JA index vectors (reinterpreted as IType);
// r_a: value vector (reinterpreted as RDType). `shape` is [rows, cols].
// Output storage stores values as LDType.
template <typename LDType, typename RDType>
YALE_STORAGE* create_from_old_yale(dtype_t dtype, size_t* shape, char* r_ia, char* r_ja, char* r_a) {
  IType* ir = reinterpret_cast<IType*>(r_ia);
  IType* jr = reinterpret_cast<IType*>(r_ja);
  RDType* ar = reinterpret_cast<RDType*>(r_a);

  // Read through ia and ja and figure out the ndnz (non-diagonal non-zeros) count.
  size_t ndnz = 0, i, p, p_next;

  for (i = 0; i < shape[0]; ++i) { // Walk down rows
    for (p = ir[i], p_next = ir[i+1]; p < p_next; ++p) { // Now walk through columns
      if (i != jr[p]) ++ndnz; // entry is non-diagonal and probably nonzero
    }
  }

  // Having walked through the matrix, we now go about allocating the space for it.
  YALE_STORAGE* s = alloc(dtype, shape, 2);

  // capacity = IA part (shape[0]+1) + non-diagonal entries, since new Yale
  // keeps the diagonal in the first shape[0] slots of A.
  s->capacity = shape[0] + ndnz + 1;
  s->ndnz = ndnz;

  // Setup IJA and A arrays
  s->ija = NM_ALLOC_N( IType, s->capacity );
  s->a = NM_ALLOC_N( LDType, s->capacity );
  IType* ijl = reinterpret_cast<IType*>(s->ija);
  LDType* al = reinterpret_cast<LDType*>(s->a);

  // set the diagonal to zero -- this prevents uninitialized values from popping up.
  for (size_t index = 0; index < shape[0]; ++index) {
    al[index] = 0;
  }

  // Figure out where to start writing JA in IJA:
  size_t pp = s->shape[0]+1;

  // Find beginning of first row
  p = ir[0];

  // Now fill the arrays
  for (i = 0; i < s->shape[0]; ++i) {

    // Set the beginning of the row (of output)
    ijl[i] = pp;

    // Now walk through columns, starting at end of row (of input)
    for (size_t p_next = ir[i+1]; p < p_next; ++p, ++pp) {
      if (i == jr[p]) { // diagonal: stored in the dense diagonal slot, so pp
                        // is rewound to reuse this IJA/A position
        al[i] = ar[p];
        --pp;
      } else { // nondiagonal
        ijl[pp] = jr[p];
        al[pp] = ar[p];
      }
    }
  }

  ijl[i] = pp; // Set the end of the last row

  // Set the zero position for our output matrix
  al[i] = 0;

  return s;
}
/*
* Empty the matrix by initializing the IJA vector and setting the diagonal to 0.
*
* Called when most YALE_STORAGE objects are created.
*
* Can't go inside of class YaleStorage because YaleStorage creation requires that
* IJA already be initialized.
*/
/*
 * Empty the matrix: point every IA entry just past the IA block (shape[0]+1),
 * meaning every row is empty, then zero the diagonal via
 * clear_diagonal_and_zero. Called when most YALE_STORAGE objects are created.
 */
template <typename DType>
void init(YALE_STORAGE* s, void* init_val) {
  IType* ija          = reinterpret_cast<IType*>(s->ija);
  const IType ia_size = s->shape[0] + 1;
  IType pos = 0;
  while (pos < ia_size) {
    ija[pos] = ia_size; // empty row: row start == row end == end of IA block
    ++pos;
  }
  clear_diagonal_and_zero<DType>(s, init_val);
}
/*
 * Copy the (possibly sliced) matrix s into a fresh, compact YALE_STORAGE,
 * casting entries from RDType to LDType.
 */
template <typename LDType, typename RDType>
static YALE_STORAGE* slice_copy(YALE_STORAGE* s) {
  return YaleStorage<RDType>(s).template alloc_copy<LDType, false>();
}
/*
* Template version of copy transposed. This could also, in theory, allow a map -- but transpose.h
* would need to be updated.
*
* TODO: Update for slicing? Update for different dtype in and out? We can cast rather easily without
* too much modification.
*/
template <typename D>
YALE_STORAGE* copy_transposed(YALE_STORAGE* rhs) {
  // Delegate to YaleStorage's transposing allocator, keeping the same dtype.
  return YaleStorage<D>(rhs).template alloc_copy_transposed<D, false>();
}
///////////////
// Accessors //
///////////////
/*
* Determine the number of non-diagonal non-zeros in a not-yet-created copy of a slice or matrix.
*/
/*
 * Count the non-diagonal non-zeros that a copy of the slice described by
 * offset/shape would contain.
 *
 * Destination-diagonal positions (j == i) are skipped, since the destination
 * stores its diagonal regardless. A source-diagonal entry (k == l) landing
 * off-diagonal in the destination counts only if it differs from the source's
 * default value.
 */
template <typename DType>
static size_t count_slice_copy_ndnz(const YALE_STORAGE* s, size_t* offset, size_t* shape) {
  IType* ija = s->ija;
  DType* a = reinterpret_cast<DType*>(s->a);
  // Source's default ("zero") value, used to skip stored-but-default diagonal entries.
  DType ZERO(*reinterpret_cast<DType*>(default_value_ptr(s)));
  // Calc ndnz for the destination
  size_t ndnz = 0;
  size_t i, j; // indexes of destination matrix
  size_t k, l; // indexes of source matrix
  for (i = 0; i < shape[0]; i++) {
    k = i + offset[0];
    for (j = 0; j < shape[1]; j++) {
      l = j + offset[1];
      if (j == i) continue; // destination diagonal: always stored, never counted
      if (k == l) { // for diagonal element of source
        if (a[k] != ZERO) ++ndnz;
      } else { // for non-diagonal element
        // Linear scan of row k's stored columns for column l.
        for (size_t c = ija[k]; c < ija[k+1]; c++) {
          if (ija[c] == l) {
            ++ndnz;
            break;
          }
        }
      }
    }
  }
  return ndnz;
}
/*
* Get a single element of a yale storage object
*/
/*
 * Return a raw pointer to the single element addressed by slice.
 */
template <typename DType>
static void* get_single(YALE_STORAGE* storage, SLICE* slice) {
  YaleStorage<DType> view(storage);
  DType* p = view.get_single_p(slice);
  return reinterpret_cast<void*>(p);
}
/*
* Returns a reference-slice of a matrix.
*/
/*
 * Returns a reference-slice (a view, not a copy) of a matrix.
 */
template <typename DType>
YALE_STORAGE* ref(YALE_STORAGE* s, SLICE* slice) {
  YaleStorage<DType> view(s);
  return view.alloc_ref(slice);
}
/*
* Attempt to set a cell or cells in a Yale matrix.
*/
/*
 * Set a cell or cells of the Yale matrix wrapped by `left` to `right`.
 */
template <typename DType>
void set(VALUE left, SLICE* slice, VALUE right) {
  YaleStorage<DType>(NM_STORAGE_YALE(left)).insert(slice, right);
}
///////////
// Tests //
///////////
/*
* Yale eql? -- for whole-matrix comparison returning a single value.
*/
template <typename LDType, typename RDType>
static bool eqeq(const YALE_STORAGE* left, const YALE_STORAGE* right) {
return YaleStorage<LDType>(left) == YaleStorage<RDType>(right);
}
//////////
// Math //
//////////
// IA portion of the IJA array: the shape[0]+1 row pointers at the front.
#define YALE_IA(s) (reinterpret_cast<IType*>(s->ija))
// JA portion: column indices of non-diagonal entries, starting after IA.
#define YALE_IJ(s) (reinterpret_cast<IType*>(s->ija) + s->shape[0] + 1)
// Total stored entries: non-diagonal non-zeros plus the full diagonal.
#define YALE_COUNT(yale) (yale->ndnz + yale->shape[0])
/////////////
// Utility //
/////////////
/*
* Binary search for finding the beginning of a slice. Returns the position of the first element which is larger than
* bound.
*/
/*
 * Binary search for finding the beginning of a slice. Returns the position of the first element which is larger than
 * bound.
 *
 * Recursive; left/right delimit the searched span within IJA.
 * NOTE(review): returns -1 on an empty span, which wraps to a huge value if
 * IType is unsigned -- presumably callers never pass left > right; confirm.
 * NOTE(review): (left + right) / 2 can overflow for indices near IType's max.
 */
IType binary_search_left_boundary(const YALE_STORAGE* s, IType left, IType right, IType bound) {
  if (left > right) return -1;
  IType* ija = IJA(s);
  if (ija[left] >= bound) return left; // shortcut
  IType mid = (left + right) / 2;
  IType mid_j = ija[mid];
  if (mid_j == bound)
    return mid;
  else if (mid_j > bound) { // eligible! don't exclude it.
    return binary_search_left_boundary(s, left, mid, bound);
  } else // (mid_j < bound)
    return binary_search_left_boundary(s, mid + 1, right, bound);
}
/*
* Binary search for returning stored values. Returns a non-negative position, or -1 for not found.
*/
/*
 * Binary search for returning stored values. Returns a non-negative position, or -1 for not found.
 *
 * Only valid on an original (non-slice) storage: s->src must equal s.
 * NOTE(review): the bare `throw;` terminates the process when reached outside
 * a catch block -- the original author flagged it as needing a real fix.
 * NOTE(review): if IType is unsigned, `mid - 1` underflows when mid == 0, and
 * the left > right guard then never fires -- confirm IType's signedness.
 */
int binary_search(YALE_STORAGE* s, IType left, IType right, IType key) {
  if (s->src != s) throw; // need to fix this quickly
  if (left > right) return -1;
  IType* ija = s->ija;
  IType mid = (left + right)/2;
  IType mid_j = ija[mid];
  if (mid_j == key)
    return mid;
  else if (mid_j > key)
    return binary_search(s, left, mid - 1, key);
  else
    return binary_search(s, mid + 1, right, key);
}
/*
* Resize yale storage vectors A and IJA, copying values.
*/
/*
 * Resize yale storage vectors A and IJA, copying values.
 *
 * Grows capacity by GROWTH_CONSTANT, clamped to the maximum size addressable
 * for this shape. Only valid on an original storage (s->src == s).
 *
 * The register/unregister calls keep the Ruby GC aware of the buffers while
 * they are being swapped; their ordering relative to NM_FREE is deliberate --
 * do not reorder.
 */
static void vector_grow(YALE_STORAGE* s) {
  if (s != s->src) {
    throw; // need to correct this quickly.
  }
  nm_yale_storage_register(s);
  size_t new_capacity = s->capacity * GROWTH_CONSTANT;
  size_t max_capacity = YaleStorage<uint8_t>::max_size(s->shape);
  if (new_capacity > max_capacity) new_capacity = max_capacity;
  IType* new_ija = NM_ALLOC_N(IType, new_capacity);
  void* new_a = NM_ALLOC_N(char, DTYPE_SIZES[s->dtype] * new_capacity);
  IType* old_ija = s->ija;
  void* old_a = s->a;
  // Copy only the old capacity's worth of entries into the larger buffers.
  memcpy(new_ija, old_ija, s->capacity * sizeof(IType));
  memcpy(new_a, old_a, s->capacity * DTYPE_SIZES[s->dtype]);
  s->capacity = new_capacity;
  // Ruby-object matrices: keep the new A buffer visible to the GC before freeing the old one.
  if (s->dtype == nm::RUBYOBJ)
    nm_yale_storage_register_a(new_a, s->capacity * DTYPE_SIZES[s->dtype]);
  NM_FREE(old_ija);
  nm_yale_storage_unregister(s);
  NM_FREE(old_a);
  if (s->dtype == nm::RUBYOBJ)
    nm_yale_storage_unregister_a(new_a, s->capacity * DTYPE_SIZES[s->dtype]);
  s->ija = new_ija;
  s->a = new_a;
}
/*
* Resize yale storage vectors A and IJA in preparation for an insertion.
*/
/*
 * Resize yale storage vectors A and IJA in preparation for an insertion.
 *
 * Grows capacity by GROWTH_CONSTANT (at least current_size + n, at most the
 * maximum size for this shape), copies everything before pos, and shifts
 * everything at/after pos forward by n slots -- leaving a gap of n entries
 * for the caller to fill. When struct_only is true, only IJA is maintained
 * (A contents are not copied). Raises NoMemoryError if the matrix cannot
 * hold n more entries. Only valid on an original storage (s->src == s).
 *
 * Returns 'i'. The j parameter is unused here (kept for signature parity
 * with vector_insert).
 */
template <typename DType>
static char vector_insert_resize(YALE_STORAGE* s, size_t current_size, size_t pos, size_t* j, size_t n, bool struct_only) {
  if (s != s->src) throw;
  // Determine the new capacity for the IJA and A vectors.
  size_t new_capacity = s->capacity * GROWTH_CONSTANT;
  size_t max_capacity = YaleStorage<DType>::max_size(s->shape);
  if (new_capacity > max_capacity) {
    new_capacity = max_capacity;
    if (current_size + n > max_capacity) rb_raise(rb_eNoMemError, "insertion size exceeded maximum yale matrix size");
  }
  if (new_capacity < current_size + n)
    new_capacity = current_size + n;
  nm_yale_storage_register(s);
  // Allocate the new vectors.
  IType* new_ija = NM_ALLOC_N( IType, new_capacity );
  NM_CHECK_ALLOC(new_ija);
  DType* new_a = NM_ALLOC_N( DType, new_capacity );
  NM_CHECK_ALLOC(new_a);
  IType* old_ija = reinterpret_cast<IType*>(s->ija);
  DType* old_a = reinterpret_cast<DType*>(s->a);
  // Copy all values prior to the insertion site to the new IJA and new A
  if (struct_only) {
    for (size_t i = 0; i < pos; ++i) {
      new_ija[i] = old_ija[i];
    }
  } else {
    for (size_t i = 0; i < pos; ++i) {
      new_ija[i] = old_ija[i];
      new_a[i] = old_a[i];
    }
  }
  // Copy all values subsequent to the insertion site to the new IJA and new A, leaving room (size n) for insertion.
  if (struct_only) {
    for (size_t i = pos; i < current_size; ++i) {
      new_ija[i+n] = old_ija[i];
    }
  } else {
    for (size_t i = pos; i < current_size; ++i) {
      new_ija[i+n] = old_ija[i];
      new_a[i+n] = old_a[i];
    }
  }
  s->capacity = new_capacity;
  // GC bookkeeping order (register new A, free old buffers, unregister) is deliberate.
  if (s->dtype == nm::RUBYOBJ)
    nm_yale_storage_register_a(new_a, new_capacity);
  NM_FREE(s->ija);
  nm_yale_storage_unregister(s);
  NM_FREE(s->a);
  if (s->dtype == nm::RUBYOBJ)
    nm_yale_storage_unregister_a(new_a, new_capacity);
  s->ija = new_ija;
  s->a = reinterpret_cast<void*>(new_a);
  return 'i';
}
/*
* Insert a value or contiguous values in the ija and a vectors (after ja and
* diag). Does not free anything; you are responsible!
*
* TODO: Improve this so it can handle non-contiguous element insertions
* efficiently. For now, we can just sort the elements in the row in
* question.)
*/
/*
 * Insert a value or contiguous values in the ija and a vectors (after ja and
 * diag). Does not free anything; you are responsible!
 *
 * pos is the slot where insertion begins (must lie in the JA/nd region, i.e.
 * at or after shape[0]); j holds the n column indices and val_ the n values
 * (values are ignored when struct_only). Resizes first if capacity would be
 * exceeded; otherwise shifts the tail right by n in place, back to front.
 * Returns 'i'.
 *
 * TODO: Improve this so it can handle non-contiguous element insertions
 * efficiently. For now, we can just sort the elements in the row in
 * question.)
 */
template <typename DType>
static char vector_insert(YALE_STORAGE* s, size_t pos, size_t* j, void* val_, size_t n, bool struct_only) {
  if (pos < s->shape[0]) {
    rb_raise(rb_eArgError, "vector insert pos (%lu) is before beginning of ja (%lu); this should not happen", pos, s->shape[0]);
  }
  DType* val = reinterpret_cast<DType*>(val_);
  size_t size = s->ija[s->shape[0]]; // current number of stored entries
  IType* ija = s->ija;
  DType* a = reinterpret_cast<DType*>(s->a);
  if (size + n > s->capacity) {
    vector_insert_resize<DType>(s, size, pos, j, n, struct_only);
    // Need to get the new locations for ija and a.
    ija = s->ija;
    a = reinterpret_cast<DType*>(s->a);
  } else {
    /*
     * No resize required:
     * easy (but somewhat slow), just copy elements to the tail, starting at
     * the end, one element at a time.
     *
     * TODO: This can be made slightly more efficient, but only after the tests
     * are written.
     */
    if (struct_only) {
      for (size_t i = 0; i < size - pos; ++i) {
        ija[size+n-1-i] = ija[size-1-i];
      }
    } else {
      for (size_t i = 0; i < size - pos; ++i) {
        ija[size+n-1-i] = ija[size-1-i];
        a[size+n-1-i] = a[size-1-i];
      }
    }
  }
  // Now insert the new values.
  if (struct_only) {
    for (size_t i = 0; i < n; ++i) {
      ija[pos+i] = j[i];
    }
  } else {
    for (size_t i = 0; i < n; ++i) {
      ija[pos+i] = j[i];
      a[pos+i] = val[i];
    }
  }
  return 'i';
}
/*
* If we add n items to row i, we need to increment ija[i+1] and onward.
*/
/*
 * After inserting n entries into row i, bump every subsequent row pointer
 * ija[i+1 .. ija_size] by n.
 */
static void increment_ia_after(YALE_STORAGE* s, IType ija_size, IType i, long n) {
  IType* ija = s->ija;
  for (IType r = i + 1; r <= ija_size; ++r) {
    ija[r] += n;
  }
}
/*
* Binary search for returning insertion points.
*/
/*
 * Binary search for the insertion point of `key` within ija[left..right].
 * On a hit, sets found = true and returns the matching position; otherwise
 * sets found = false and returns the position where key should be inserted.
 */
static IType insert_search(YALE_STORAGE* s, IType left, IType right, IType key, bool& found) {
  IType* ija = s->ija;
  while (left <= right) {
    IType mid   = (left + right)/2;
    IType mid_j = ija[mid];
    if (mid_j == key) {
      found = true;
      return mid;
    }
    if (mid_j > key) right = mid - 1;
    else             left  = mid + 1;
  }
  found = false;
  return left;
}
/////////////////////////
// Copying and Casting //
/////////////////////////
/*
* Templated copy constructor for changing dtypes.
*/
/*
 * Allocate a copy of rhs whose entries are cast from R to L.
 */
template <typename L, typename R>
YALE_STORAGE* cast_copy(const YALE_STORAGE* rhs) {
  return YaleStorage<R>(rhs).template alloc_copy<L>();
}
/*
* Template access for getting the size of Yale storage.
*/
/*
 * Number of stored entries: the last IA row pointer marks one past the final
 * stored element.
 */
size_t get_size(const YALE_STORAGE* storage) {
  const size_t last_ia = storage->shape[0];
  return storage->ija[last_ia];
}
/*
 * Multiply two Yale matrices of identical dtype (casted_storage holds the
 * pre-cast pair). Runs SMMP symbolic multiplication twice -- once to count
 * the result's ndnz, once to build its structure -- then numeric
 * multiplication fills the values and the columns are sorted. Returns a
 * freshly allocated YALE_STORAGE.
 */
template <typename DType>
static STORAGE* matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector) {
  YALE_STORAGE *left = (YALE_STORAGE*)(casted_storage.left),
               *right = (YALE_STORAGE*)(casted_storage.right);
  nm_yale_storage_register(left);
  nm_yale_storage_register(right);
  // We can safely get dtype from the casted matrices; post-condition of binary_storage_cast_alloc is that dtype is the
  // same for left and right.
  // int8_t dtype = left->dtype;
  IType* ijl = left->ija;
  IType* ijr = right->ija;
  // First, count the ndnz of the result.
  // TODO: This basically requires running symbmm twice to get the exact ndnz size. That's frustrating. Are there simple
  // cases where we can avoid running it?
  size_t result_ndnz = nm::math::symbmm(resulting_shape[0], left->shape[1], resulting_shape[1], ijl, ijl, true, ijr, ijr, true, NULL, true);
  // Create result storage.
  YALE_STORAGE* result = nm_yale_storage_create(left->dtype, resulting_shape, 2, result_ndnz);
  init<DType>(result, NULL);
  IType* ija = result->ija;
  // Symbolic multiplication step (build the structure)
  nm::math::symbmm(resulting_shape[0], left->shape[1], resulting_shape[1], ijl, ijl, true, ijr, ijr, true, ija, true);
  // Numeric multiplication step (fill in the elements)
  nm::math::numbmm<DType>(result->shape[0], left->shape[1], result->shape[1],
                          ijl, ijl, reinterpret_cast<DType*>(left->a), true,
                          ijr, ijr, reinterpret_cast<DType*>(right->a), true,
                          ija, ija, reinterpret_cast<DType*>(result->a), true);
  // Sort the columns
  nm::math::smmp_sort_columns<DType>(result->shape[0], ija, ija, reinterpret_cast<DType*>(result->a));
  nm_yale_storage_unregister(right);
  nm_yale_storage_unregister(left);
  return reinterpret_cast<STORAGE*>(result);
}
/*
* Get the sum of offsets from the original matrix (for sliced iteration).
*/
/*
 * Sum the (row, column) offsets along the chain of slice views back to the
 * original storage -- used for sliced iteration.
 */
static std::array<size_t,2> get_offsets(YALE_STORAGE* x) {
  size_t row = 0, col = 0;
  for (; x != x->src; x = reinterpret_cast<YALE_STORAGE*>(x->src)) {
    row += x->offset[0];
    col += x->offset[1];
  }
  return std::array<size_t,2>{ {row, col} };
}
/*
 * Iterates the stored entries of one row of a Yale matrix in column order,
 * interleaving the diagonal entry (tracked by `diag`) with the non-diagonal
 * entries at positions k..k_end of IJA/A. j_offset/j_shape restrict
 * iteration to a column window (for slices). `End` marks exhaustion; `init`
 * caches the matrix default value so insert() can skip default writes.
 */
class RowIterator {
protected:
  YALE_STORAGE* s;          // matrix being iterated (possibly a slice view)
  IType* ija;               // IJA array of the source storage
  void* a;                  // A array of the source storage
  IType i, k, k_end;        // row index; current and one-past-last nd position
  size_t j_offset, j_shape; // column window start and extent
  bool diag, End;           // currently on the diagonal? / iteration finished?
  VALUE init;               // matrix default value as a Ruby object
public:
  RowIterator(YALE_STORAGE* s_, IType* ija_, IType i_, size_t j_shape_, size_t j_offset_ = 0)
    : s(s_),
      ija(ija_),
      a(reinterpret_cast<YALE_STORAGE*>(s->src)->a),
      i(i_),
      k(ija[i]),
      k_end(ija[i+1]),
      j_offset(j_offset_),
      j_shape(j_shape_),
      diag(row_has_no_nd() || diag_is_first()),
      End(false),
      init(default_value(s))
  { }
  RowIterator(YALE_STORAGE* s_, IType i_, size_t j_shape_, size_t j_offset_ = 0)
    : s(s_),
      ija(IJA(s)),
      a(reinterpret_cast<YALE_STORAGE*>(s->src)->a),
      i(i_),
      k(ija[i]),
      k_end(ija[i+1]),
      j_offset(j_offset_),
      j_shape(j_shape_),
      diag(row_has_no_nd() || diag_is_first()),
      End(false),
      init(default_value(s))
  { }
  RowIterator(const RowIterator& rhs) : s(rhs.s), ija(rhs.ija), a(reinterpret_cast<YALE_STORAGE*>(s->src)->a), i(rhs.i), k(rhs.k), k_end(rhs.k_end), j_offset(rhs.j_offset), j_shape(rhs.j_shape), diag(rhs.diag), End(rhs.End), init(rhs.init) { }
  // Current entry as a Ruby object.
  VALUE obj() const {
    return diag ? obj_at(s, i) : obj_at(s, k);
  }
  // Current entry as a C type (falls back to obj() for Ruby-object matrices).
  template <typename T>
  T cobj() const {
    if (typeid(T) == typeid(RubyObject)) return obj();
    return A<T>(s)[diag ? i : k];
  }
  // Column index of the current entry, in source coordinates.
  inline IType proper_j() const {
    return diag ? i : ija[k];
  }
  // Column index relative to the slice window.
  inline IType offset_j() const {
    return proper_j() - j_offset;
  }
  // Capacity of the underlying source storage.
  inline size_t capacity() const {
    return reinterpret_cast<YALE_STORAGE*>(s->src)->capacity;
  }
  // Grow the source storage's vectors, refreshing our cached pointers.
  inline void vector_grow() {
    YALE_STORAGE* src = reinterpret_cast<YALE_STORAGE*>(s->src);
    nm::yale_storage::vector_grow(src);
    ija = reinterpret_cast<IType*>(src->ija);
    a = src->a;
  }
  /* Returns true if an additional value is inserted, false if it goes on the diagonal */
  // NOTE(review): only writes as VALUE, so presumably used on RUBYOBJ matrices -- confirm callers.
  bool insert(IType j, VALUE v) {
    if (j == i) { // insert regardless on diagonal
      reinterpret_cast<VALUE*>(a)[j] = v;
      return false;
    } else {
      // Only store values that differ from the matrix default.
      if (rb_funcall(v, rb_intern("!="), 1, init) == Qtrue) {
        if (k >= capacity()) {
          vector_grow();
        }
        reinterpret_cast<VALUE*>(a)[k] = v;
        ija[k] = j;
        k++;
        return true;
      }
      return false;
    }
  }
  // Record k as the new end of row i in IJA (after insertions).
  void update_row_end() {
    ija[i+1] = k;
    k_end = k;
  }
  /* Past the j_shape? */
  inline bool end() const {
    if (End) return true;
    //if (diag) return i - j_offset >= j_shape;
    //else return k >= s->capacity || ija[k] - j_offset >= j_shape;
    return (int)(diag ? i : ija[k]) - (int)(j_offset) >= (int)(j_shape);
  }
  inline bool row_has_no_nd() const { return ija[i] == k_end; /* k_start == k_end */ }
  inline bool diag_is_first() const { return i < ija[ija[i]]; }
  inline bool diag_is_last() const { return i > ija[k_end-1]; } // only works if !row_has_no_nd()
  inline bool k_is_last_nd() const { return k == k_end-1; }
  inline bool k_is_last() const { return k_is_last_nd() && !diag_is_last(); }
  inline bool diag_is_ahead() const { return i > ija[k]; }
  inline bool row_has_diag() const { return i < s->shape[1]; }
  inline bool diag_is_next() const { // assumes we've already tested for diag, row_has_no_nd(), diag_is_first()
    if (i == ija[k]+1) return true; // definite next
    else if (k+1 < k_end && i >= ija[k+1]+1) return false; // at least one item before it
    else return true;
  }
  // Advance to the next stored entry in column order, toggling between the
  // diagonal and the non-diagonal run; sets End when the row is exhausted.
  RowIterator& operator++() {
    if (diag) { // we're at the diagonal
      if (row_has_no_nd() || diag_is_last()) End = true; // and there are no non-diagonals (or none still to visit)
      diag = false;
    } else if (!row_has_diag()) { // row has no diagonal entries
      if (row_has_no_nd() || k_is_last_nd()) End = true; // row is totally empty, or we're at last entry
      else k++; // still entries to visit
    } else { // not at diag but it exists somewhere in the row, and row has at least one nd entry
      if (diag_is_ahead()) { // diag is ahead
        if (k_is_last_nd()) diag = true; // diag is next and last
        else if (diag_is_next()) { // diag is next and not last
          diag = true;
          k++;
        } else k++; // diag is not next
      } else { // diag is past
        if (k_is_last_nd()) End = true; // and we're at the end
        else k++; // and we're not at the end
      }
    }
    return *this;
  }
  // Post-increment: copy, advance, return the copy.
  RowIterator operator++(int unused) {
    RowIterator x(*this);
    ++(*this);
    return x;
  }
};
// Helper function used only for the RETURN_SIZED_ENUMERATOR macro. Returns the length of
// the matrix's storage: stored diagonal length plus the non-diagonal stored entries.
static VALUE nm_yale_stored_enumerator_length(VALUE nmatrix) {
  NM_CONSERVATIVE(nm_register_value(&nmatrix));
  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);
  // Walk back to the original storage for slice views.
  YALE_STORAGE* src = s->src == s ? s : reinterpret_cast<YALE_STORAGE*>(s->src);
  size_t ia_size = src->shape[0];
  // FIXME: This needs to be corrected for slicing.
  size_t len = std::min( s->shape[0] + s->offset[0], s->shape[1] + s->offset[1] ) + nm_yale_storage_get_size(src) - ia_size;
  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
  return INT2FIX(len);
}
// Helper function used only for the RETURN_SIZED_ENUMERATOR macro. Returns the number of
// stored non-diagonal entries (total stored entries minus the IA/diagonal block).
static VALUE nm_yale_stored_nondiagonal_enumerator_length(VALUE nmatrix) {
  NM_CONSERVATIVE(nm_register_value(&nmatrix));
  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);
  if (s->src != s) s = reinterpret_cast<YALE_STORAGE*>(s->src); // need to get the original storage shape
  size_t ia_size = s->shape[0];
  // NOTE(review): ia_size comes from the original storage but the size is taken from the
  // (possibly sliced) wrapper -- looks like it may intend the src storage here; confirm.
  size_t len = nm_yale_storage_get_size(NM_STORAGE_YALE(nmatrix)) - ia_size;
  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
  return INT2FIX(len);
}
// Helper for RETURN_SIZED_ENUMERATOR: length of the stored diagonal, i.e. the
// shorter of the (offset-adjusted) row and column extents.
static VALUE nm_yale_stored_diagonal_enumerator_length(VALUE nmatrix) {
  NM_CONSERVATIVE(nm_register_value(&nmatrix));
  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);
  size_t rows = s->shape[0] + s->offset[0];
  size_t cols = s->shape[1] + s->offset[1];
  size_t len  = rows < cols ? rows : cols;
  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
  return INT2FIX(len);
}
// Helper for RETURN_SIZED_ENUMERATOR: full (dense) element count, rows * columns.
static VALUE nm_yale_enumerator_length(VALUE nmatrix) {
  NM_CONSERVATIVE(nm_register_value(&nmatrix));
  YALE_STORAGE* s = NM_STORAGE_YALE(nmatrix);
  size_t rows = s->shape[0];
  size_t cols = s->shape[1];
  size_t len  = rows * cols;
  NM_CONSERVATIVE(nm_unregister_value(&nmatrix));
  return INT2FIX(len);
}
/*
* Map the stored values of a matrix in storage order.
*/
/*
 * Map the stored values of a matrix in storage order, returning a new
 * RubyObject-dtype NMatrix of the block's results. Without a block, returns
 * a sized enumerator. The PRE/RETURN_SIZED_ENUMERATOR macro pair and the
 * register/unregister ordering around them are deliberate -- do not reorder.
 */
template <typename D>
static VALUE map_stored(VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  YALE_STORAGE* s = NM_STORAGE_YALE(self);
  YaleStorage<D> y(s);
  RETURN_SIZED_ENUMERATOR_PRE
  NM_CONSERVATIVE(nm_unregister_value(&self));
  RETURN_SIZED_ENUMERATOR(self, 0, 0, nm_yale_stored_enumerator_length);
  // alloc_copy<RubyObject, true> yields each stored entry to the block.
  YALE_STORAGE* r = y.template alloc_copy<nm::RubyObject, true>();
  nm_yale_storage_register(r);
  NMATRIX* m = nm_create(nm::YALE_STORE, reinterpret_cast<STORAGE*>(r));
  VALUE to_return = Data_Wrap_Struct(CLASS_OF(self), nm_mark, nm_delete, m);
  nm_yale_storage_unregister(r);
  NM_CONSERVATIVE(nm_unregister_value(&self));
  return to_return;
}
/*
* map_stored which visits the stored entries of two matrices in order.
*/
/*
 * map_stored which visits the stored entries of two matrices in order,
 * yielding pairs; init supplies the default for positions stored in only one.
 */
template <typename LD, typename RD>
static VALUE map_merged_stored(VALUE left, VALUE right, VALUE init) {
  nm::YaleStorage<LD> lhs(NM_STORAGE_YALE(left));
  nm::YaleStorage<RD> rhs(NM_STORAGE_YALE(right));
  return lhs.map_merged_stored(CLASS_OF(left), rhs, init);
}
/*
* Iterate over the stored entries in Yale (diagonal and non-diagonal non-zeros)
*/
/*
 * Iterate over the stored entries in Yale (diagonal and non-diagonal
 * non-zeros), yielding (value, i, j) for each. Diagonal entries are visited
 * first, then the non-diagonal entries row by row. Without a block, returns
 * a sized enumerator.
 */
template <typename DType>
static VALUE each_stored_with_indices(VALUE nm) {
  NM_CONSERVATIVE(nm_register_value(&nm));
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);
  // If we don't have a block, return an enumerator.
  RETURN_SIZED_ENUMERATOR_PRE
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_enumerator_length);
  for (typename YaleStorage<DType>::const_stored_diagonal_iterator d = y.csdbegin(); d != y.csdend(); ++d) {
    rb_yield_values(3, ~d, d.rb_i(), d.rb_j());
  }
  for (typename YaleStorage<DType>::const_row_iterator it = y.cribegin(); it != y.criend(); ++it) {
    for (auto jt = it.ndbegin(); jt != it.ndend(); ++jt) {
      rb_yield_values(3, ~jt, it.rb_i(), jt.rb_j());
    }
  }
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  return nm;
}
/*
* Iterate over the stored diagonal entries in Yale.
*/
/*
 * Iterate over the stored diagonal entries in Yale, yielding (value, i, j)
 * triples. Without a block, returns a sized enumerator.
 */
template <typename DType>
static VALUE stored_diagonal_each_with_indices(VALUE nm) {
  NM_CONSERVATIVE(nm_register_value(&nm));
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);
  // If we don't have a block, return an enumerator.
  RETURN_SIZED_ENUMERATOR_PRE
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  // Size with the defined diagonal-length helper (the original referenced the
  // undefined name nm_yale_stored_diagonal_length, flagged by a FIXME).
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_diagonal_enumerator_length);
  for (typename YaleStorage<DType>::const_stored_diagonal_iterator d = y.csdbegin(); d != y.csdend(); ++d) {
    rb_yield_values(3, ~d, d.rb_i(), d.rb_j());
  }
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  return nm;
}
/*
 * Iterate over the stored non-diagonal entries in Yale, yielding
 * (value, i, j) triples row by row. Without a block, returns a sized
 * enumerator. (The original comment said "diagonal" -- copy-paste error.)
 */
template <typename DType>
static VALUE stored_nondiagonal_each_with_indices(VALUE nm) {
  NM_CONSERVATIVE(nm_register_value(&nm));
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);
  // If we don't have a block, return an enumerator.
  RETURN_SIZED_ENUMERATOR_PRE
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  // Size with the nondiagonal-length helper (the original passed a literal 0,
  // flagged by a FIXME), matching the sibling iterators.
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_nondiagonal_enumerator_length);
  for (typename YaleStorage<DType>::const_row_iterator it = y.cribegin(); it != y.criend(); ++it) {
    for (auto jt = it.ndbegin(); jt != it.ndend(); ++jt) {
      rb_yield_values(3, ~jt, it.rb_i(), jt.rb_j());
    }
  }
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  return nm;
}
/*
* Iterate over the stored entries in Yale in order of i,j. Visits every diagonal entry, even if it's the default.
*/
/*
 * Iterate over the stored entries in Yale in order of i,j. Visits every diagonal entry, even if it's the default.
 *
 * Yields (value, i, j) for each entry; without a block, returns a sized
 * enumerator.
 */
template <typename DType>
static VALUE each_ordered_stored_with_indices(VALUE nm) {
  NM_CONSERVATIVE(nm_register_value(&nm));
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);
  // If we don't have a block, return an enumerator.
  RETURN_SIZED_ENUMERATOR_PRE
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_stored_enumerator_length);
  // Row iterator's begin()/end() interleave the diagonal in column order.
  for (typename YaleStorage<DType>::const_row_iterator it = y.cribegin(); it != y.criend(); ++it) {
    for (auto jt = it.begin(); jt != it.end(); ++jt) {
      rb_yield_values(3, ~jt, it.rb_i(), jt.rb_j());
    }
  }
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  return nm;
}
/*
 * Iterate over every element of the matrix (stored or default), yielding
 * (value, i, j) in dense order. Without a block, returns a sized enumerator.
 */
template <typename DType>
static VALUE each_with_indices(VALUE nm) {
  NM_CONSERVATIVE(nm_register_value(&nm));
  YALE_STORAGE* s = NM_STORAGE_YALE(nm);
  YaleStorage<DType> y(s);
  // If we don't have a block, return an enumerator.
  RETURN_SIZED_ENUMERATOR_PRE
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  RETURN_SIZED_ENUMERATOR(nm, 0, 0, nm_yale_enumerator_length);
  for (typename YaleStorage<DType>::const_iterator iter = y.cbegin(); iter != y.cend(); ++iter) {
    rb_yield_values(3, ~iter, iter.rb_i(), iter.rb_j());
  }
  NM_CONSERVATIVE(nm_unregister_value(&nm));
  return nm;
}
/*
 * True when the A-array entry at apos holds the matrix's default value.
 */
template <typename D>
static bool is_pos_default_value(YALE_STORAGE* s, size_t apos) {
  return YaleStorage<D>(s).is_pos_default_value(apos);
}
} // end of namespace nm::yale_storage
} // end of namespace nm.
///////////////////
// Ruby Bindings //
///////////////////
/* These bindings are mostly only for debugging Yale. They are called from Init_nmatrix. */
extern "C" {
/*
 * Register the NMatrix::YaleFunctions Ruby module and its methods, plus the
 * GROWTH_CONSTANT and INDEX_DTYPE constants. Called once from Init_nmatrix.
 */
void nm_init_yale_functions() {
  /*
   * This module stores methods that are useful for debugging Yale matrices,
   * i.e. the ones with +:yale+ stype.
   */
  cNMatrix_YaleFunctions = rb_define_module_under(cNMatrix, "YaleFunctions");
  // Expert recommendation. Eventually this should go in a separate gem, or at least a separate module.
  rb_define_method(cNMatrix_YaleFunctions, "yale_row_keys_intersection", (METHOD)nm_row_keys_intersection, 3);
  // Debugging functions.
  rb_define_method(cNMatrix_YaleFunctions, "yale_ija", (METHOD)nm_ija, -1);
  rb_define_method(cNMatrix_YaleFunctions, "yale_a", (METHOD)nm_a, -1);
  rb_define_method(cNMatrix_YaleFunctions, "yale_size", (METHOD)nm_size, 0);
  rb_define_method(cNMatrix_YaleFunctions, "yale_ia", (METHOD)nm_ia, 0);
  rb_define_method(cNMatrix_YaleFunctions, "yale_ja", (METHOD)nm_ja, 0);
  rb_define_method(cNMatrix_YaleFunctions, "yale_d", (METHOD)nm_d, -1);
  rb_define_method(cNMatrix_YaleFunctions, "yale_lu", (METHOD)nm_lu, 0);
  rb_define_method(cNMatrix_YaleFunctions, "yale_nd_row", (METHOD)nm_nd_row, -1);
  /* Document-const:
   * Defines the growth rate of the sparse NMatrix's size. Default is 1.5.
   */
  rb_define_const(cNMatrix_YaleFunctions, "YALE_GROWTH_CONSTANT", rb_float_new(nm::yale_storage::GROWTH_CONSTANT));
  // This is so the user can easily check the IType size, mostly for debugging.
  size_t itype_size = sizeof(IType);
  VALUE itype_dtype;
  if (itype_size == sizeof(uint64_t)) {
    itype_dtype = ID2SYM(rb_intern("int64"));
  } else if (itype_size == sizeof(uint32_t)) {
    itype_dtype = ID2SYM(rb_intern("int32"));
  } else if (itype_size == sizeof(uint16_t)) {
    itype_dtype = ID2SYM(rb_intern("int16"));
  } else {
    rb_raise(rb_eStandardError, "unhandled length for sizeof(IType): %lu; note that IType is probably defined as size_t", sizeof(IType));
  }
  rb_define_const(cNMatrix, "INDEX_DTYPE", itype_dtype);
}
/////////////////
// C ACCESSORS //
/////////////////
/* C interface for NMatrix#each_with_indices (Yale) */
VALUE nm_yale_each_with_indices(VALUE nmatrix) {
  // Dispatch on dtype to the templated full-matrix iterator.
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::each_with_indices, VALUE, VALUE)
  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);
}
/* C interface for NMatrix#each_stored_with_indices (Yale) */
VALUE nm_yale_each_stored_with_indices(VALUE nmatrix) {
  // Dispatch on dtype to the templated stored-entry iterator.
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::each_stored_with_indices, VALUE, VALUE)
  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);
}
/* Iterate along stored diagonal (not actual diagonal!) */
VALUE nm_yale_stored_diagonal_each_with_indices(VALUE nmatrix) {
  // Dispatch on dtype to the templated stored-diagonal iterator.
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::stored_diagonal_each_with_indices, VALUE, VALUE)
  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);
}
/* Iterate through stored nondiagonal (not actual diagonal!) */
VALUE nm_yale_stored_nondiagonal_each_with_indices(VALUE nmatrix) {
  // Dispatch on dtype to the templated stored-nondiagonal iterator.
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::stored_nondiagonal_each_with_indices, VALUE, VALUE)
  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);
}
/* C interface for NMatrix#each_ordered_stored_with_indices (Yale) */
VALUE nm_yale_each_ordered_stored_with_indices(VALUE nmatrix) {
  // Dispatch on dtype to the templated i,j-ordered stored-entry iterator.
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::each_ordered_stored_with_indices, VALUE, VALUE)
  return ttable[ NM_DTYPE(nmatrix) ](nmatrix);
}
/*
* C accessor for inserting some value in a matrix (or replacing an existing cell).
*/
/*
 * C accessor for inserting some value in a matrix (or replacing an existing cell).
 */
void nm_yale_storage_set(VALUE left, SLICE* slice, VALUE right) {
  // Dispatch on the matrix's dtype to the templated setter.
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::set, void, VALUE left, SLICE* slice, VALUE right);
  ttable[NM_DTYPE(left)](left, slice, right);
}
/*
* Determine the number of non-diagonal non-zeros in a not-yet-created copy of a slice or matrix.
*/
/*
 * Determine the number of non-diagonal non-zeros in a not-yet-created copy of a slice or matrix.
 * Dtype-dispatching wrapper around nm::yale_storage::count_slice_copy_ndnz.
 */
static size_t yale_count_slice_copy_ndnz(const YALE_STORAGE* s, size_t* offset, size_t* shape) {
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::count_slice_copy_ndnz, size_t, const YALE_STORAGE*, size_t*, size_t*)
  return ttable[s->dtype](s, offset, shape);
}
/*
* C accessor for yale_storage::get, which returns a slice of YALE_STORAGE object by copy
*
* Slicing-related.
*/
/*
 * C accessor for yale_storage::get, which returns a slice of YALE_STORAGE object by copy
 *
 * Slicing-related. Single-element slices return a pointer to the element;
 * larger slices build a temporary reference view, compact-copy it, free the
 * view, and return the copy.
 */
void* nm_yale_storage_get(const STORAGE* storage, SLICE* slice) {
  YALE_STORAGE* casted_storage = (YALE_STORAGE*)storage;
  if (slice->single) {
    NAMED_DTYPE_TEMPLATE_TABLE(elem_copy_table, nm::yale_storage::get_single, void*, YALE_STORAGE*, SLICE*)
    return elem_copy_table[casted_storage->dtype](casted_storage, slice);
  } else {
    nm_yale_storage_register(casted_storage);
    //return reinterpret_cast<void*>(nm::YaleStorage<nm::dtype_enum_T<storage->dtype>::type>(casted_storage).alloc_ref(slice));
    NAMED_DTYPE_TEMPLATE_TABLE(ref_table, nm::yale_storage::ref, YALE_STORAGE*, YALE_STORAGE* storage, SLICE* slice)
    YALE_STORAGE* ref = ref_table[casted_storage->dtype](casted_storage, slice);
    // slice_copy is instantiated with identical L/R dtypes: copy without cast.
    NAMED_LR_DTYPE_TEMPLATE_TABLE(slice_copy_table, nm::yale_storage::slice_copy, YALE_STORAGE*, YALE_STORAGE*)
    YALE_STORAGE* ns = slice_copy_table[casted_storage->dtype][casted_storage->dtype](ref);
    NM_FREE(ref); // the reference view was only needed to drive the copy
    nm_yale_storage_unregister(casted_storage);
    return ns;
  }
}
/*
* C accessor for yale_storage::vector_insert
*/
/*
 * C accessor for yale_storage::vector_insert
 */
static char nm_yale_storage_vector_insert(YALE_STORAGE* s, size_t pos, size_t* js, void* vals, size_t n, bool struct_only, nm::dtype_t dtype) {
  // Dispatch on the supplied dtype (not s->dtype) to the templated insert.
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::vector_insert, char, YALE_STORAGE*, size_t, size_t*, void*, size_t, bool);
  return ttable[dtype](s, pos, js, vals, n, struct_only);
}
/*
* C accessor for yale_storage::increment_ia_after, typically called after ::vector_insert
*/
/*
 * C accessor for yale_storage::increment_ia_after, typically called after ::vector_insert
 */
static void nm_yale_storage_increment_ia_after(YALE_STORAGE* s, size_t ija_size, size_t i, long n) {
  nm::yale_storage::increment_ia_after(s, ija_size, i, n);
}
/*
* C accessor for yale_storage::ref, which returns either a pointer to the correct location in a YALE_STORAGE object
* for some set of coordinates, or a pointer to a single element.
*/
/*
 * C accessor for yale_storage::ref, which returns either a pointer to the correct location in a YALE_STORAGE object
 * for some set of coordinates, or a pointer to a single element.
 *
 * Unlike nm_yale_storage_get, multi-element slices return a reference view
 * (shared storage), not a copy.
 */
void* nm_yale_storage_ref(const STORAGE* storage, SLICE* slice) {
  YALE_STORAGE* casted_storage = (YALE_STORAGE*)storage;
  if (slice->single) {
    //return reinterpret_cast<void*>(nm::YaleStorage<nm::dtype_enum_T<storage->dtype>::type>(casted_storage).get_single_p(slice));
    NAMED_DTYPE_TEMPLATE_TABLE(elem_copy_table, nm::yale_storage::get_single, void*, YALE_STORAGE*, SLICE*)
    return elem_copy_table[casted_storage->dtype](casted_storage, slice);
  } else {
    //return reinterpret_cast<void*>(nm::YaleStorage<nm::dtype_enum_T<storage->dtype>::type>(casted_storage).alloc_ref(slice));
    NAMED_DTYPE_TEMPLATE_TABLE(ref_table, nm::yale_storage::ref, YALE_STORAGE*, YALE_STORAGE* storage, SLICE* slice)
    return reinterpret_cast<void*>(ref_table[casted_storage->dtype](casted_storage, slice));
  }
}
/*
* C accessor for determining whether two YALE_STORAGE objects have the same contents.
*/
/*
 * C accessor for determining whether two YALE_STORAGE objects have the same contents.
 */
bool nm_yale_storage_eqeq(const STORAGE* left, const STORAGE* right) {
  // Double dispatch on both operands' dtypes.
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::eqeq, bool, const YALE_STORAGE* left, const YALE_STORAGE* right);
  const YALE_STORAGE* casted_left = reinterpret_cast<const YALE_STORAGE*>(left);
  return ttable[casted_left->dtype][right->dtype](casted_left, (const YALE_STORAGE*)right);
}
/*
* Copy constructor for changing dtypes. (C accessor)
*/
/*
 * Copy constructor for changing dtypes. (C accessor)
 *
 * The dummy parameter is unused; it exists to match the generic
 * cast-copy signature shared by the other storage types.
 */
STORAGE* nm_yale_storage_cast_copy(const STORAGE* rhs, nm::dtype_t new_dtype, void* dummy) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::cast_copy, YALE_STORAGE*, const YALE_STORAGE* rhs);
  const YALE_STORAGE* casted_rhs = reinterpret_cast<const YALE_STORAGE*>(rhs);
  //return reinterpret_cast<STORAGE*>(nm::YaleStorage<nm::dtype_enum_T< rhs->dtype >::type>(rhs).alloc_copy<nm::dtype_enum_T< new_dtype >::type>());
  return (STORAGE*)ttable[new_dtype][casted_rhs->dtype](casted_rhs);
}
/*
* Returns size of Yale storage as a size_t (no matter what the itype is). (C accessor)
*/
/*
 * Returns size of Yale storage as a size_t (no matter what the itype is). (C accessor)
 */
size_t nm_yale_storage_get_size(const YALE_STORAGE* storage) {
  return nm::yale_storage::get_size(storage);
}
/*
* Return a pointer to the matrix's default value entry.
*/
/*
 * Return a pointer to the matrix's default value entry.
 *
 * The default value lives in the source storage's A array immediately after
 * the diagonal, at index shape[0] of the source.
 */
static void* default_value_ptr(const YALE_STORAGE* s) {
  return reinterpret_cast<void*>(reinterpret_cast<char*>(((YALE_STORAGE*)(s->src))->a) + (((YALE_STORAGE*)(s->src))->shape[0] * DTYPE_SIZES[s->dtype]));
}
/*
* Return the Ruby object at a given location in storage.
*/
/*
 * Return the Ruby object at a given location in storage.
 *
 * k indexes the source storage's A array; non-RUBYOBJ dtypes are converted
 * via rubyobj_from_cval.
 */
static VALUE obj_at(YALE_STORAGE* s, size_t k) {
  if (s->dtype == nm::RUBYOBJ) return reinterpret_cast<VALUE*>(((YALE_STORAGE*)(s->src))->a)[k];
  else return nm::rubyobj_from_cval(reinterpret_cast<void*>(reinterpret_cast<char*>(((YALE_STORAGE*)(s->src))->a) + k * DTYPE_SIZES[s->dtype]), s->dtype).rval;
}
/*
* Return the matrix's default value as a Ruby VALUE.
*/
/*
 * Return the matrix's default value as a Ruby VALUE.
 */
static VALUE default_value(const YALE_STORAGE* s) {
  if (s->dtype == nm::RUBYOBJ) return *reinterpret_cast<VALUE*>(default_value_ptr(s));
  else return nm::rubyobj_from_cval(default_value_ptr(s), s->dtype).rval;
}
/*
* Check to see if a default value is some form of zero. Easy for non-Ruby object matrices, which should always be 0.
*/
static bool default_value_is_numeric_zero(const YALE_STORAGE* s) {
return rb_funcall(default_value(s), rb_intern("=="), 1, INT2FIX(0)) == Qtrue;
}
/*
 * Transposing copy constructor.
 *
 * Allocates and returns a new YALE_STORAGE holding the transpose of
 * rhs_base. Dispatch on dtype goes through a template-generated table.
 */
STORAGE* nm_yale_storage_copy_transposed(const STORAGE* rhs_base) {
  YALE_STORAGE* rhs = (YALE_STORAGE*)rhs_base;

  // Declares `transp`, a per-dtype function-pointer table.
  NAMED_DTYPE_TEMPLATE_TABLE(transp, nm::yale_storage::copy_transposed, YALE_STORAGE*, YALE_STORAGE*)

  return (STORAGE*)(transp[rhs->dtype](rhs));
}
/*
 * C accessor for multiplying two YALE_STORAGE matrices, which have already been casted to the same dtype.
 *
 * Both operands must use a numeric-zero default value, since sparse
 * multiplication assumes unstored entries contribute nothing.
 *
 * FIXME: There should be some mathematical way to determine the worst-case IType based on the input ITypes. Right now
 * it just uses the default.
 */
STORAGE* nm_yale_storage_matrix_multiply(const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector) {
  // Declares `ttable`, a per-dtype function-pointer table (dtype is shared by both operands).
  DTYPE_TEMPLATE_TABLE(nm::yale_storage::matrix_multiply, STORAGE*, const STORAGE_PAIR& casted_storage, size_t* resulting_shape, bool vector);

  YALE_STORAGE* left = reinterpret_cast<YALE_STORAGE*>(casted_storage.left);
  YALE_STORAGE* right = reinterpret_cast<YALE_STORAGE*>(casted_storage.right);

  if (!default_value_is_numeric_zero(left) || !default_value_is_numeric_zero(right)) {
    rb_raise(rb_eNotImpError, "matrix default value must be some form of zero (not false or nil) for multiplication");
    return NULL; // unreachable: rb_raise longjmps and does not return
  }

  return ttable[left->dtype](casted_storage, resulting_shape, vector);
}
///////////////
// Lifecycle //
///////////////
/*
 * C accessor function for creating a YALE_STORAGE object. Prior to calling this function, you MUST
 * allocate shape (should be size_t * 2) -- don't use use a regular size_t array!
 *
 * For this type, dim must always be 2. The final argument is the initial capacity with which to
 * create the storage.
 *
 * NOTE(review): the shape pointer appears to be adopted by the created
 * storage (it is not copied here) -- confirm ownership expectations with
 * callers before freeing it yourself.
 */
YALE_STORAGE* nm_yale_storage_create(nm::dtype_t dtype, size_t* shape, size_t dim, size_t init_capacity) {
  if (dim != 2) {
    // rb_raise longjmps; execution does not continue past this point.
    rb_raise(nm_eStorageTypeError, "yale supports only 2-dimensional matrices");
  }
  // Declares `ttable`, mapping dtype -> YaleStorage<T>::create.
  DTYPE_OBJECT_STATIC_TABLE(nm::YaleStorage, create, YALE_STORAGE*, size_t* shape, size_t init_capacity)
  return ttable[dtype](shape, init_capacity);
}
/*
 * Destructor for yale storage (C-accessible).
 *
 * Drops one reference; the arrays and the struct itself are freed only
 * when the last reference goes away.
 */
void nm_yale_storage_delete(STORAGE* s) {
  if (!s) return;

  YALE_STORAGE* storage = (YALE_STORAGE*)s;
  if (--storage->count == 0) {
    NM_FREE(storage->shape);
    NM_FREE(storage->offset);
    NM_FREE(storage->ija);
    NM_FREE(storage->a);
    NM_FREE(storage);
  }
}
/*
 * Destructor for a yale storage reference (slice view).
 *
 * Releases the reference held on the underlying source matrix, then
 * frees only the view's own bookkeeping (shape/offset) and the struct.
 */
void nm_yale_storage_delete_ref(STORAGE* s) {
  if (!s) return;

  YALE_STORAGE* storage = (YALE_STORAGE*)s;
  nm_yale_storage_delete(reinterpret_cast<STORAGE*>(storage->src));
  NM_FREE(storage->shape);
  NM_FREE(storage->offset);
  NM_FREE(s);
}
/*
 * C accessor for yale_storage::init, a templated function.
 *
 * Initializes the IJA vector of the YALE_STORAGE matrix.
 * init_val points to the default value to install (dtype-sized blob).
 */
void nm_yale_storage_init(YALE_STORAGE* s, void* init_val) {
  // Declares `ttable`, a per-dtype function-pointer table.
  DTYPE_TEMPLATE_TABLE(nm::yale_storage::init, void, YALE_STORAGE*, void*);
  ttable[s->dtype](s, init_val);
}
/*
 * Ruby GC mark function for YALE_STORAGE. C accessible.
 *
 * Only RUBYOBJ matrices keep VALUEs in their A array; other dtypes hold
 * plain numeric data and need no marking.
 */
void nm_yale_storage_mark(STORAGE* storage_base) {
  YALE_STORAGE* storage = (YALE_STORAGE*)storage_base;

  if (storage && storage->dtype == nm::RUBYOBJ) {
    VALUE* a = (VALUE*)(storage->a);
    // NOTE(review): MRI's rb_gc_mark_locations treats the end pointer as
    // exclusive, so &a[capacity-1] may leave the final slot unmarked --
    // confirm against the targeted Ruby version.
    rb_gc_mark_locations(a, &(a[storage->capacity-1]));
  }
}
/* Register `size` VALUEs starting at `a` with the GC-protection machinery. */
void nm_yale_storage_register_a(void* a, size_t size) {
  nm_register_values(reinterpret_cast<VALUE*>(a), size);
}
/* Undo nm_yale_storage_register_a for the same pointer/length pair. */
void nm_yale_storage_unregister_a(void* a, size_t size) {
  nm_unregister_values(reinterpret_cast<VALUE*>(a), size);
}
/*
 * GC-protect the VALUEs stored in a RUBYOBJ yale matrix's A array.
 * No-op for any other dtype.
 */
void nm_yale_storage_register(const STORAGE* s) {
  const YALE_STORAGE* y = reinterpret_cast<const YALE_STORAGE*>(s);
  if (y->dtype != nm::RUBYOBJ) return;
  nm_register_values(reinterpret_cast<VALUE*>(y->a), nm::yale_storage::get_size(y));
}
/*
 * Undo nm_yale_storage_register: release GC protection for a RUBYOBJ
 * matrix's A array. No-op for any other dtype.
 */
void nm_yale_storage_unregister(const STORAGE* s) {
  const YALE_STORAGE* y = reinterpret_cast<const YALE_STORAGE*>(s);
  if (y->dtype != nm::RUBYOBJ) return;
  nm_unregister_values(reinterpret_cast<VALUE*>(y->a), nm::yale_storage::get_size(y));
}
/*
 * Allocates and initializes the basic struct (but not the IJA or A vectors).
 *
 * This function is ONLY used when creating from old yale. The shape
 * pointer is adopted as-is; a fresh zeroed offset array is allocated,
 * the storage points at itself as its own source, and the reference
 * count starts at 1.
 */
static YALE_STORAGE* alloc(nm::dtype_t dtype, size_t* shape, size_t dim) {
  YALE_STORAGE* s = NM_ALLOC( YALE_STORAGE );

  s->ndnz   = 0;
  s->dtype  = dtype;
  s->shape  = shape;
  s->offset = NM_ALLOC_N(size_t, dim);
  for (size_t i = 0; i < dim; ++i) s->offset[i] = 0;
  s->dim    = dim;
  s->src    = reinterpret_cast<STORAGE*>(s);
  s->count  = 1;

  return s;
}
/*
 * Build a YALE_STORAGE of dtype `dtype` from old-Yale-format arrays
 * (ia, ja, a) whose element dtype is `from_dtype`. Dispatch is over the
 * (target, source) dtype pair.
 */
YALE_STORAGE* nm_yale_storage_create_from_old_yale(nm::dtype_t dtype, size_t* shape, char* ia, char* ja, char* a, nm::dtype_t from_dtype) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::create_from_old_yale, YALE_STORAGE*, nm::dtype_t dtype, size_t* shape, char* r_ia, char* r_ja, char* r_a);
  return ttable[dtype][from_dtype](dtype, shape, ia, ja, a);
}
//////////////////////////////////////////////
// YALE-SPECIFIC FUNCTIONS (RUBY ACCESSORS) //
//////////////////////////////////////////////
/*
 * call-seq:
 *     yale_size -> Integer
 *
 * Get the size of a Yale matrix (the number of elements actually stored).
 *
 * For capacity (the maximum number of elements that can be stored without a resize), use capacity instead.
 */
static VALUE nm_size(VALUE self) {
  YALE_STORAGE* s = (YALE_STORAGE*)(NM_SRC(self));
  // The stored-element count is IJA[shape[0]] by the Yale invariant.
  return INT2FIX(nm::yale_storage::IJA(s)[s->shape[0]]);
}
/*
 * Determine if some pos in the diagonal is the default. No bounds checking!
 *
 * `apos` indexes directly into the A array; dispatch is by dtype.
 */
static bool is_pos_default_value(YALE_STORAGE* s, size_t apos) {
  // Declares `ttable`, a per-dtype function-pointer table.
  DTYPE_TEMPLATE_TABLE(nm::yale_storage::is_pos_default_value, bool, YALE_STORAGE*, size_t)
  return ttable[s->dtype](s, apos);
}
/*
 * call-seq:
 *     yale_row_keys_intersection(i, m2, i2) -> Array
 *
 * This function is experimental.
 *
 * It finds the intersection of row i of the current matrix with row i2 of matrix m2.
 * Both matrices must be Yale. They may not be slices.
 *
 * Only checks the stored indices; does not care about matrix default value.
 *
 * Implementation: a two-pointer merge over the sorted JA segments of the
 * two rows, with separate handling for each row's (separately stored)
 * diagonal entry.
 */
static VALUE nm_row_keys_intersection(VALUE m1, VALUE ii1, VALUE m2, VALUE ii2) {
  NM_CONSERVATIVE(nm_register_value(&m1));
  NM_CONSERVATIVE(nm_register_value(&m2));

  // Slices do not own their IJA arrays; refuse them.
  if (NM_SRC(m1) != NM_STORAGE(m1) || NM_SRC(m2) != NM_STORAGE(m2)) {
    NM_CONSERVATIVE(nm_unregister_value(&m2));
    NM_CONSERVATIVE(nm_unregister_value(&m1));
    rb_raise(rb_eNotImpError, "must be called on a real matrix and not a slice");
  }

  size_t i1 = FIX2INT(ii1),
         i2 = FIX2INT(ii2);

  YALE_STORAGE *s = NM_STORAGE_YALE(m1),
               *t = NM_STORAGE_YALE(m2);

  // Each row's stored non-diagonal entries occupy IJA positions [pos, nextpos).
  size_t pos1 = s->ija[i1],
         pos2 = t->ija[i2];
  size_t nextpos1 = s->ija[i1+1],
         nextpos2 = t->ija[i2+1];
  size_t diff1 = nextpos1 - pos1,
         diff2 = nextpos2 - pos2;

  // Does the diagonal have a nonzero in it?
  bool diag1 = i1 < s->shape[0] && !is_pos_default_value(s, i1),
       diag2 = i2 < t->shape[0] && !is_pos_default_value(t, i2);

  // Reserve max(diff1,diff2) space -- that's the max intersection possible.
  VALUE ret = rb_ary_new2(std::max(diff1,diff2)+1);
  nm_register_value(&ret);

  // Handle once the special case where both have the diagonal in exactly
  // the same place.
  if (diag1 && diag2 && i1 == i2) {
    rb_ary_push(ret, INT2FIX(i1));
    diag1 = false; diag2 = false; // no need to deal with diagonals anymore.
  }

  // Now find the intersection.
  size_t idx1 = pos1, idx2 = pos2;
  while (idx1 < nextpos1 && idx2 < nextpos2) {
    if (s->ija[idx1] == t->ija[idx2]) {
      // Same column stored in both rows: part of the intersection.
      rb_ary_push(ret, INT2FIX(s->ija[idx1]));
      ++idx1; ++idx2;

    } else if (diag1 && i1 == t->ija[idx2]) {
      // m1's diagonal column matches a stored entry of m2's row.
      rb_ary_push(ret, INT2FIX(i1));
      diag1 = false;
      ++idx2;

    } else if (diag2 && i2 == s->ija[idx1]) {
      // m2's diagonal column matches a stored entry of m1's row.
      rb_ary_push(ret, INT2FIX(i2));
      diag2 = false;
      ++idx1;

    } else if (s->ija[idx1] < t->ija[idx2]) {
      ++idx1;

    } else { // s->ija[idx1] > t->ija[idx2]
      ++idx2;
    }
  }

  // Past the end of row i2's stored entries; need to try to find diagonal
  // NOTE(review): after the binary search, s->ija[idx1] is read without
  // re-checking idx1 < nextpos1 -- presumably safe given IJA's layout,
  // but confirm binary_search_left_boundary's return contract.
  if (diag2 && idx1 < nextpos1) {
    idx1 = nm::yale_storage::binary_search_left_boundary(s, idx1, nextpos1, i2);
    if (s->ija[idx1] == i2) rb_ary_push(ret, INT2FIX(i2));
  }

  // Find the diagonal, if possible, in the other one.
  if (diag1 && idx2 < nextpos2) {
    idx2 = nm::yale_storage::binary_search_left_boundary(t, idx2, nextpos2, i1);
    if (t->ija[idx2] == i1) rb_ary_push(ret, INT2FIX(i1));
  }

  nm_unregister_value(&ret);
  NM_CONSERVATIVE(nm_unregister_value(&m1));
  NM_CONSERVATIVE(nm_unregister_value(&m2));

  return ret;
}
/*
 * call-seq:
 *     yale_a -> Array
 *     yale_a(index) -> ...
 *
 * Get the A array of a Yale matrix (which stores the diagonal and the LU portions of the matrix).
 *
 * With no argument, returns the whole A array (padded with nils up to
 * capacity). With an index, returns the single converted entry.
 */
static VALUE nm_a(int argc, VALUE* argv, VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  VALUE idx;
  rb_scan_args(argc, argv, "01", &idx);
  NM_CONSERVATIVE(nm_register_value(&idx));

  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));
  size_t size = nm_yale_storage_get_size(s);

  if (idx == Qnil) { // no index given: return the entire A array
    VALUE* vals = NM_ALLOCA_N(VALUE, size);
    nm_register_values(vals, size);

    if (NM_DTYPE(self) == nm::RUBYOBJ) {
      for (size_t i = 0; i < size; ++i) {
        vals[i] = reinterpret_cast<VALUE*>(s->a)[i];
      }
    } else {
      // Convert each raw entry into a Ruby object.
      for (size_t i = 0; i < size; ++i) {
        vals[i] = nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*i, s->dtype).rval;
      }
    }
    VALUE ary = rb_ary_new4(size, vals);

    // Pad unused (allocated but unfilled) slots with nil up to capacity.
    for (size_t i = size; i < s->capacity; ++i)
      rb_ary_push(ary, Qnil);

    nm_unregister_values(vals, size);
    NM_CONSERVATIVE(nm_unregister_value(&idx));
    NM_CONSERVATIVE(nm_unregister_value(&self));
    return ary;

  } else { // single index: return one converted entry
    size_t index = FIX2INT(idx);
    NM_CONSERVATIVE(nm_unregister_value(&idx));
    NM_CONSERVATIVE(nm_unregister_value(&self));
    if (index >= size) rb_raise(rb_eRangeError, "out of range");

    return nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype] * index, s->dtype).rval;
  }
}
/*
 * call-seq:
 *     yale_d -> Array
 *     yale_d(index) -> ...
 *
 * Get the diagonal ("D") portion of the A array of a Yale matrix.
 *
 * The diagonal occupies the first shape[0] entries of A. With no
 * argument, returns all of them; with an index, returns one entry.
 */
static VALUE nm_d(int argc, VALUE* argv, VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  VALUE idx;
  rb_scan_args(argc, argv, "01", &idx);
  NM_CONSERVATIVE(nm_register_value(&idx));

  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

  if (idx == Qnil) { // no index given: return the whole diagonal
    VALUE* vals = NM_ALLOCA_N(VALUE, s->shape[0]);
    nm_register_values(vals, s->shape[0]);

    if (NM_DTYPE(self) == nm::RUBYOBJ) {
      for (size_t i = 0; i < s->shape[0]; ++i) {
        vals[i] = reinterpret_cast<VALUE*>(s->a)[i];
      }
    } else {
      // Convert each raw diagonal entry into a Ruby object.
      for (size_t i = 0; i < s->shape[0]; ++i) {
        vals[i] = nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*i, s->dtype).rval;
      }
    }
    // vals is alloca'd stack memory, still reachable by the conservative GC
    // after unregistering, so this ordering is safe.
    nm_unregister_values(vals, s->shape[0]);
    NM_CONSERVATIVE(nm_unregister_value(&idx));
    NM_CONSERVATIVE(nm_unregister_value(&self));
    return rb_ary_new4(s->shape[0], vals);

  } else { // single index: return one diagonal entry
    size_t index = FIX2INT(idx);
    NM_CONSERVATIVE(nm_unregister_value(&idx));
    NM_CONSERVATIVE(nm_unregister_value(&self));
    if (index >= s->shape[0]) rb_raise(rb_eRangeError, "out of range");

    return nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype] * index, s->dtype).rval;
  }
}
/*
 * call-seq:
 *     yale_lu -> Array
 *
 * Get the non-diagonal ("LU") portion of the A array of a Yale matrix.
 *
 * LU entries start at A position shape[0]+1 and run through `size`.
 * (By the Yale invariant, size >= shape[0]+1, so the subtraction below
 * cannot underflow -- presumed; confirm for freshly created matrices.)
 */
static VALUE nm_lu(VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

  size_t size = nm_yale_storage_get_size(s);

  VALUE* vals = NM_ALLOCA_N(VALUE, size - s->shape[0] - 1);
  nm_register_values(vals, size - s->shape[0] - 1);

  if (NM_DTYPE(self) == nm::RUBYOBJ) {
    for (size_t i = 0; i < size - s->shape[0] - 1; ++i) {
      vals[i] = reinterpret_cast<VALUE*>(s->a)[s->shape[0] + 1 + i];
    }
  } else {
    // Convert each raw LU entry into a Ruby object.
    for (size_t i = 0; i < size - s->shape[0] - 1; ++i) {
      vals[i] = nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*(s->shape[0] + 1 + i), s->dtype).rval;
    }
  }

  VALUE ary = rb_ary_new4(size - s->shape[0] - 1, vals);

  // Pad with nil for allocated-but-unused capacity, mirroring nm_a.
  for (size_t i = size; i < s->capacity; ++i)
    rb_ary_push(ary, Qnil);

  nm_unregister_values(vals, size - s->shape[0] - 1);
  NM_CONSERVATIVE(nm_unregister_value(&self));
  return ary;
}
/*
 * call-seq:
 *     yale_ia -> Array
 *
 * Get the IA portion of the IJA array of a Yale matrix. This gives the start and end positions of rows in the
 * JA and LU portions of the IJA and A arrays, respectively.
 */
static VALUE nm_ia(VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

  // IA has one entry per row plus a final end-of-matrix sentinel.
  size_t n = s->shape[0] + 1;
  VALUE* vals = NM_ALLOCA_N(VALUE, n);
  for (size_t i = 0; i < n; ++i)
    vals[i] = INT2FIX(s->ija[i]);

  NM_CONSERVATIVE(nm_unregister_value(&self));
  return rb_ary_new4(n, vals);
}
/*
 * call-seq:
 *     yale_ja -> Array
 *
 * Get the JA portion of the IJA array of a Yale matrix. This gives the column indices for entries in corresponding
 * positions in the LU portion of the A array.
 *
 * JA entries start at IJA position shape[0]+1 and run through `size`.
 */
static VALUE nm_ja(VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));

  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

  size_t size = nm_yale_storage_get_size(s);

  VALUE* vals = NM_ALLOCA_N(VALUE, size - s->shape[0] - 1);
  nm_register_values(vals, size - s->shape[0] - 1);

  for (size_t i = 0; i < size - s->shape[0] - 1; ++i) {
    vals[i] = INT2FIX(s->ija[s->shape[0] + 1 + i]);
  }

  VALUE ary = rb_ary_new4(size - s->shape[0] - 1, vals);

  // Pad with nil for allocated-but-unused capacity, mirroring nm_a.
  for (size_t i = size; i < s->capacity; ++i)
    rb_ary_push(ary, Qnil);

  nm_unregister_values(vals, size - s->shape[0] - 1);
  NM_CONSERVATIVE(nm_unregister_value(&self));
  return ary;
}
/*
 * call-seq:
 *     yale_ija -> Array
 *     yale_ija(index) -> ...
 *
 * Get the IJA array of a Yale matrix (or a component of the IJA array).
 *
 * With no argument, returns the whole IJA array padded with nils up to
 * capacity; with an index, returns the single IJA entry as a Fixnum.
 */
static VALUE nm_ija(int argc, VALUE* argv, VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));
  VALUE idx;
  rb_scan_args(argc, argv, "01", &idx);
  NM_CONSERVATIVE(nm_register_value(&idx));

  YALE_STORAGE* s = reinterpret_cast<YALE_STORAGE*>(NM_SRC(self));

  size_t size = nm_yale_storage_get_size(s);

  if (idx == Qnil) { // no index given: return the whole IJA array
    VALUE* vals = NM_ALLOCA_N(VALUE, size);
    nm_register_values(vals, size);

    for (size_t i = 0; i < size; ++i) {
      vals[i] = INT2FIX(s->ija[i]);
    }

    VALUE ary = rb_ary_new4(size, vals);

    // Pad with nil for allocated-but-unused capacity.
    for (size_t i = size; i < s->capacity; ++i)
      rb_ary_push(ary, Qnil);

    nm_unregister_values(vals, size);
    NM_CONSERVATIVE(nm_unregister_value(&idx));
    NM_CONSERVATIVE(nm_unregister_value(&self));

    return ary;

  } else { // single index: return one IJA entry
    size_t index = FIX2INT(idx);
    if (index >= size) rb_raise(rb_eRangeError, "out of range");

    NM_CONSERVATIVE(nm_unregister_value(&self));
    NM_CONSERVATIVE(nm_unregister_value(&idx));

    return INT2FIX(s->ija[index]);
  }
}
/*
 * call-seq:
 *     yale_nd_row -> ...
 *
 * This function gets the non-diagonal contents of a Yale matrix row.
 * The first argument should be the row index. The optional second argument may be :hash or :keys, but defaults
 * to :hash. If :keys is given, it will only return the Hash keys (the column indices).
 *
 * This function is meant to accomplish its purpose as efficiently as possible. It does not check for appropriate
 * range.
 */
static VALUE nm_nd_row(int argc, VALUE* argv, VALUE self) {
  NM_CONSERVATIVE(nm_register_value(&self));

  // Row extraction reads the raw IJA/A arrays, which a slice does not own.
  if (NM_SRC(self) != NM_STORAGE(self)) {
    NM_CONSERVATIVE(nm_unregister_value(&self));
    rb_raise(rb_eNotImpError, "must be called on a real matrix and not a slice");
  }

  VALUE i_, as;
  rb_scan_args(argc, argv, "11", &i_, &as);
  NM_CONSERVATIVE(nm_register_value(&as));
  NM_CONSERVATIVE(nm_register_value(&i_));

  // Default is :hash; any other symbol (e.g. :keys) selects keys-only mode.
  bool keys = false;
  if (as != Qnil && rb_to_id(as) != nm_rb_hash) keys = true;

  size_t i = FIX2INT(i_);

  YALE_STORAGE* s   = NM_STORAGE_YALE(self);

  if (i >= s->shape[0]) {
    NM_CONSERVATIVE(nm_unregister_value(&self));
    NM_CONSERVATIVE(nm_unregister_value(&as));
    NM_CONSERVATIVE(nm_unregister_value(&i_));
    rb_raise(rb_eRangeError, "out of range (%lu >= %lu)", i, s->shape[0]);
  }

  // Stored non-diagonal entries for row i occupy IJA/A positions [pos, nextpos).
  size_t pos     = s->ija[i];
  size_t nextpos = s->ija[i+1];
  size_t diff    = nextpos - pos;

  VALUE ret;
  if (keys) {
    // BUGFIX: use rb_ary_new2, which allocates an *empty* array with the
    // given capacity. The previous rb_ary_new3(diff) is a varargs
    // constructor expecting `diff` VALUE arguments; called without them it
    // would fill the array with uninitialized stack words, which the GC
    // could then scan as object references.
    ret = rb_ary_new2(diff);

    for (size_t idx = pos; idx < nextpos; ++idx) {
      rb_ary_store(ret, idx - pos, INT2FIX(s->ija[idx]));
    }
  } else {
    ret = rb_hash_new();

    // Map each stored column index to its (converted) cell value.
    for (size_t idx = pos; idx < nextpos; ++idx) {
      rb_hash_aset(ret, INT2FIX(s->ija[idx]), nm::rubyobj_from_cval((char*)(s->a) + DTYPE_SIZES[s->dtype]*idx, s->dtype).rval);
    }
  }

  NM_CONSERVATIVE(nm_unregister_value(&as));
  NM_CONSERVATIVE(nm_unregister_value(&i_));
  NM_CONSERVATIVE(nm_unregister_value(&self));
  return ret;
}
/*
 * call-seq:
 *     yale_vector_set(i, column_index_array, cell_contents_array, pos) -> Fixnum
 *
 * Insert at position pos an array of non-diagonal elements with column indices given. Note that the column indices and values
 * must be storage-contiguous -- that is, you can't insert them around existing elements in some row, only amid some
 * elements in some row. You *can* insert them around a diagonal element, since this is stored separately. This function
 * may not be used for the insertion of diagonal elements in most cases, as these are already present in the data
 * structure and are typically modified by replacement rather than insertion.
 *
 * The last argument, pos, may be nil if you want to insert at the beginning of a row. Otherwise it needs to be provided.
 * Don't expect this function to know the difference. It really does very little checking, because its goal is to make
 * multiple contiguous insertion as quick as possible.
 *
 * You should also not attempt to insert values which are the default (0). These are not supposed to be stored, and may
 * lead to undefined behavior.
 *
 * Example:
 *    m.yale_vector_set(3, [0,3,4], [1,1,1], 15)
 *
 * The example above inserts the values 1, 1, and 1 in columns 0, 3, and 4, assumed to be located at position 15 (which
 * corresponds to row 3).
 *
 * Example:
 *    next = m.yale_vector_set(3, [0,3,4], [1,1,1])
 *
 * This example determines that i=3 is at position 15 automatically. The value returned, next, is the position where the
 * next value(s) should be inserted.
 */
VALUE nm_vector_set(int argc, VALUE* argv, VALUE self) { //, VALUE i_, VALUE jv, VALUE vv, VALUE pos_) {
  NM_CONSERVATIVE(nm_register_value(&self));

  // Insertion rewrites the raw IJA/A arrays, which a slice does not own.
  if (NM_SRC(self) != NM_STORAGE(self)) {
    NM_CONSERVATIVE(nm_unregister_value(&self));
    rb_raise(rb_eNotImpError, "must be called on a real matrix and not a slice");
  }

  // i, jv, vv are mandatory; pos is optional; thus "31"
  VALUE i_, jv, vv, pos_;
  rb_scan_args(argc, argv, "31", &i_, &jv, &vv, &pos_);
  NM_CONSERVATIVE(nm_register_value(&i_));
  NM_CONSERVATIVE(nm_register_value(&jv));
  NM_CONSERVATIVE(nm_register_value(&vv));
  NM_CONSERVATIVE(nm_register_value(&pos_));

  size_t len   = RARRAY_LEN(jv); // need length in order to read the arrays in
  size_t vvlen = RARRAY_LEN(vv);

  if (len != vvlen) {
    NM_CONSERVATIVE(nm_unregister_value(&pos_));
    NM_CONSERVATIVE(nm_unregister_value(&vv));
    NM_CONSERVATIVE(nm_unregister_value(&jv));
    NM_CONSERVATIVE(nm_unregister_value(&i_));
    NM_CONSERVATIVE(nm_unregister_value(&self));
    rb_raise(rb_eArgError, "lengths must match between j array (%lu) and value array (%lu)", len, vvlen);
  }

  YALE_STORAGE* s   = NM_STORAGE_YALE(self);
  nm::dtype_t dtype = NM_DTYPE(self);

  size_t i   = FIX2INT(i_);    // get the row
  // NOTE(review): pos_ is scanned but never read; the insertion position
  // is always recomputed as ija[i] -- confirm against the documented
  // contract, which implies pos is honored when given.
  size_t pos = s->ija[i];

  // Allocate the j array and the values array
  size_t* j  = NM_ALLOCA_N(size_t, len);
  void* vals = NM_ALLOCA_N(char, DTYPE_SIZES[dtype] * len);

  if (dtype == nm::RUBYOBJ){
    nm_register_values(reinterpret_cast<VALUE*>(vals), len);
  }

  // Copy array contents
  for (size_t idx = 0; idx < len; ++idx) {
    j[idx] = FIX2INT(rb_ary_entry(jv, idx));
    rubyval_to_cval(rb_ary_entry(vv, idx), dtype, (char*)vals + idx * DTYPE_SIZES[dtype]);
  }

  // Splice the new entries in, then shift all later rows' IA boundaries.
  nm_yale_storage_vector_insert(s, pos, j, vals, len, false, dtype);
  nm_yale_storage_increment_ia_after(s, s->shape[0], i, len);
  s->ndnz += len;

  if (dtype == nm::RUBYOBJ){
    nm_unregister_values(reinterpret_cast<VALUE*>(vals), len);
  }

  NM_CONSERVATIVE(nm_unregister_value(&pos_));
  NM_CONSERVATIVE(nm_unregister_value(&vv));
  NM_CONSERVATIVE(nm_unregister_value(&jv));
  NM_CONSERVATIVE(nm_unregister_value(&i_));
  NM_CONSERVATIVE(nm_unregister_value(&self));

  // Return the updated position
  pos += len;
  return INT2FIX(pos);
}
/*
 * call-seq:
 *     __yale_default_value__ -> ...
 *
 * Get the default_value property from a yale matrix.
 */
VALUE nm_yale_default_value(VALUE self) {
  return default_value(NM_STORAGE_YALE(self));
}
/*
 * call-seq:
 *     __yale_map_merged_stored__(right) -> Enumerator
 *
 * A map operation on two Yale matrices which only iterates across the stored indices.
 * Dispatches on the (left dtype, right dtype) pair.
 */
VALUE nm_yale_map_merged_stored(VALUE left, VALUE right, VALUE init) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::map_merged_stored, VALUE, VALUE, VALUE, VALUE)
  return ttable[NM_DTYPE(left)][NM_DTYPE(right)](left, right, init);
  //return nm::yale_storage::map_merged_stored(left, right, init);
}
/*
 * call-seq:
 *     __yale_map_stored__ -> Enumerator
 *
 * A map operation on a single Yale matrix which only iterates across the stored indices.
 * Dispatches on the matrix's dtype.
 */
VALUE nm_yale_map_stored(VALUE self) {
  NAMED_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::map_stored, VALUE, VALUE)
  return ttable[NM_DTYPE(self)](self);
}
} // end of extern "C" block
| 26,247 |
505 | /*
Author: <NAME> (<EMAIL>)
Copyright (c) 1995-1996 MEDICAL RESEARCH COUNCIL
All rights reserved
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1 Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2 Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3 Neither the name of the MEDICAL RESEARCH COUNCIL, THE LABORATORY OF
MOLECULAR BIOLOGY nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Copyright (c) 2004, 2009, 2011-2012 Genome Research Ltd.
Author: <NAME> <<EMAIL>>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the names Genome Research Ltd and Wellcome Trust Sanger
Institute nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY GENOME RESEARCH LTD AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL GENOME RESEARCH LTD OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <sys/types.h>
#include <string.h>
#include "cram/vlen.h"
#include "cram/os.h"
/* Larger of two values; note both arguments are evaluated twice. */
#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

/* Absolute value; note the argument is evaluated twice. */
#ifndef ABS
#define ABS(a) ((a)>0?(a):-(a))
#endif

/* Debug tracing of consumed varargs; compiled out by default. */
/* #define DEBUG_printf(a,n) printf(a,n) */
#define DEBUG_printf(a,n)
/*
 * vlen: 27/10/95 written by <NAME>, <EMAIL>
 *
 * Given sprintf style of arguments this routine returns the maximum
 * size of buffer needed to allocate to use with sprintf. It errs on
 * the side of caution by being simplistic in its approach: we assume
 * all numbers are of maximum length.
 *
 * Handles the usual type conversions (%[%diuaxXcfeEgGpns]), but not
 * the 'wide' character conversions (%C and %S).
 * Precision is handled in the correct formats, including %*.*
 * notations.
 * Additionally, some of the more dubious (but probably illegal) cases
 * are supported (eg "%10%" will expand to "         %" on many
 * systems).
 *
 * We also assume that the largest integer and larger pointer are 64
 * bits, which at least covers the machines we'll need it for.
 */
int flen(char *fmt, ...)
{
    va_list args;
    int len;

    va_start(args, fmt);
    len = vflen(fmt, args);

    /* BUGFIX: C99 7.15.1 requires the caller to invoke va_end on a list
     * it started, even after a callee has consumed it via va_arg.  On
     * va_copy-capable platforms vflen only ends its private copy, so
     * without this call `args` was never ended.  (On legacy platforms
     * vflen ends the shared list itself; va_end is effectively a no-op
     * on common ABIs, so the extra call there is harmless.)
     */
    va_end(args);

    return len;
}
/*
 * Computes an upper bound on the sprintf output length for `fmt` with the
 * arguments in `ap` (see the block comment above flen() for supported
 * conversions and caveats).  Returns the bound in bytes, including the
 * trailing NUL.
 */
int vflen(char *fmt, va_list ap)
{
    int len = 0;
    char *cp, c;
    long long l;
    int i;
    double d;

    /*
     * This code modifies 'ap', but we do not know if va_list is a structure
     * or a pointer to an array so we do not know if it is a local variable
     * or not.
     * C99 gets around this by defining va_copy() to make copies of ap, but
     * this does not exist on all systems.
     * For now, I just assume that when va_list is a pointer the system also
     * provides a va_copy macro to work around this problem. The only system
     * I have seen needing this so far was Linux on AMD64.
     */
#if defined(HAVE_VA_COPY)
    va_list ap_local;
    va_copy(ap_local, ap);
    /* From here on, every use of `ap` in this function -- including the
     * va_end at the bottom -- actually refers to the local copy. */
# define ap ap_local
#endif

    for(cp = fmt; *cp; cp++) {
        switch(*cp) {

        /* A format specifier */
        case '%': {
            char *endp;
            long conv_len1=0, conv_len2=0, conv_len=0;
            signed int arg_size;

            /* Firstly, strip the modifier flags (+-#0 and [space]) */
            for(; (c=*++cp);) {
                if ('#' == c)
                    len+=2; /* Worst case of "0x" */
                else if ('-' == c || '+' == c || ' ' == c)
                    len++;
                else
                    break;
            }

            /* Width specifier */
            l = strtol(cp, &endp, 10);
            if (endp != cp) {
                cp = endp;
                conv_len = conv_len1 = l;
            } else if (*cp == '*') {
                /* Width supplied as an argument. */
                conv_len = conv_len1 = (int)va_arg(ap, int);
                cp++;
            }

            /* Precision specifier */
            if ('.' == *cp) {
                cp++;
                conv_len2 = strtol(cp, &endp, 10);
                if (endp != cp) {
                    cp = endp;
                } else if (*cp == '*') {
                    /* Precision supplied as an argument. */
                    conv_len2 = (int)va_arg(ap, int);
                    cp++;
                }
                conv_len = MAX(conv_len1, conv_len2);
            }

            /* Short/long identifier */
            if ('h' == *cp) {
                arg_size = -1; /* short */
                cp++;
            } else if ('l' == *cp) {
                arg_size = 1; /* long */
                cp++;
                if ('l' == *cp) {
                    arg_size = 2; /* long long */
                    cp++;
                }
            } else {
                arg_size = 0; /* int */
            }

            /* The actual type */
            switch (*cp) {
            case '%':
                /*
                 * Not real ANSI I suspect, but we'll allow for the
                 * completely daft "%10%" example.
                 */
                len += MAX(conv_len1, 1);
                break;

            case 'd':
            case 'i':
            case 'u':
            case 'a':
            case 'x':
            case 'X':
                /* Remember: char and short are sent as int on the stack */
                if (arg_size == -1)
                    l = (long)va_arg(ap, int);
                else if (arg_size == 1)
                    l = va_arg(ap, long);
                else if (arg_size == 2)
                    l = va_arg(ap, long long);
                else
                    l = (long)va_arg(ap, int);
                DEBUG_printf("%d", l);

                /*
                 * No number can be more than 24 characters so we'll take
                 * the max of conv_len and 24 (23 is len(2^64) in octal).
                 * All that work above and we then go and estimate ;-),
                 * but it's needed incase someone does %500d.
                 */
                len += MAX(conv_len, 23);
                break;

            case 'c':
                i = va_arg(ap, int);
                DEBUG_printf("%c", i);
                /*
                 * Note that %10c and %.10c act differently.
                 * Besides, I think precision is not really allowed for %c.
                 */
                len += MAX(conv_len1, i>=0x80 ?MB_CUR_MAX :1);
                break;

            case 'f':
                d = va_arg(ap, double);
                DEBUG_printf("%f", d);
                /*
                 * Maybe "Inf" or "NaN", but we'll not worry about that.
                 * Again, err on side of caution and take max of conv_len
                 * and max length of a double. The worst case I can
                 * think of is 317 characters (-1[308 zeros].000000)
                 * without using precision codes. That's horrid. I
                 * cheat and either use 317 or 15 depending on how
                 * large the number is as I reckon 99% of floats
                 * aren't that long.
                 */
                l = (ABS(d) > 1000000) ? 317 : 15;
                l = MAX(l, conv_len1 + 2);
                if (conv_len2) l += conv_len2 - 6;
                len += l;
                break;

            case 'e':
            case 'E':
            case 'g':
            case 'G':
                d = va_arg(ap, double);
                DEBUG_printf("%g", d);
                /*
                 * Maybe "Inf" or "NaN", but we'll not worry about that
                 * Again, err on side of caution and take max of conv_len
                 * and max length of a double (which defaults to only
                 * '-' + 6 + '.' + 'E[+-]xxx' == 13.
                 */
                len += MAX(conv_len, 13);
                break;

            case 'p':
                l = (long)va_arg(ap, void *);
                /*
                 * Max pointer is 64bits == 16 chars (on alpha),
                 * == 20 with + "0x".
                 */
                DEBUG_printf("%p", (void *)l);
                len += MAX(conv_len, 20);
                break;

            case 'n':
                /* produces no output */
                break;

            case 's': {
                char *s = (char *)va_arg(ap, char *);
                DEBUG_printf("%s", s);

                if (!conv_len2) {
                    len += MAX(conv_len, (int)strlen(s));
                } else {
                    len += conv_len;
                }
                break;
            }

            default:
                /* wchar_t types of 'C' and 'S' aren't supported */
                DEBUG_printf("Arg is %c\n", *cp);
            }
            /* No break: control falls through into the '\0' case below,
             * which simply breaks out of the outer switch -- behaviour is
             * the same as an explicit break here. */
        }

        case '\0':
            break;

        default:
            DEBUG_printf("%c", *cp);
            len++;
        }
    }

    va_end(ap);

    return len+1; /* one for the null character */
}
#if 0
int main() {
int l;
char buf[10000];
sprintf(buf, "d: %d\n", 500);
l = flen("d: %d\n", 500);
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "");
l = flen("");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%s\n","test");
l = flen("%s\n", "test");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%c\n", 'a');
l = flen("%c\n", 'a');
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%31.30f\n", -9999.99);
l = flen("%31.30f\n", -9999.99);
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%f\n", -1e308);
l = flen("%f\n", -1e308);
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%.9f\n", -1e308);
l = flen("%.9f\n", -1e308);
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%10.20f\n", -1.999222333);
l = flen("%10.20f\n", -1.999222333);
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%#g\n", -3.14159265358e-222);
l = flen("%#g\n", -3.1415927e-222);
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%e\n", -123456789123456789.1);
l = flen("%e\n", -123456789123456789.1);
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%c %f %d %s %c %g %ld %s\n", 'a', 3.1, 9, "one", 'b', 4.2, 9, "two");
l = flen("%c %f %d %s %c %g %ld %s\n", 'a', 3.1, 9, "one", 'b', 4.2, 9, "two");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%*.*e %*c\n", 10, 5, 9.0, 20, 'x');
l = flen("%*.*e %*c\n", 10, 5, 9.0, 20, 'x');
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%10c\n", 'z');
l = flen("%10c\n", 'z');
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%.10c\n", 'z');
l = flen("%.10c\n", 'z');
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%10d\n", 'z');
l = flen("%10d\n", 'z');
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%.10d\n", 'z');
l = flen("%.10d\n", 'z');
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%10%\n");
l = flen("%10%\n");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%.10%\n");
l = flen("%.10%\n");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%s\n", "0123456789");
l = flen("%s\n", "0123456789");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%5s\n", "0123456789");
l = flen("%5s\n", "0123456789");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%50s\n", "0123456789");
l = flen("%50s\n", "0123456789");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%.5s\n", "0123456789");
l = flen("%.5s\n", "0123456789");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%.50s\n", "0123456789");
l = flen("%.50s\n", "0123456789");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%5.50s\n", "0123456789");
l = flen("%5.50s\n", "0123456789");
printf("%d %d\n\n", strlen(buf), l);
sprintf(buf, "%50.5s\n", "0123456789");
l = flen("%50.5s\n", "0123456789");
printf("%d %d\n\n", strlen(buf), l);
return 0;
}
#endif
| 5,129 |
1,006 | <gh_stars>1000+
/****************************************************************************
* boards/arm/stm32f0l0g0/nucleo-f072rb/src/stm32_bringup.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <debug.h>
#include <nuttx/fs/fs.h>
#include <nuttx/i2c/i2c_master.h>
#include "stm32_i2c.h"
#include "nucleo-f072rb.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#undef HAVE_I2C_DRIVER
#if defined(CONFIG_STM32F0L0G0_I2C1) && defined(CONFIG_I2C_DRIVER)
# define HAVE_I2C_DRIVER 1
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: stm32_bringup
*
* Description:
* Perform architecture-specific initialization
*
* CONFIG_BOARD_LATE_INITIALIZE=y :
* Called from board_late_initialize().
*
* CONFIG_BOARD_LATE_INITIALIZE=n && CONFIG_BOARDCTL=y :
* Called from the NSH library
*
****************************************************************************/
int stm32_bringup(void)
{
#ifdef HAVE_I2C_DRIVER
  FAR struct i2c_master_s *i2c;
#endif
  int ret;

#ifdef CONFIG_FS_PROCFS
  /* Mount the procfs file system */

  ret = nx_mount(NULL, "/proc", "procfs", 0, NULL);
  if (ret < 0)
    {
      ferr("ERROR: Failed to mount procfs at /proc: %d\n", ret);
    }
#endif

#ifdef HAVE_I2C_DRIVER
  /* Get the I2C lower half instance */

  i2c = stm32_i2cbus_initialize(1);
  if (i2c == NULL)
    {
      /* Do not report 'ret' here: it is uninitialized unless the procfs
       * mount above was compiled in.  (Also fixes the "Inialize" typo.)
       */

      i2cerr("ERROR: Failed to initialize I2C1\n");
    }
  else
    {
      /* Register the I2C character driver for bus 1 */

      ret = i2c_register(i2c, 1);
      if (ret < 0)
        {
          i2cerr("ERROR: Failed to register I2C1 device: %d\n", ret);
        }
    }
#endif

  UNUSED(ret);
  return OK;
}
| 931 |
5,169 | <gh_stars>1000+
{
"name": "AnalyticsGenerator",
"version": "0.1.0",
"summary": "A tool to unify analytics event definitions between projects with same analytics pre-requisites",
"description": "A tool to unify analytics event definitions between projects with same analytics pre-requisites. Using it you can ensure two projetcs, say iOS and Android, will share the same analytics event specs, avoiding discrepancy in event names, missing event parameters and so on.",
"homepage": "https://github.com/Movile/analytics-generator",
"license": "MIT",
"authors": {
"<EMAIL>": "<EMAIL>"
},
"source": {
"http": "https://github.com/Movile/analytics-generator/releases/download/0.1.0/analytics-generator.zip"
},
"preserve_paths": "*",
"platforms": {
"osx": null,
"ios": null,
"tvos": null,
"watchos": null
}
}
| 282 |
1,351 | '''
Common utilities for the Proxy Verifier extensions.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from string import Template
def create_address_argument(ports):
    """
    Build a quoted, comma-separated list of loopback host:port pairs.

    >>> create_address_argument([8080, 8081])
    '"127.0.0.1:8080,127.0.0.1:8081"'
    """
    # Join the pairs in one pass instead of manually tracking the first
    # element; an empty ports iterable yields '""' just like before.
    addresses = ",".join("127.0.0.1:{}".format(port) for port in ports)
    return '"' + addresses + '"'
def substitute_context_in_replay_file(process, replay_path, context):
    '''
    Perform substitution based on the passed context dict.

    :param process: autest process object providing TestDirectory (where the
        template replay file lives) and RunDirectory (where the substituted
        copy is written).
    :param replay_path: path to the replay file, relative to TestDirectory.
    :param context: mapping of Template placeholder names to values.
    :return: path to the newly written, substituted replay file.
    :raises ValueError: if replay_path refers to a directory.
    '''
    # Only files for now.
    if os.path.isdir(replay_path):
        raise ValueError("Mapping substitution not supported for directories.")

    with open(os.path.join(process.TestDirectory, replay_path), 'r') as replay_file:
        replay_template = Template(replay_file.read())

    replay_content = replay_template.substitute(context)

    # Write the substituted content to a uniquely named file in the run
    # directory.  The context manager closes the handle (the original code
    # leaked it and reopened the path while still open); delete=False keeps
    # the file on disk so the caller can use it afterwards.
    with tempfile.NamedTemporaryFile(
            mode="w",
            delete=False,
            dir=process.RunDirectory,
            suffix=f"_{os.path.basename(replay_path)}") as new_replay_file:
        new_replay_file.write(replay_content)
        replay_path = new_replay_file.name

    # Use this as the new replay_path.
    return replay_path
| 736 |
1,160 | <gh_stars>1000+
"""
summary: produce listing
description:
automate IDA to perform auto-analysis on a file and,
once that is done, produce a .lst file with the disassembly.
Run like so:
ida -A "-S...path/to/produce_lst_file.py" <binary-file>
where:
* -A instructs IDA to run in non-interactive mode
* -S holds a path to the script to run (note this is a single token;
there is no space between '-S' and its path.)
"""
import ida_auto
import ida_fpro
import ida_ida
import ida_loader
import ida_pro
# Derive the output listing name from the database path.
database_path = ida_loader.get_path(ida_loader.PATH_TYPE_IDB)
listing_path = "%s.lst" % database_path

# Block until IDA's auto-analysis has completed.
ida_auto.auto_wait()

out_file = ida_fpro.qfile_t()  # FILE * wrapper
if out_file.open(listing_path, "wt"):
    try:
        # Emit the disassembly listing covering the whole database range.
        ida_loader.gen_file(
            ida_loader.OFILE_LST,
            out_file.get_fp(),
            ida_ida.inf_get_min_ea(),
            ida_ida.inf_get_max_ea(),
            0)
    finally:
        out_file.close()

ida_pro.qexit(0)
| 462 |
992 | /* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Copyright 2010 Couchbase, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//#define __DEBUG_E2E
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <time.h>
#if !defined(WIN32) && !defined(_WIN32)
#include <unistd.h>
#endif
#include "time_utils.h"
#ifdef __cplusplus
extern "C" {
#endif
// Forestdb APIs wrappers where time taken in nano secs is returned on success
/* Sentinel value for failure.  NOTE(review): stored in a signed long int;
 * on platforms with 32-bit long, 0xFFFFFFFF is -1 -- confirm callers compare
 * against ERR_NS itself rather than testing < 0. */
static const long int ERR_NS = 0xFFFFFFFF;
/* Timed single-document operations. */
ts_nsec timed_fdb_get(fdb_kvs_handle *kv, fdb_doc *doc);
ts_nsec timed_fdb_set(fdb_kvs_handle *kv, fdb_doc *doc);
ts_nsec timed_fdb_delete(fdb_kvs_handle *kv, fdb_doc *doc);
/* Timed file-level operations. */
ts_nsec timed_fdb_compact(fdb_file_handle *fhandle);
ts_nsec timed_fdb_commit(fdb_file_handle *fhandle, bool walflush);
ts_nsec timed_fdb_snapshot(fdb_kvs_handle *kv, fdb_kvs_handle **snap_kv);
/* Timed iterator operations. */
ts_nsec timed_fdb_iterator_init(fdb_kvs_handle *kv, fdb_iterator **it);
ts_nsec timed_fdb_iterator_get(fdb_iterator *it, fdb_doc **doc);
ts_nsec timed_fdb_iterator_next(fdb_iterator *it);
ts_nsec timed_fdb_iterator_close(fdb_iterator *it);
/* Timed teardown operations. */
ts_nsec timed_fdb_kvs_close(fdb_kvs_handle *kv);
ts_nsec timed_fdb_close(fdb_file_handle *fhandle);
ts_nsec timed_fdb_shutdown();
#ifdef __cplusplus
}
#endif
| 713 |
8,772 | <reponame>dgusoff/cas<filename>api/cas-server-core-api-configuration-model/src/main/java/org/apereo/cas/configuration/model/support/sms/AmazonSnsProperties.java
package org.apereo.cas.configuration.model.support.sms;
import org.apereo.cas.configuration.model.support.aws.BaseAmazonWebServicesProperties;
import org.apereo.cas.configuration.support.RequiresModule;
import com.fasterxml.jackson.annotation.JsonFilter;
import lombok.Getter;
import lombok.Setter;
import lombok.experimental.Accessors;
/**
* This is {@link AmazonSnsProperties}.
*
* @author <NAME>
* @since 5.3.0
*/
@RequiresModule(name = "cas-server-support-sms-aws-sns")
@Getter
@Setter
@Accessors(chain = true)
@JsonFilter("AmazonSnsProperties")
public class AmazonSnsProperties extends BaseAmazonWebServicesProperties {
    /** Serialization version marker for this configuration class. */
    private static final long serialVersionUID = -3366665169030844517L;
    /**
     * A custom ID that contains up to 11 alphanumeric characters, including at least one letter and no spaces.
     *
     * The sender ID is displayed as the message sender on the receiving device. For example, you can use your
     * business brand to make the message source easier to recognize.
     * Support for sender IDs varies by country and/or region. For example, messages delivered to
     * U.S. phone numbers will not display the sender ID.
     * If you do not specify a sender ID, the message will display a long code as the sender ID in
     * supported countries and regions. For countries or regions that require an alphabetic sender ID,
     * the message displays NOTICE as the sender ID.
     */
    private String senderId;
    /**
     * The maximum amount in USD that you are willing to spend to send the SMS message.
     *
     * Amazon SNS will not send the message if it determines that doing so would incur a cost that exceeds the maximum price.
     * This attribute has no effect if your month-to-date SMS costs have already exceeded the limit set
     * for the MonthlySpendLimit attribute, which you set by using the SetSMSAttributes request.
     * If you are sending the message to an Amazon SNS topic, the maximum price applies to each message
     * delivery to each phone number that is subscribed to the topic.
     */
    private String maxPrice;
    /**
     * The type of message that you are sending.
     *
     * <ul>
     * <li>Promotional - Noncritical messages, such as marketing messages. Amazon
     * SNS optimizes the message delivery to incur the lowest cost.</li>
     * <li>Transactional – Critical messages that support customer transactions, such as one-time passcodes
     * for multi-factor authentication. Amazon SNS optimizes the message delivery to achieve the highest reliability. </li>
     * </ul>
     */
    private String smsType = "Transactional";
}
| 852 |
348 | //
// PhotoDetailPipeline.h
// MinyaDemo
//
// Created by Konka on 2016/10/13.
// Copyright © 2016年 Minya. All rights reserved.
//
#import "MIPipeline.h"
#import "Photo.h"
/// Pipeline carrying the state for the photo detail screen.
@interface PhotoDetailPipeline : MIPipeline
// Normal data: the photo model backing this pipeline.
@property (nonatomic, strong) Photo *photo;
// Flag data: set when the request has finished -- presumably toggled by the
// view model layer; TODO confirm against the pipeline's consumers.
@property (nonatomic, assign) BOOL flagRequestFinished;
// Input data: prev/next navigation triggers fed into the pipeline.
@property (nonatomic, assign) BOOL inputPrev;
@property (nonatomic, assign) BOOL inputNext;
@end
| 164 |
359 | /**
* Copyright 2020 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "matchbind.h"
#include <stdlib.h>
#include <stdio.h>
/* Allocate a matchbind that takes ownership of the given identifier. */
matchbind * matchbind_new(char * value)
{
    matchbind * mb;

    mb = (matchbind *)malloc(sizeof(matchbind));

    /* The identifier is owned by the matchbind from here on. */
    mb->id = value;

    /* No binding has been resolved yet. */
    mb->param_value = NULL;
    mb->enumerator_value = NULL;
    mb->enumtype_value = NULL;

    mb->index = -1;
    mb->stack_level = 0;
    mb->line_no = 0;

    return mb;
}
/* Release a matchbind and the identifier string it owns. */
void matchbind_delete(matchbind * value)
{
    /* free(NULL) is a no-op, so the id needs no explicit guard. */
    free(value->id);
    free(value);
}
/* Print a one-line description of the matchbind to stdout. */
void matchbind_print(matchbind * value)
{
    printf("matchbind %s\n", value->id);
}
/* Allocate a detached list node wrapping the given matchbind. */
matchbind_list_node * matchbind_list_node_new(matchbind * value)
{
    matchbind_list_node * n;

    n = (matchbind_list_node *)malloc(sizeof(matchbind_list_node));
    n->value = value;
    n->prev = NULL;
    n->next = NULL;

    return n;
}
/* Free a node; the node owns its payload, so release that first. */
void matchbind_list_node_delete(matchbind_list_node * node)
{
    if (node->value != NULL)
    {
        matchbind_delete(node->value);
    }

    free(node);
}
/* Allocate an empty matchbind list: no nodes, zero count. */
matchbind_list * matchbind_list_new()
{
    matchbind_list * l;

    l = (matchbind_list *)malloc(sizeof(matchbind_list));
    l->count = 0;
    l->head = l->tail = NULL;

    return l;
}
/* Free the list, every node, and (via matchbind_list_node_delete) every
 * payload.  Traversal starts at the tail and follows next pointers, which
 * in this list run from tail towards head (see matchbind_list_add_beg). */
void matchbind_list_delete(matchbind_list * list)
{
    matchbind_list_node * node = list->tail;
    while (node != NULL)
    {
        /* Save the successor before the node is freed. */
        matchbind_list_node * tmp = node->next;
        matchbind_list_node_delete(node);
        node = tmp;
    }
    free(list);
}
/* Add a matchbind at the logical beginning of the list.  NOTE(review): in
 * this list the beginning is stored in `tail` and `next` links run from
 * tail towards head -- the orientation is inverted relative to the usual
 * naming; confirm against callers before changing it. */
void matchbind_list_add_beg(matchbind_list * list, matchbind * value)
{
    matchbind_list_node * node = matchbind_list_node_new(value);
    list->count++;
    if (list->head == NULL && list->tail == NULL)
    {
        /* First element becomes both head and tail. */
        list->head = list->tail = node;
    }
    else
    {
        list->tail->prev = node;
        node->next = list->tail;
        list->tail = node;
    }
}
/* Add a matchbind at the logical end of the list.  NOTE(review): the end
 * is stored in `head` here (mirror of matchbind_list_add_beg); the list's
 * head/tail naming is inverted relative to the usual convention. */
void matchbind_list_add_end(matchbind_list * list, matchbind * value)
{
    matchbind_list_node * node = matchbind_list_node_new(value);
    list->count++;
    if (list->head == NULL && list->tail == NULL)
    {
        /* First element becomes both head and tail. */
        list->head = list->tail = node;
    }
    else
    {
        list->head->next = node;
        node->prev = list->head;
        list->head = node;
    }
}
/* Assign the given stack level to every matchbind stored in the list. */
void matchbind_list_set_stack_level(matchbind_list * list, int stack_level)
{
    matchbind_list_node * node;

    /* Walk from the tail (logical beginning) towards the head. */
    for (node = list->tail; node != NULL; node = node->next)
    {
        if (node->value != NULL)
        {
            node->value->stack_level = stack_level;
        }
    }
}
| 1,416 |
5,305 | <reponame>tanishiking/dotty
package repeatable;
import java.lang.annotation.*;
/**
 * Container annotation aggregating repeated {@link FirstLevel_0} values,
 * retained at runtime for reflective access.
 */
@Retention(RetentionPolicy.RUNTIME)
public @interface SecondLevel_0 {
FirstLevel_0[] value();
}
| 62 |
2,391 | <filename>tests/test_data_ava_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import contextlib
import pathlib
import random
import tempfile
import unittest
import torch
from pytorchvideo.data import Ava
from pytorchvideo.data.clip_sampling import make_clip_sampler
from utils import temp_frame_video
AVA_FPS = 30
@contextlib.contextmanager
def temp_ava_dataset_2_videos():
    """Context manager building a temporary AVA-style frame list for two
    90-frame test videos (labels "0" and "1"; the second video is 5x5).

    Yields (frames_csv_path, video_1_tensor, video_2_tensor,
    video_1_name, video_2_name).
    """
    frame_names = [f"{str(i)}.png" for i in range(90)]
    # Create csv containing 2 test frame videos.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as frames_file:
        # NOTE(review): "original_vido_id" looks misspelled but matches the
        # column header the AVA frame-list reader expects -- do not "fix" it
        # without checking the consumer.
        frames_file.write("original_vido_id video_id frame_id path labels\n".encode())
        # Frame video 1
        with temp_frame_video(frame_names) as (frame_1_video_dir, data_1):
            for i, frame_name in enumerate(frame_names):
                original_video_id_1 = str(frame_1_video_dir)
                video_id = "1"
                frame_id = str(i)
                path = pathlib.Path(frame_1_video_dir) / frame_name
                label = "0"
                frames_file.write(
                    f"{original_video_id_1} {video_id} {frame_id} {path} {label}\n".encode()
                )
            # Frame video 2
            with temp_frame_video(frame_names, height=5, width=5) as (
                frame_2_video_dir,
                data_2,
            ):
                for i, frame_name in enumerate(frame_names):
                    original_video_id_2 = str(frame_2_video_dir)
                    video_id = "2"
                    frame_id = str(i)
                    path = pathlib.Path(frame_2_video_dir) / frame_name
                    label = "1"
                    frames_file.write(
                        f"{original_video_id_2} {video_id} {frame_id} {path} {label}\n".encode()
                    )
                frames_file.close()
                yield frames_file.name, data_1, data_2, original_video_id_1, original_video_id_2
def get_random_bbox():
    """Return a random normalized bounding box.

    Returns:
        Tuple of (list of four floats in [0, 1] rounded to 3 decimals,
        the same values joined into a comma-separated string).
    """
    # '_' for the unused loop variable; join a generator instead of building
    # an intermediate list of strings.
    bb_list = [round(random.random(), 3) for _ in range(4)]
    return bb_list, ",".join(str(element) for element in bb_list)
class TestAvaDataset(unittest.TestCase):
    """Tests for the Ava clip dataset: label CSV parsing, clip extraction,
    and optional label-map filtering, over two synthetic frame videos."""

    def test_multiple_videos(self):
        with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as data_file:
            with temp_ava_dataset_2_videos() as (
                frame_paths_file,
                video_1,
                video_2,
                video_1_name,
                video_2_name,
            ):
                # add bounding boxes
                # video 1
                bb_1_a, bb_1_a_string = get_random_bbox()
                action_1_a, iou_1_a = 1, 0.85
                bb_1_b, bb_1_b_string = get_random_bbox()
                action_1_b, iou_1_b = 2, 0.4
                data_file.write(
                    (
                        f"{video_1_name},902,{bb_1_a_string},"
                        + f"{str(action_1_a)},{str(iou_1_a)}\n"
                    ).encode()
                )
                data_file.write(
                    (
                        f"{video_1_name},902,{bb_1_b_string},"
                        + f"{str(action_1_b)},{str(iou_1_b)}\n"
                    ).encode()
                )
                # video 2
                bb_2_a, bb_2_a_string = get_random_bbox()
                action_2_a, iou_2_a = 3, 0.95
                bb_2_b, bb_2_b_string = get_random_bbox()
                action_2_b, iou_2_b = 4, 0.9
                data_file.write(
                    (
                        f"{video_2_name},902,{bb_2_a_string},"
                        + f"{str(action_2_a)},{str(iou_2_a)}\n"
                    ).encode()
                )
                data_file.write(
                    (
                        f"{video_2_name},902,{bb_2_b_string},"
                        + f"{str(action_2_b)},{str(iou_2_b)}\n"
                    ).encode()
                )
                data_file.close()
                dataset = Ava(
                    frame_paths_file=frame_paths_file,
                    frame_labels_file=data_file.name,
                    clip_sampler=make_clip_sampler("random", 1.0),
                )
                # All videos are of the form cthw and fps is 30
                # Clip is samples at time step = 2 secs in video
                # (frames 45:75 = a 1s window centered on t=2s at 30 fps)
                sample_1 = next(dataset)
                self.assertTrue(sample_1["video"].equal(video_1[:, 45:75, :, :]))
                self.assertTrue(
                    torch.tensor(sample_1["boxes"]).equal(
                        torch.tensor([bb_1_a, bb_1_b])
                    )
                )
                self.assertTrue(
                    torch.tensor(sample_1["labels"]).equal(
                        torch.tensor([[action_1_a], [action_1_b]])
                    )
                )
                sample_2 = next(dataset)
                self.assertTrue(sample_2["video"].equal(video_2[:, 45:75, :, :]))
                self.assertTrue(
                    torch.tensor(sample_2["boxes"]).equal(
                        torch.tensor([bb_2_a, bb_2_b])
                    )
                )
                self.assertTrue(
                    torch.tensor(sample_2["labels"]).equal(
                        torch.tensor([[action_2_a], [action_2_b]])
                    )
                )

    def test_multiple_videos_with_label_map(self):
        with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as label_map_file:
            with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as data_file:
                with temp_ava_dataset_2_videos() as (
                    frame_paths_file,
                    video_1,
                    video_2,
                    video_1_name,
                    video_2_name,
                ):
                    # Create labelmap file
                    # (ids 1, 3, 4 only -- action 2 is filtered out below)
                    label_map = """item {
  name: "bend/bow (at the waist)"
  id: 1
}
item {
  name: "crouch/kneel"
  id: 3
}
item {
  name: "dance"
  id: 4
}"""
                    label_map_file.write(label_map.encode())
                    label_map_file.close()
                    # add bounding boxes
                    # video 1
                    bb_1_a, bb_1_a_string = get_random_bbox()
                    action_1_a, iou_1_a = 1, 0.85
                    bb_1_b, bb_1_b_string = get_random_bbox()
                    action_1_b, iou_1_b = 2, 0.4
                    data_file.write(
                        (
                            f"{video_1_name},902,{bb_1_a_string},"
                            + f"{str(action_1_a)},{str(iou_1_a)}\n"
                        ).encode()
                    )
                    data_file.write(
                        (
                            f"{video_1_name},902,{bb_1_b_string},"
                            + f"{str(action_1_b)},{str(iou_1_b)}\n"
                        ).encode()
                    )
                    # video 2
                    bb_2_a, bb_2_a_string = get_random_bbox()
                    action_2_a, iou_2_a = 3, 0.95
                    bb_2_b, bb_2_b_string = get_random_bbox()
                    action_2_b, iou_2_b = 4, 0.9
                    data_file.write(
                        (
                            f"{video_2_name},902,{bb_2_a_string},"
                            + f"{str(action_2_a)},{str(iou_2_a)}\n"
                        ).encode()
                    )
                    data_file.write(
                        (
                            f"{video_2_name},902,{bb_2_b_string},"
                            + f"{str(action_2_b)},{str(iou_2_b)}\n"
                        ).encode()
                    )
                    data_file.close()
                    dataset = Ava(
                        frame_paths_file=frame_paths_file,
                        frame_labels_file=data_file.name,
                        clip_sampler=make_clip_sampler("random", 1.0),
                        label_map_file=label_map_file.name,
                    )
                    # All videos are of the form cthw and fps is 30
                    # Clip is samples at time step = 2 secs in video
                    # Action 2 is absent from the label map, so video 1's
                    # second box/label is expected to be dropped.
                    sample_1 = next(dataset)
                    self.assertTrue(sample_1["video"].equal(video_1[:, 45:75, :, :]))
                    self.assertTrue(
                        torch.tensor(sample_1["boxes"]).equal(torch.tensor([bb_1_a]))
                    )
                    self.assertTrue(
                        torch.tensor(sample_1["labels"]).equal(
                            torch.tensor([[action_1_a]])
                        )
                    )
                    sample_2 = next(dataset)
                    self.assertTrue(sample_2["video"].equal(video_2[:, 45:75, :, :]))
                    self.assertTrue(
                        torch.tensor(sample_2["boxes"]).equal(
                            torch.tensor([bb_2_a, bb_2_b])
                        )
                    )
                    self.assertTrue(
                        torch.tensor(sample_2["labels"]).equal(
                            torch.tensor([[action_2_a], [action_2_b]])
                        )
                    )
| 5,918 |
375 | package io.lumify.benchmark;
import com.google.common.collect.ImmutableList;
import io.lumify.it.TestClassAndMethod;
import io.lumify.it.VertexTestBase;
import io.lumify.web.clientapi.LumifyApi;
import io.lumify.web.clientapi.codegen.ApiException;
import io.lumify.web.clientapi.codegen.VertexApi;
import io.lumify.web.clientapi.model.ClientApiVertex;
import io.lumify.web.clientapi.model.ClientApiVertexMultipleResponse;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
/**
 * Benchmarks {@code VertexApi.findMultiple} latency and verifies that the
 * visibility/authorization filtering returns the expected vertex count.
 */
@Category(BenchmarkCategory.class)
public class VertexBenchmark extends VertexTestBase {
    private TimedExecution timedExecution;
    @Rule
    public TestClassAndMethod testClassAndMethod = new TestClassAndMethod();
    @Before
    public void setUp() throws ApiException {
        super.setUp();
        timedExecution = new TimedExecution(testClassAndMethod);
    }
    @Test
    public void benchmarkFindMultiple10Vertices10Properties() {
        benchmarkFindMultipleVertices(10, 10, 60);
    }
    @Test
    public void benchmarkFindMultiple100Vertices10Properties() {
        benchmarkFindMultipleVertices(100, 10, 250);
    }
    /**
     * Creates vertices spread across three visibilities, grants the test user
     * two of the three vertex/property authorizations, then times a
     * findMultiple call and asserts the visible vertex count and latency cap.
     *
     * @param numVertices             number of vertices to create
     * @param numPropertiesPerVertex  properties per vertex
     * @param maxTimeMillis           maximum accepted call duration
     */
    private void benchmarkFindMultipleVertices(int numVertices, int numPropertiesPerVertex, long maxTimeMillis) {
        try {
            final List<String> allVertexVisibilities = ImmutableList.of("a", "b", "c");
            final List<String> allPropertyVisibilities = ImmutableList.of("x", "y", "z");
            final List<String> allVertexIds = createVertices(
                    numVertices, allVertexVisibilities,
                    numPropertiesPerVertex, allPropertyVisibilities);
            String setupWorkspaceId = setupLumifyApi.getCurrentWorkspaceId();
            LumifyApi lumifyApi = login(USERNAME_TEST_USER_2);
            // Grant only the first two of the three visibilities.
            List<String> userVertexAuthorizations = allVertexVisibilities.subList(0, 2);
            List<String> userPropertyAuthorizations = allPropertyVisibilities.subList(0, 2);
            List<String> allUserAuthorizations = new ArrayList<>();
            allUserAuthorizations.addAll(userVertexAuthorizations);
            allUserAuthorizations.addAll(userPropertyAuthorizations);
            allUserAuthorizations.add(setupWorkspaceId);
            addUserAuths(lumifyApi, USERNAME_TEST_USER_2,
                    allUserAuthorizations.toArray(new String[allUserAuthorizations.size()]));
            final VertexApi vertexApi = lumifyApi.getVertexApi();
            TimedExecution.Result<ClientApiVertexMultipleResponse> timedResponse = timedExecution.call(
                    new Callable<ClientApiVertexMultipleResponse>() {
                        public ClientApiVertexMultipleResponse call() throws Exception {
                            return vertexApi.findMultiple(allVertexIds, false);
                        }
                    });
            List<ClientApiVertex> vertices = timedResponse.result.getVertices();
            // Vertices are assigned visibilities round-robin, so 2/3 of them
            // (plus the remainder) should be visible to this user.
            int expectedVertexCount = (numVertices * userVertexAuthorizations.size() / allVertexVisibilities.size()) +
                    numVertices % allVertexVisibilities.size();
            assertEquals(expectedVertexCount, vertices.size());
            assertThat(timedResponse.timeMillis, lessThanOrEqualTo(maxTimeMillis));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
| 1,460 |
931 | package dev.engine.json;
import java.lang.reflect.Type;
/**
 * detail: JSON Engine interface.
 * @author Ttt
 */
public interface IJSONEngine<Config extends IJSONEngine.EngineConfig> {
    /**
     * detail: JSON engine configuration.
     * @author Ttt
     */
    class EngineConfig {
    }
    // ======================
    // = Conversion methods =
    // ======================
    /**
     * Convert an object to a JSON String.
     * @param object {@link Object}
     * @return JSON String
     */
    String toJson(Object object);
    /**
     * Convert an object to a JSON String.
     * @param object {@link Object}
     * @param config engine configuration
     * @return JSON String
     */
    String toJson(
            Object object,
            Config config
    );
    // =
    /**
     * Map a JSON String to an object of the specified type.
     * @param json     JSON String
     * @param classOfT {@link Class} T
     * @param <T>      the generic type
     * @return instance of type
     */
    <T> T fromJson(
            String json,
            Class<T> classOfT
    );
    /**
     * Map a JSON String to an object of the specified type.
     * @param json     JSON String
     * @param classOfT {@link Class} T
     * @param config   engine configuration
     * @param <T>      the generic type
     * @return instance of type
     */
    <T> T fromJson(
            String json,
            Class<T> classOfT,
            Config config
    );
    // =
    /**
     * Map a JSON String to an object of the specified type.
     * @param json    JSON String
     * @param typeOfT {@link Type} T
     * @param <T>     the generic type
     * @return instance of type
     */
    <T> T fromJson(
            String json,
            Type typeOfT
    );
    /**
     * Map a JSON String to an object of the specified type.
     * @param json    JSON String
     * @param typeOfT {@link Type} T
     * @param config  engine configuration
     * @param <T>     the generic type
     * @return instance of type
     */
    <T> T fromJson(
            String json,
            Type typeOfT,
            Config config
    );
    // =================
    // = Other methods =
    // =================
    /**
     * Check whether a string is valid JSON.
     * @param json JSON String to validate
     * @return {@code true} yes, {@code false} no
     */
    boolean isJSON(String json);
    /**
     * Check whether a string is a JSON Object.
     * @param json JSON String to validate
     * @return {@code true} yes, {@code false} no
     */
    boolean isJSONObject(String json);
    /**
     * Check whether a string is a JSON Array.
     * @param json JSON String to validate
     * @return {@code true} yes, {@code false} no
     */
    boolean isJSONArray(String json);
    /**
     * Indent (pretty-print) a JSON String.
     * @param json JSON String
     * @return JSON String
     */
    String toJsonIndent(String json);
    /**
     * Indent (pretty-print) a JSON String.
     * @param json   JSON String
     * @param config engine configuration
     * @return JSON String
     */
    String toJsonIndent(
            String json,
            Config config
    );
    // =
    /**
     * Convert an object to a JSON String with indentation.
     * @param object {@link Object}
     * @return JSON String
     */
    String toJsonIndent(Object object);
    /**
     * Convert an object to a JSON String with indentation.
     * @param object {@link Object}
     * @param config engine configuration
     * @return JSON String
     */
    String toJsonIndent(
            Object object,
            Config config
    );
}
2,792 | # # -*- coding:utf-8 -*-
# Author:wancong
# Date: 2018-04-30
from pyhanlp import *
def demo_tokenizer_config(text):
    """ Demonstrates dynamically configuring the preset tokenizer;
    note the setting applied here is global.

    >>> text = "泽田依子是上外日本文化经济学院的外教"
    >>> demo_tokenizer_config(text)
    [泽田依/nr, 子/ng, 是/vshi, 上外/nit, 日本/ns, 文化/n, 经济学院/nit, 的/ude1, 外教/n]
    [泽田依子/nrj, 是/vshi, 上外日本文化经济学院/nt, 的/ude1, 外教/n]
    """
    StandardTokenizer = JClass("com.hankcs.hanlp.tokenizer.StandardTokenizer")
    # Segment once with the default configuration ...
    print(StandardTokenizer.segment(text))
    # ... then enable full named-entity recognition (global setting) and
    # segment again to show the difference.
    StandardTokenizer.SEGMENT.enableAllNamedEntityRecognize(True)
    print(StandardTokenizer.segment(text))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 421 |
629 | /**
* Copyright (c) 2017-present, Facebook, Inc.
* All rights reserved.
*/
#include "common.h"
#include "flags.h"
namespace microbattles {
// Returns (ally count, enemy count, ally HP, enemy HP) for the current
// state; effective hit points include shields.
std::tuple<float, float, float, float> getUnitCountsHealth(
    cherrypi::State* state) {
  auto const& myUnits = state->unitsInfo().myUnits();
  auto const& theirUnits = state->unitsInfo().enemyUnits();

  float myHp = 0;
  for (auto& u : myUnits) {
    myHp += u->unit.health + u->unit.shield; // Include shield in HP
  }

  float theirHp = 0;
  for (auto& u : theirUnits) {
    theirHp += u->unit.health + u->unit.shield;
  }

  float myCount = myUnits.size();
  float theirCount = theirUnits.size();
  return std::make_tuple(myCount, theirCount, myHp, theirHp);
}
// Movement radius (in walktiles): distance coverable at top speed over
// FLAGS_frame_skip frames times 3, plus half the unit's larger pixel
// dimension converted to walktiles.  NOTE(review): the factor of 3 is
// presumably a safety margin over the frame-skip horizon -- confirm.
double getMovementRadius(cherrypi::Unit* u) {
  return u->topSpeed * FLAGS_frame_skip * 3 +
      std::max(u->unit.pixel_size_x, u->unit.pixel_size_y) / 2. /
          cherrypi::tc::BW::XYPixelsPerWalktile;
}
// Selects the torch device based on the --gpu flag.
at::Device defaultDevice() {
  if (FLAGS_gpu) {
    return torch::Device("cuda");
  }
  return torch::Device("cpu");
}
} // namespace microbattles
| 381 |
852 | <gh_stars>100-1000
#include "Alignment/LaserAlignment/interface/LASConstants.h"
#include "FWCore/Utilities/interface/Exception.h"
///
/// Default constructor: zero-initialize the scalar geometry constants;
/// the containers stay empty until the FillXxx() methods run.
///
LASConstants::LASConstants() : atRadius(0.), tecBsZPosition(0.), atZPosition(0.) {}
///
/// Construct from a list of parameter sets: sizes the containers, then
/// dispatches each set by its "PSetName" to the matching FillXxx() method.
/// Unknown names only produce a warning on stderr.
///
LASConstants::LASConstants(std::vector<edm::ParameterSet> const& theConstConf) {
  InitContainers();
  for (std::vector<edm::ParameterSet>::const_iterator iter = theConstConf.begin(); iter < theConstConf.end(); ++iter) {
    if (iter->getParameter<std::string>("PSetName") == "BeamsplitterKinks")
      FillBsKinks(*iter);
    else if (iter->getParameter<std::string>("PSetName") == "Radii")
      FillRadii(*iter);
    else if (iter->getParameter<std::string>("PSetName") == "ZPositions")
      FillZPositions(*iter);
    else {
      std::cerr << " [] ** WARNING: Cannot process unknown parameter set named: "
                << iter->getParameter<std::string>("PSetName") << "." << std::endl;
    }
  }
}
///
/// Trivial destructor; all members clean up after themselves.
///
LASConstants::~LASConstants() {}
///
/// Returns one beamsplitter kink, parameters are:
///  det  (0=TEC+/1=TEC-)
///  ring (0=R4/1=R6)
///  beam (0..7)
/// Throws cms::Exception on any out-of-range argument.
///
double LASConstants::GetEndcapBsKink(unsigned int det, unsigned int ring, unsigned int beam) const {
  if (!((det == 0 || det == 1) && (ring == 0 || ring == 1) && (beam < 8U))) { // beam >= 0, since beam is unsigned
    throw cms::Exception(" [LASConstants::GetEndcapBsKink]")
        << " ** ERROR: no such element: det " << det << ", ring " << ring << ", beam " << beam << "." << std::endl;
  }
  return endcapBsKinks.at(det).at(ring).at(beam);
}
///
/// Returns beamsplitter kink for alignment tube beam <beam> (0..7);
/// throws cms::Exception for beam > 7.
///
double LASConstants::GetAlignmentTubeBsKink(unsigned int beam) const {
  if (beam >= 8U) { // beam >= 0, since beam is unsigned
    throw cms::Exception(" [LASConstants::GetAlignmentTubeBsKink]")
        << " ** ERROR: no such beam: " << beam << "." << std::endl;
  }
  return alignmentTubeBsKinks.at(beam);
}
///
/// Returns the TEC beam radius for ring 0 (R4) or 1 (R6);
/// throws cms::Exception for any other ring.
///
double LASConstants::GetTecRadius(unsigned int ring) const {
  if (ring > 1U) { // ring >= 0, since ring is unsigned
    throw cms::Exception(" [LASConstants::GetTecRadius]") << " ** ERROR: no such ring: " << ring << "." << std::endl;
  }
  return tecRadii.at(ring);
}
///
/// Radius of the alignment tube beams.
///
double LASConstants::GetAtRadius(void) const { return atRadius; }
///
/// Z position of TEC disk <disk> (0..8) for det 0 (TEC+) or det 1 (TEC-);
/// the TEC- side is mirrored to negative z.  Throws cms::Exception on
/// out-of-range arguments.
///
double LASConstants::GetTecZPosition(unsigned int det, unsigned int disk) const {
  if ((det > 1) || (disk > 8)) {
    throw cms::Exception(" [LASConstants::GetTecZPosition]")
        << " ** ERROR: no such element: det " << det << ", disk " << disk << "." << std::endl;
  }

  const double zPosition = tecZPositions.at(disk);
  return (det == 0) ? zPosition : -1. * zPosition;
}
///
/// Z position of TIB module position <pos> (0..5);
/// throws cms::Exception for pos > 5.
///
double LASConstants::GetTibZPosition(unsigned int pos) const {
  if (pos > 5) {
    throw cms::Exception(" [LASConstants::GetTibZPosition]")
        << " ** ERROR: no such position: " << pos << "." << std::endl;
  }
  return tibZPositions.at(pos);
}
///
/// Z position of TOB module position <pos> (0..5);
/// throws cms::Exception for pos > 5.
///
double LASConstants::GetTobZPosition(unsigned int pos) const {
  if (pos > 5) {
    throw cms::Exception(" [LASConstants::GetTobZPosition]")
        << " ** ERROR: no such position: " << pos << "." << std::endl;
  }
  return tobZPositions.at(pos);
}
///
/// Z position of the TEC beamsplitter.  NOTE(review): the det argument is
/// currently ignored -- both endcaps return the same tecBsZPosition;
/// confirm this is intended.
///
double LASConstants::GetTecBsZPosition(unsigned int det) const { return tecBsZPosition; }
///
/// Z position of the alignment tube beamsplitter.
///
double LASConstants::GetAtBsZPosition(void) const { return atZPosition; }
///
/// Size all member containers to the detector geometry:
/// 2 dets x 2 rings x 8 beams for the endcap kinks, 8 alignment tube
/// beams, 2 TEC radii, and 9/6/6 z positions for TEC/TIB/TOB.
///
void LASConstants::InitContainers(void) {
  // beam splitter kinks
  endcapBsKinks.resize(2);  // create two dets
  for (int det = 0; det < 2; ++det) {
    endcapBsKinks.at(det).resize(2);  // create two rings per det
    for (int ring = 0; ring < 2; ++ring) {
      endcapBsKinks.at(det).at(ring).resize(8);  // 8 beams per ring
    }
  }
  alignmentTubeBsKinks.resize(8);  // 8 beams
  // radii
  tecRadii.resize(2);
  // z positions
  tecZPositions.resize(9);
  tibZPositions.resize(6);
  tobZPositions.resize(6);
}
///
/// Fill the beamsplitter-kink related containers from the configuration:
/// per-ring kink vectors for TEC+ and TEC- (each shifted by its global
/// offset) plus the alignment tube kinks.
///
void LASConstants::FillBsKinks(edm::ParameterSet const& theBsKinkConf) {
  // tec+
  endcapBsKinks.at(0).at(0) = theBsKinkConf.getParameter<std::vector<double> >("LASTecPlusRing4BsKinks");
  endcapBsKinks.at(0).at(1) = theBsKinkConf.getParameter<std::vector<double> >("LASTecPlusRing6BsKinks");
  // apply global offsets
  for (unsigned int ring = 0; ring < 2; ++ring) {
    for (unsigned int beam = 0; beam < 8; ++beam) {
      endcapBsKinks.at(0).at(ring).at(beam) += theBsKinkConf.getParameter<double>("TecPlusGlobalOffset");
    }
  }
  // tec-
  endcapBsKinks.at(1).at(0) = theBsKinkConf.getParameter<std::vector<double> >("LASTecMinusRing4BsKinks");
  endcapBsKinks.at(1).at(1) = theBsKinkConf.getParameter<std::vector<double> >("LASTecMinusRing6BsKinks");
  // apply global offsets
  for (unsigned int ring = 0; ring < 2; ++ring) {
    for (unsigned int beam = 0; beam < 8; ++beam) {
      endcapBsKinks.at(1).at(ring).at(beam) += theBsKinkConf.getParameter<double>("TecMinusGlobalOffset");
    }
  }
  // at
  alignmentTubeBsKinks = theBsKinkConf.getParameter<std::vector<double> >("LASAlignmentTubeBsKinks");
}
///
/// fill the beam radii
///
void LASConstants::FillRadii(edm::ParameterSet const& theRadiiConf) {
  // nominal beam radii: one entry per TEC ring (cf. tecRadii.resize(2) in
  // InitContainers) plus the single alignment-tube beam radius
  tecRadii = theRadiiConf.getParameter<std::vector<double> >("LASTecRadius");
  atRadius = theRadiiConf.getParameter<double>("LASAtRadius");
}
///
///
///
void LASConstants::FillZPositions(edm::ParameterSet const& theZPosConf) {
  // per-element z positions (9 TEC disks, 6 TIB and 6 TOB positions,
  // cf. the resize() calls in InitContainers)
  tecZPositions = theZPosConf.getParameter<std::vector<double> >("LASTecZPositions");
  tibZPositions = theZPosConf.getParameter<std::vector<double> >("LASTibZPositions");
  tobZPositions = theZPosConf.getParameter<std::vector<double> >("LASTobZPositions");
  // single z position per beam splitter
  tecBsZPosition = theZPosConf.getParameter<double>("LASTecBeamSplitterZPosition");
  atZPosition = theZPosConf.getParameter<double>("LASAtBeamsplitterZPosition");
}
| 2,279 |
852 | #include "__subsys__/__class__/src/__class__.h"
| 21 |
3,428 | {"id":"00473","group":"easy-ham-2","checksum":{"type":"MD5","value":"b184f51963f4482e41483fde5223b37f"},"text":"From <EMAIL> Tue Aug 20 12:43:52 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id 6607743C34\n\tfor <jm@localhost>; Tue, 20 Aug 2002 07:43:52 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Tue, 20 Aug 2002 12:43:52 +0100 (IST)\nReceived: from lugh.tuatha.org (<EMAIL> [172.16.17.325]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g7KBjIZ04448 for\n <<EMAIL>>; Tue, 20 Aug 2002 12:45:18 +0100\nReceived: from lugh (root@localhost [127.0.0.1]) by lugh.tuatha.org\n (8.9.3/8.9.3) with ESMTP id MAA00734; Tue, 20 Aug 2002 12:44:07 +0100\nReceived: from milexc01.maxtor.com ([134.6.205.206]) by lugh.tuatha.org\n (8.9.3/8.9.3) with ESMTP id MAA00681 for <<EMAIL>>; Tue,\n 20 Aug 2002 12:43:50 +0100\nX-Authentication-Warning: lugh.tuatha.org: Host [192.168.127.12] claimed to\n be milexc01.maxtor.com\nReceived: by milexc01.maxtor.com with Internet Mail Service (5.5.2653.19)\n id <RHPLX0WP>; Tue, 20 Aug 2002 12:43:47 +0100\nMessage-Id: <<EMAIL>>\nFrom: \"<NAME>\" <<EMAIL>>\nTo: \"'<EMAIL>'\" <<EMAIL>>\nSubject: RE: [ILUG] mirroring on a running system\nDate: Tue, 20 Aug 2002 12:43:46 +0100\nMIME-Version: 1.0\nX-Mailer: Internet Mail Service (5.5.2653.19)\nContent-Type: text/plain\nSender: [email protected]\nErrors-To: [email protected]\nX-Mailman-Version: 1.1\nPrecedence: bulk\nList-Id: Irish Linux Users' Group <ilug.linux.ie>\nX-Beenthere: [email protected]\n\nSound. I'll be there at 7ish so. \n\nCW\n\n-----------\n I've to leave the girlfriend home first. 
So, I'll be there sometime just\nafter seven.\n\nKate\n\n-- \nIrish Linux Users' Group: <EMAIL>\nhttp://www.linux.ie/mailman/listinfo/ilug for (un)subscription information.\nList maintainer: <EMAIL>\n\n"} | 862 |
4,012 | <filename>cpp/src/io/utilities/hostdevice_vector.hpp
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
/**
 * @brief A helper class that wraps fixed-length device memory for the GPU, and
 * a mirror host pinned memory for the CPU.
 *
 * This abstraction allocates a specified fixed chunk of device memory that can
 * be initialized upfront, or gradually initialized as required.
 * The host-side memory can be used to manipulate data on the CPU before and
 * after operating on the same data on the GPU.
 *
 * Movable but not copyable: the pinned host buffer is owned by this object and
 * released on destruction or when replaced by move-assignment.
 */
template <typename T>
class hostdevice_vector {
 public:
  using value_type = T;

  hostdevice_vector() {}
  hostdevice_vector(hostdevice_vector&& v) { move(std::move(v)); }

  hostdevice_vector& operator=(hostdevice_vector&& v)
  {
    if (this != &v) {
      // Release the pinned buffer we currently own before taking over v's
      // resources; the previous implementation leaked it here.
      free_host_memory();
      move(std::move(v));
    }
    return *this;
  }

  explicit hostdevice_vector(size_t max_size,
                             rmm::cuda_stream_view stream = rmm::cuda_stream_default)
    : hostdevice_vector(max_size, max_size, stream)
  {
  }

  /**
   * @brief Allocates pinned host memory and device memory for `max_size` elements.
   *
   * @param initial_size number of elements considered valid/initialized upfront
   * @param max_size allocation capacity, in elements
   * @param stream CUDA stream used for the device allocation
   */
  explicit hostdevice_vector(size_t initial_size,
                             size_t max_size,
                             rmm::cuda_stream_view stream = rmm::cuda_stream_default)
    : num_elements(initial_size), max_elements(max_size)
  {
    if (max_elements != 0) {
      CUDA_TRY(cudaMallocHost(&h_data, sizeof(T) * max_elements));
      d_data.resize(sizeof(T) * max_elements, stream);
    }
  }

  ~hostdevice_vector() { free_host_memory(); }

  /**
   * @brief Appends `data` on the host side if capacity allows.
   * @return true if the element was stored, false if the vector is full
   */
  bool insert(const T& data)
  {
    if (num_elements < max_elements) {
      h_data[num_elements] = data;
      num_elements++;
      return true;
    }
    return false;
  }

  size_t max_size() const noexcept { return max_elements; }
  size_t size() const noexcept { return num_elements; }
  // Size of the *initialized* portion in bytes, not of the full allocation.
  size_t memory_size() const noexcept { return sizeof(T) * num_elements; }

  T& operator[](size_t i) const { return h_data[i]; }
  T* host_ptr(size_t offset = 0) const { return h_data + offset; }
  T* begin() { return h_data; }
  T* end() { return h_data + num_elements; }

  T* d_begin() { return static_cast<T*>(d_data.data()); }
  T* d_end() { return static_cast<T*>(d_data.data()) + num_elements; }
  // static_cast from void* is sufficient here and consistent with d_begin/d_end
  // (the original mixed static_cast and reinterpret_cast for the same conversion).
  T* device_ptr(size_t offset = 0) { return static_cast<T*>(d_data.data()) + offset; }
  T const* device_ptr(size_t offset = 0) const
  {
    return static_cast<T const*>(d_data.data()) + offset;
  }

  operator cudf::device_span<T>() { return {device_ptr(), max_elements}; }
  operator cudf::device_span<T const>() const { return {device_ptr(), max_elements}; }
  operator cudf::host_span<T>() { return {h_data, max_elements}; }
  operator cudf::host_span<T const>() const { return {h_data, max_elements}; }

  // Copies the initialized host range to the device (async on `stream`).
  void host_to_device(rmm::cuda_stream_view stream, bool synchronize = false)
  {
    CUDA_TRY(cudaMemcpyAsync(
      d_data.data(), h_data, memory_size(), cudaMemcpyHostToDevice, stream.value()));
    if (synchronize) { stream.synchronize(); }
  }

  // Copies the initialized device range back to the host (async on `stream`).
  void device_to_host(rmm::cuda_stream_view stream, bool synchronize = false)
  {
    CUDA_TRY(cudaMemcpyAsync(
      h_data, d_data.data(), memory_size(), cudaMemcpyDeviceToHost, stream.value()));
    if (synchronize) { stream.synchronize(); }
  }

 private:
  // Steals v's state, leaving v empty. A caller that already owns a pinned
  // buffer must release it first (see operator=).
  void move(hostdevice_vector&& v)
  {
    stream       = v.stream;
    max_elements = v.max_elements;
    num_elements = v.num_elements;
    h_data       = v.h_data;
    d_data       = std::move(v.d_data);

    v.max_elements = 0;
    v.num_elements = 0;
    v.h_data       = nullptr;
  }

  // Frees the pinned host allocation, if any.
  void free_host_memory()
  {
    if (h_data != nullptr) {
      auto const free_result = cudaFreeHost(h_data);
      assert(free_result == cudaSuccess);
      static_cast<void>(free_result);  // avoid unused-variable warning when NDEBUG
      h_data = nullptr;
    }
  }

  rmm::cuda_stream_view stream{};
  size_t max_elements{};
  size_t num_elements{};
  T* h_data{};
  rmm::device_buffer d_data{};
};
namespace cudf {
namespace detail {
/**
* @brief Wrapper around hostdevice_vector to enable two-dimensional indexing.
*
* Does not incur additional allocations.
*/
template <typename T>
class hostdevice_2dvector {
 public:
  hostdevice_2dvector(size_t rows,
                      size_t columns,
                      rmm::cuda_stream_view stream = rmm::cuda_stream_default)
    // members listed in declaration order (_data, then _size) to avoid the
    // -Wreorder warning produced by the previous initializer-list order
    : _data{rows * columns, stream}, _size{rows, columns}
  {
  }

  operator device_2dspan<T>() { return {_data.device_ptr(), _size}; }
  operator device_2dspan<T const>() const { return {_data.device_ptr(), _size}; }

  device_2dspan<T> device_view() { return static_cast<device_2dspan<T>>(*this); }
  // Fixed: the const overload must return a span of *const* T — it previously
  // declared device_2dspan<T> while casting to device_2dspan<T const>.
  device_2dspan<T const> device_view() const { return static_cast<device_2dspan<T const>>(*this); }

  operator host_2dspan<T>() { return {_data.host_ptr(), _size}; }
  operator host_2dspan<T const>() const { return {_data.host_ptr(), _size}; }

  host_2dspan<T> host_view() { return static_cast<host_2dspan<T>>(*this); }
  // Fixed: same const-correctness issue as device_view() const.
  host_2dspan<T const> host_view() const { return static_cast<host_2dspan<T const>>(*this); }

  // Row accessors return host-side spans over a single row.
  host_span<T> operator[](size_t row)
  {
    return {_data.host_ptr() + host_2dspan<T>::flatten_index(row, 0, _size), _size.second};
  }

  host_span<T const> operator[](size_t row) const
  {
    return {_data.host_ptr() + host_2dspan<T>::flatten_index(row, 0, _size), _size.second};
  }

  auto size() const noexcept { return _size; }
  auto count() const noexcept { return _size.first * _size.second; }

  T* base_host_ptr(size_t offset = 0) { return _data.host_ptr(offset); }
  T* base_device_ptr(size_t offset = 0) { return _data.device_ptr(offset); }
  T const* base_host_ptr(size_t offset = 0) const { return _data.host_ptr(offset); }
  T const* base_device_ptr(size_t offset = 0) const { return _data.device_ptr(offset); }

  size_t memory_size() const noexcept { return _data.memory_size(); }

  void host_to_device(rmm::cuda_stream_view stream, bool synchronize = false)
  {
    _data.host_to_device(stream, synchronize);
  }

  void device_to_host(rmm::cuda_stream_view stream, bool synchronize = false)
  {
    _data.device_to_host(stream, synchronize);
  }

 private:
  hostdevice_vector<T> _data;
  typename host_2dspan<T>::size_type _size;
};
} // namespace detail
} // namespace cudf
| 2,577 |
1,909 | <filename>xchange-core/src/main/java/org/knowm/xchange/service/trade/params/CancelOrderByInstrument.java
package org.knowm.xchange.service.trade.params;
import org.knowm.xchange.instrument.Instrument;
/**
 * {@link CancelOrderParams} variant identifying the order to cancel by its traded
 * {@link Instrument}.
 */
public interface CancelOrderByInstrument extends CancelOrderParams {

  /**
   * Returns the instrument of the order to be cancelled.
   * (The redundant {@code public} modifier was removed — interface members are
   * implicitly public.)
   */
  Instrument getInstrument();
}
| 95 |
1,826 | package com.vladsch.flexmark.ext.escaped.character;
| 18 |
488 | #ifndef AST_GRAPH_H
#define AST_GRAPH_H
#include "DOTRepresentation.h"
// AST Graph support developed by Andreas
// template <class T> class DOTRepresentation;
namespace AST_Graph {
//options for handling null pointers
enum pointerHandling
{
graph_NULL,
do_not_graph_NULL
};
//options for traversal types
enum traversalType
{
memory_pool_traversal,
whole_graph_AST
};
//BEGIN MY_PAIR
/***********************************************************************************
 * Define custom return type from the filter functionals. This is an experiment
 * to see if a std::pair-like data structure where the member names are not
 * std::pair<>::first and std::pair<>::second. This data structure is expected to
 * change as the code develops.
 ***********************************************************************************/
template <class _T1, class _T2, class _T3>
struct my_pair {
  typedef _T1 first_type;   ///< @c first_type is the first bound type
  typedef _T2 second_type;  ///< @c second_type is the second bound type
  typedef _T3 third_type;   ///< @c third_type is the third bound type (fixed: was mistyped as _T2)
  _T1 addToGraph;  ///< whether the node/edge should be added to the graph
  _T2 DOTOptions;  ///< DOT formatting options
  _T3 DOTLabel;    ///< DOT label text
  /** The default constructor creates the members using their
   * respective default constructors. */
  my_pair() : addToGraph(), DOTOptions(), DOTLabel() {}
  /** Three objects may be passed to a @c my_pair constructor to be copied. */
  my_pair(const _T1& a, const _T2& b, const _T3& c) : addToGraph(a), DOTOptions(b), DOTLabel(c) {}
  /** Templated converting copy constructor.
   * Fixed: the third member is copied from @c p.DOTLabel — the previous code
   * referenced the nonexistent member @c p.label, which failed to compile as
   * soon as this constructor was instantiated. */
  template <class _U1, class _U2, class _U3>
  my_pair(const my_pair<_U1, _U2, _U3>& p)
      : addToGraph(p.addToGraph), DOTOptions(p.DOTOptions), DOTLabel(p.DOTLabel) {}
};
/// Two my_pairs of the same type are equal iff all three members are equal.
template <class _T1, class _T2, class _T3>
inline bool operator==(const my_pair<_T1, _T2,_T3>& x, const my_pair<_T1, _T2,_T3>& y)
{
  return x.addToGraph == y.addToGraph && x.DOTOptions == y.DOTOptions && x.DOTLabel == y.DOTLabel;
}
/// <http://gcc.gnu.org/onlinedocs/libstdc++/20_util/howto.html#my_pairlt>
/// NOTE(review): lexicographic compare over addToGraph and DOTOptions only —
/// DOTLabel is ignored here although operator== does include it, so two objects
/// can be neither equal nor ordered relative to each other. Presumably this
/// mirrors the two-member std::pair the struct was copied from; confirm whether
/// DOTLabel should participate before relying on ordering.
template <class _T1, class _T2, class _T3>
inline bool operator<(const my_pair<_T1, _T2,_T3>& x, const my_pair<_T1, _T2,_T3>& y)
{
  return x.addToGraph < y.addToGraph ||
         (!(y.addToGraph < x.addToGraph) && x.DOTOptions < y.DOTOptions);
}
/// Uses @c operator== to find the result.
template <class _T1, class _T2, class _T3>
inline bool operator!=(const my_pair<_T1, _T2, _T3>& x, const my_pair<_T1, _T2,_T3>& y) {
  return !(x == y);
}
/// Uses @c operator< to find the result.
template <class _T1, class _T2, class _T3>
inline bool operator>(const my_pair<_T1, _T2, _T3>& x, const my_pair<_T1, _T2,_T3>& y) {
  return y < x;
}
/// Uses @c operator< to find the result (x <= y  iff  !(y < x)).
template <class _T1, class _T2, class _T3>
inline bool operator<=(const my_pair<_T1, _T2, _T3>& x, const my_pair<_T1, _T2,_T3>& y) {
  return !(y < x);
}
/// Uses @c operator< to find the result (x >= y  iff  !(x < y)).
template <class _T1, class _T2, class _T3>
inline bool operator>=(const my_pair<_T1, _T2, _T3>& x, const my_pair<_T1, _T2,_T3>& y) {
  return !(x < y);
}
//END MY_PAIR
// Functor implementation to support filtering of the generated AST graphs
// Build a simpler type to simplify the code:
// an AST node pointer paired with its display name.
typedef std::pair<SgNode*,std::string> NodeType;
#if 0
// superseded by the my_pair-based FunctionalReturnType below
typedef struct{
bool addToGraph;
std::string DOTOptions;
} FunctionalReturnType;
#endif
// Return type of the node/edge filter functionals:
// (addToGraph flag, DOT options string, DOT label string).
typedef my_pair<bool,std::string,std::string> FunctionalReturnType;
typedef std::vector<NodeType> NodeTypeVector;
/***************************************************************************************
 * The functional
 *      struct defaultFilterUnary
 * is an example filter on nodes (operator() is defined elsewhere).
 **************************************************************************************/
struct ROSE_DLL_API defaultFilterUnary: public std::unary_function<NodeType,FunctionalReturnType >
{
// This functor filters SgFileInfo objects and IR nodes from the GNU compatibility file
result_type operator() (argument_type x );
};
/***************************************************************************************
 * The functional
 *      struct defaultFilterBinary
 * is an example filter on edges (first argument: source node; second: target).
 **************************************************************************************/
struct ROSE_DLL_API defaultFilterBinary: public std::binary_function<SgNode*,NodeType,FunctionalReturnType >
{
// This functor filters SgFileInfo objects and IR nodes from the GNU compatibility file
result_type operator() ( first_argument_type x, second_argument_type y);
};
// This functor is derived from the STL functor mechanism;
// it decides whether a node becomes part of the graph at all.
struct nodePartOfGraph: public std::unary_function< std::pair< SgNode*, std::string>&,FunctionalReturnType >
{
result_type operator() ( argument_type x );
};
// This functor is derived from the STL functor mechanism
struct filterSgFileInfo: public std::unary_function< std::pair< SgNode*, std::string>&,FunctionalReturnType >
{
// This functor filters SgFileInfo objects from being built in the generated graph
result_type operator() ( argument_type x );
};
// This functor is derived from the STL functor mechanism
struct filterSgFileInfoAndGnuCompatabilityNode: public std::unary_function< std::pair< SgNode*, std::string>&, FunctionalReturnType >
{
// This functor filters SgFileInfo objects and IR nodes from the GNU compatibility file
result_type operator() ( argument_type x );
};
// DQ (2/23/2006): Andreas' new work
/*******************************************************************************************************************************
 * The class
 *      class DataMemberPointersToIR
 * implements two different ways of constructing a graph from the AST:
 *   * the memory pool traversal, which is why the class inherits from ROSE_VisitTraversal
 *   * a whole-AST traversal implemented by following pointers between nodes
 * This design decision is peculiar from an inheritance perspective, but makes sense to
 * bridge between the inheritance style of Marcus and an STL algorithm style. Caveats:
 *   * one member function is only used by the memory pool traversal: visit(..)
 * The member function generateGraph(..) is required to achieve STL-style algorithms
 * where the base class uses implementation by virtual functions.
 * Benefits:
 *   * avoids duplicate implementation
 *   * allows a very simple implementation
 ******************************************************************************************************************************/
template<typename Functional1, typename Functional2>
class DataMemberPointersToIR: public DOTRepresentation<SgNode*>, private ROSE_VisitTraversal
{
public:
DataMemberPointersToIR(const Functional1& addNodeFunctional, const Functional2& addEdgeFunctional,traversalType tT, pointerHandling graphEmpty);
virtual ~DataMemberPointersToIR(){};
private:
//Every node which is graphed is put into the NodeExists list to avoid graphing it twice.
std::list<SgNode*> NodeExists;
//In order to use the memory pool traversal as an STL style algorithm these
//two variables must be introduced. For the whole AST traversal they are NULL.
//These are the functionals which implement the conditions on which nodes and edges
//are filtered out.
Functional1 nodeFunctional;
Functional2 edgeFunctional;
//Defines which traversal type is used (memory pool vs. whole AST).
traversalType whichTraversal;
//Specifies whether NULL pointers should be represented in the graph.
pointerHandling graphNull;
public:
//Generates a graph from the AST using either the whole graph traversal or the
//memory pool traversal. The 'depth' parameter is ONLY VALID FOR THE WHOLE AST
//TRAVERSAL. In the case of the memory pool traversal set (depth<0).
//If the 'depth' parameter is
//     * (depth<0) then recursively follow all pointers in 'graphNode',
//       the nodes 'graphNode' points to, etc.
//     * (depth>0) then follow pointers just depth steps out from 'graphNode'
void generateGraph(SgNode* graphNode, int depth);
private:
//Implementation of the virtual visit function for the memory pool traversal.
//ONLY VALID FOR THE MEMORY POOL TRAVERSAL.
void visit ( SgNode* node);
};
/*********************************************************************************************
 * The function
 *    void AST_Graph::writeGraphOfMemoryPoolToFile(std::string filename, pointerHandling,
 *                                                 Functional1, Functional2);
 * writes a graph of the whole memory pool to a new file called 'filename'. The third argument
 * is a custom functional of the form
 *       unary_function<std::pair<SgNode*,std::string>,FunctionalReturnType>
 * where the argument is a node together with its name (see defaultFilterUnary for an example).
 * The fourth argument is a custom functional to filter edges, of the form
 *       binary_function<SgNode*,std::pair<SgNode*,std::string>,FunctionalReturnType>
 * where the edge goes from the vertex in the first argument to the vertex in the second
 * argument (see defaultFilterBinary for an example).
 *
 * The pointerHandling argument selects whether a node and an edge are created for NULL
 * pointers (graph_NULL) or whether those nodes and edges are filtered out
 * (do_not_graph_NULL, the usual choice).
 **********************************************************************************************/
template<typename Functional1, typename Functional2>
ROSE_DLL_API void writeGraphOfMemoryPoolToFile(std::string filename, AST_Graph::pointerHandling, Functional1, Functional2);
/*********************************************************************************************
 * The function
 *    void writeGraphOfMemoryPoolToFile(std::string filename, pointerHandling);
 * does exactly the same as the variant above, except that there are no filters
 * (Functional1/Functional2) on nodes and edges.
 **********************************************************************************************/
ROSE_DLL_API void writeGraphOfMemoryPoolToFile(std::string filename, AST_Graph::pointerHandling);
/*********************************************************************************************
 * The function
 *    void AST_Graph::writeGraphOfMemoryPoolToFile(std::string filename, pointerHandling,
 *                                                 Functional1);
 * does exactly the same as the two-functional variant above, except that there is no
 * edge filter (Functional2), so all edges are kept.
 **********************************************************************************************/
template<typename Functional1>
ROSE_DLL_API void writeGraphOfMemoryPoolToFile(std::string filename, AST_Graph::pointerHandling, Functional1);
/*************************************************************************************************
 * The function
 *    void writeGraphOfAstSubGraphToFile(std::string filename, SgNode*, pointerHandling,
 *                                       Functional1, Functional2, int depth);
 * writes a graph of the AST subgraph rooted at the given SgNode into the file 'filename'.
 * The fourth argument is a custom functional of the form
 *       unary_function<std::pair<SgNode*,std::string>,FunctionalReturnType>
 * where the argument is a node together with its name (see defaultFilterUnary for an example).
 * The fifth argument is a custom functional to filter edges, of the form
 *       binary_function<SgNode*,std::pair<SgNode*,std::string>,FunctionalReturnType>
 * where the edge goes from the vertex in the first argument to the vertex in the second
 * argument (see defaultFilterBinary for an example).
 *
 * The pointerHandling argument selects whether a node and an edge are created for NULL
 * pointers, as for writeGraphOfMemoryPoolToFile above. A negative 'depth' follows all
 * pointers recursively; a positive 'depth' follows pointers only that many steps out
 * from 'node'.
 **********************************************************************************************/
template<typename Functional1, typename Functional2>
ROSE_DLL_API void writeGraphOfAstSubGraphToFile(std::string filename, SgNode* node, AST_Graph::pointerHandling, Functional1 addNode, Functional2 addEdge, int depth = -1 );
/************************************************************************************************************
 * The function
 *    void writeGraphOfAstSubGraphToFile(std::string filename, SgNode*, pointerHandling,
 *                                       Functional1, int depth);
 * does the same as the variant above, except that there is no edge filter (Functional2).
 ************************************************************************************************************/
template<typename Functional1>
ROSE_DLL_API void writeGraphOfAstSubGraphToFile(std::string filename, SgNode* node, AST_Graph::pointerHandling, Functional1 addNode, int depth = -1);
/************************************************************************************************************
 * The function
 *    void writeGraphOfAstSubGraphToFile(std::string filename, SgNode*, pointerHandling, int depth);
 * does the same as the variant above, except that there are no filters
 * (Functional1/Functional2) on nodes and edges.
 ************************************************************************************************************/
ROSE_DLL_API void writeGraphOfAstSubGraphToFile(std::string filename, SgNode* node, AST_Graph::pointerHandling graphNullPointers,int depth = -1 );
} //End namespace AST_Graph
// DQ (2/22/2006): Include the template functions implementing the
// generation of graphs of the AST (uses memory pools to see all AST IR nodes).
#include "astGraphTemplateImpl.h"
#endif
| 4,230 |
428 | /**
* Copyright 2008 - 2019 The Loon Game Engine Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* @project loon
* @author cping
* @email:<EMAIL>
* @version 0.5
*/
package loon.component.layout;
/**
 * String keys and component type names used when building UI layouts from JSON
 * templates.
 *
 * <p>Note: the {@code LAYOUY_*} constant names are misspelled (presumably
 * "LAYOUT" was intended) but are part of the public API and are therefore kept
 * unchanged for compatibility.
 */
public class JsonTemplate {

	// --- keys of a layout node in the JSON document ---
	public final static String LAYOUY_TYPE = "type";
	public final static String LAYOUY_CHILD = "child";
	public final static String LAYOUY_PROPS = "props";
	public final static String LAYOUY_TEXT = "text";
	public final static String LAYOUY_CODE = "code";
	public final static String LAYOUY_COLOR = "color";
	public final static String LAYOUY_VAR = "var";
	public final static String LAYOUY_ALIGN = "align";
	public final static String LAYOUY_FONT_SIZE = "font.size";

	// --- component type names recognized by the template loader ---
	public final static String COMP_BTN = "button";
	public final static String COMP_BTN_IMG = "button.image";
	public final static String COMP_PAPER = "paper";
	public final static String COMP_PROGRESS = "progress";
	public final static String COMP_TOAST = "toast";
	public final static String COMP_LABEL = "label";
	public final static String COMP_LAYER = "layer";
	public final static String COMP_SELECT = "select";
	public final static String COMP_MENU = "menu";
	public final static String COMP_MENU_SELECT = "menu.select";
	public final static String COMP_TEXTAREA = "textarea";
	public final static String COMP_CHECK = "check";
	public final static String COMP_MESSAGE = "message";
	public final static String COMP_MESSAGEBOX = "message.box";
	public final static String COMP_TEXT_FIELD = "text";

	// --- sprite type names ---
	public final static String SPR_SPRITE = "sprite";
	public final static String SPR_ENTITY = "entity";
}
| 629 |
1,156 | <filename>reagent/training/rl_trainer_pytorch.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Optional
from reagent.core.parameters import RLParameters
logger = logging.getLogger(__name__)
# pyre-fixme[13]: Attribute `rl_parameters` is never initialized.
class RLTrainerMixin:
    """Mixin exposing common RL hyper-parameters, read from ``rl_parameters``
    with optional per-instance overrides for a few of them."""

    # Q-value for an action that is not possible. Guaranteed to be worse than
    # any legitimate action.
    ACTION_NOT_POSSIBLE_VAL = -1e9

    # Per-instance overrides; ``None`` means "fall back to rl_parameters".
    # (todo: potential inconsistencies between overrides and rl_parameters)
    _use_seq_num_diff_as_time_diff = None
    _maxq_learning = None
    _multi_steps = None
    rl_parameters: RLParameters

    @property
    def gamma(self) -> float:
        return self.rl_parameters.gamma

    @property
    def tau(self) -> float:
        return self.rl_parameters.target_update_rate

    @property
    def multi_steps(self) -> Optional[int]:
        if self._multi_steps is not None:
            return self._multi_steps
        return self.rl_parameters.multi_steps

    @multi_steps.setter
    def multi_steps(self, multi_steps):
        self._multi_steps = multi_steps

    @property
    def maxq_learning(self) -> bool:
        if self._maxq_learning is not None:
            return self._maxq_learning
        return self.rl_parameters.maxq_learning

    @maxq_learning.setter
    def maxq_learning(self, maxq_learning):
        self._maxq_learning = maxq_learning

    @property
    def use_seq_num_diff_as_time_diff(self) -> bool:
        if self._use_seq_num_diff_as_time_diff is not None:
            return self._use_seq_num_diff_as_time_diff
        return self.rl_parameters.use_seq_num_diff_as_time_diff

    @use_seq_num_diff_as_time_diff.setter
    def use_seq_num_diff_as_time_diff(self, use_seq_num_diff_as_time_diff):
        self._use_seq_num_diff_as_time_diff = use_seq_num_diff_as_time_diff

    @property
    def rl_temperature(self) -> float:
        return self.rl_parameters.temperature
| 867 |
605 | import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
class TestSwiftPrivateImport(TestBase):
    """Checks that lldb can debug a binary that privately imports a Swift
    module whose debug info / swiftmodule is unavailable at debug time."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)

    @skipUnlessDarwin
    @swiftTest
    def test_private_import(self):
        """Test a library with a private import for which there is no debug info"""
        # Build with the Invisible module present, then delete its source and
        # build products so the debugger cannot load any information about it.
        invisible_swift = self.getBuildArtifact("Invisible.swift")
        import shutil
        shutil.copyfile("InvisibleSource.swift", invisible_swift)
        self.build()
        os.unlink(invisible_swift)
        os.unlink(self.getBuildArtifact("Invisible.swiftmodule"))
        os.unlink(self.getBuildArtifact("Invisible.swiftinterface"))
        # For remote runs the dylib must be copied into the remote working dir.
        if lldb.remote_platform:
            wd = lldb.remote_platform.GetWorkingDirectory()
            filename = 'libInvisible.dylib'
            err = lldb.remote_platform.Put(
                lldb.SBFileSpec(self.getBuildArtifact(filename)),
                lldb.SBFileSpec(os.path.join(wd, filename)))
            self.assertFalse(err.Fail(), 'Failed to copy ' + filename)
        lldbutil.run_to_source_breakpoint(
            self, 'break here', lldb.SBFileSpec('main.swift'),
            extra_images=['Library'])
        # Even without the Invisible module's metadata, the dynamic type of 'x'
        # should still resolve to Invisible.InvisibleStruct.
        self.expect("fr var -d run -- x", substrs=["(Invisible.InvisibleStruct)"])
        # FIXME: This crashes LLDB with a Swift DESERIALIZATION FAILURE.
        # self.expect("fr var -d run -- y", substrs=["(Any)"])
| 660 |
974 | from ethereum import utils
from ethereum import tester as t
try:
from ecdsa_recover import ecdsa_raw_sign, ecdsa_raw_recover
except ImportError:
from bitcoin import ecdsa_raw_sign, ecdsa_raw_recover
def make_ticket(privkey, value):
    """Create a signed payment ticket for ``value``.

    The value is zero-padded to 32 bytes, hashed with sha3, and signed with
    ``privkey``; returns the tuple ``(value, v, r, s)``.
    """
    encoded = utils.zpad(utils.encode_int(value), 32)
    v, r, s = ecdsa_raw_sign(utils.sha3(encoded), privkey)
    return (value, v, r, s)
def test():
    """End-to-end payment-channel test: open a channel, issue tickets of
    increasing value off-chain, and close with the highest-value ticket."""
    t.gas_price = 0
    s = t.state()
    c = s.abi_contract('channel.se')
    a0_prebal = s.block.get_balance(t.a0)
    a1_prebal = s.block.get_balance(t.a1)
    # a0 funds the channel with 10**18 wei (1 ether) and a 3600 s lifetime
    cid = c.create_channel(t.a0, t.a1, 10**18, 3600, value=10**18)
    # earlier, lower-valued tickets are superseded by the final one for 702
    make_ticket(t.k0, 2)
    make_ticket(t.k0, 202)
    _value, _v, _r, _s = make_ticket(t.k0, 702)
    # the counterparty (a1) closes the channel with the best ticket
    c.close_channel(cid, _value, _v, _r, _s, sender=t.k1)
    # gas_price is 0, so balances shift by exactly the ticket value
    assert s.block.get_balance(t.a0) == a0_prebal - 702
    assert s.block.get_balance(t.a1) == a1_prebal + 702

if __name__ == '__main__':
    test()
| 448 |
# Name of the metadata file written for each trial.
METADATA_FILE_NAME = 'trial_metadata.yml'
SHORT_HASH_INT = 6  # taking a section of the hash for short names (e.g. folder names), 6 characters
# TRAINING defaults
DEFAULT_EPOCHS = 200
DEFAULT_BATCH_SIZE = 64
1,982 | <gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The Package that contains all the news commands for the univaq department"""
import telegram
from telegram.ext import ConversationHandler
from libs import utils
def univaq(bot, update):
    """Show the section keyboard for the univaq news menu
    (in evidenza / ultimissime) and enter the 'univaq' conversation state."""
    sections = [['In Evidenza'], ['Ultimissime'], ['Chiudi']]
    keyboard = telegram.ReplyKeyboardMarkup(sections, one_time_keyboard=True)
    bot.sendMessage(update.message.chat_id,
                    'Scegli la sezione:',
                    reply_markup=keyboard)
    return "univaq"
def inevidenza(bot, update):
    """Send the first five 'in evidenza' univaq news items as an HTML list."""
    lines = []
    for position, item in enumerate(utils.NEWS['univaq'][0:5], start=1):
        lines.append((str(position) + ' - <a href="{link}">{title}</a>\n\n').format(**item))
    footer = ('<a href="http://www.univaq.it">'
              'Vedi le altre notizie</a> e attiva le notifiche con /newson per '
              'restare sempre aggiornato')
    bot.sendMessage(update.message.chat_id,
                    parse_mode='HTML', disable_web_page_preview=True,
                    text=''.join(lines) + footer,
                    reply_markup=telegram.ReplyKeyboardRemove())
    return ConversationHandler.END
def ultimissime(bot, update):
    """Send five 'ultimissime' univaq news items (entries 5..9) as an HTML list."""
    lines = []
    for position, item in enumerate(utils.NEWS['univaq'][5:10], start=1):
        lines.append((str(position) + ' - <a href="{link}">{title}</a>\n\n').format(**item))
    footer = ('<a href="http://www.univaq.it">'
              'Vedi le altre notizie</a> e attiva le notifiche con /newson per '
              'restare sempre aggiornato')
    bot.sendMessage(update.message.chat_id,
                    parse_mode='HTML', disable_web_page_preview=True,
                    text=''.join(lines) + footer,
                    reply_markup=telegram.ReplyKeyboardRemove())
    return ConversationHandler.END
def univaqon(bot, update):
    """Enable univaq news notifications for the requesting chat."""
    chat_id = update.message.chat_id
    if chat_id in utils.USERS['univaq']:
        # Already subscribed: just tell the user.
        reply = 'Le notifiche sono già abilitate!'
    else:
        utils.subscribe_user(chat_id, 'univaq')
        reply = 'Notifiche Abilitate!'
    bot.sendMessage(chat_id,
                    text=reply,
                    reply_markup=telegram.ReplyKeyboardRemove())
    return ConversationHandler.END
def univaqoff(bot, update):
    """Disable univaq news notifications for the requesting chat."""
    chat_id = update.message.chat_id
    if chat_id not in utils.USERS['univaq']:
        # Not subscribed: nothing to disable.
        reply = 'Per disattivare le notifiche dovresti prima attivarle.'
    else:
        utils.unsubscribe_user(chat_id, 'univaq')
        reply = 'Notifiche Disattivate!'
    bot.sendMessage(chat_id,
                    text=reply,
                    reply_markup=telegram.ReplyKeyboardRemove())
    return ConversationHandler.END
| 1,514 |
9,680 | <gh_stars>1000+
import nni
def bar():
    """Report 0 to NNI as this trial's final result and return the call's value.

    NOTE(review): ``nni.report_final_result`` normally returns None, so the
    return value here is presumably only used by tests — confirm with callers.
    """
    return nni.report_final_result(0)
| 43 |
12,077 | <reponame>jacksmith15/faker<gh_stars>1000+
from collections import OrderedDict
from .. import Provider as InternetProvider
class Provider(InternetProvider):
    # Thai-locale internet provider: localized e-mail domains and TLDs.
    # Domains used when generating free e-mail addresses.
    free_email_domains = (
        "hotmail.com",
        "gmail.com",
        "outlook.com",
        "yahoo.com",
        "ymail.com",
        "kon.in.th",
        "icloud.com",
        "protonmail.com",
    )
    # Top-level domains with relative weights (e.g. "com" = 150); higher
    # values are presumably picked more often by the base provider's
    # weighted-choice logic — confirm against InternetProvider.
    tlds = OrderedDict(
        (
            ("in.th", 100),
            ("co.th", 80),
            ("go.th", 40),
            ("or.th", 40),
            ("ac.th", 20),
            ("net.th", 10),
            ("mi.th", 5),
            ("co", 10),
            ("net", 20),
            ("com", 150),
            ("org", 50),
        ),
    )
| 413 |
332 | from django.test import override_settings
from django.core.exceptions import ValidationError
from django.conf.urls import url
from django.core.cache import cache
from rest_framework.test import APIClient, APIRequestFactory
from rest_framework import status, filters
from .utils import MultipleModelTestCase
from .models import Play, Poem
from .serializers import PlaySerializer, PoemSerializer
from drf_multiple_model.views import ObjectMultipleModelAPIView
factory = APIRequestFactory()
class BasicObjectView(ObjectMultipleModelAPIView):
    # Simplest multiple-model view: every Play plus only sonnet-style Poems,
    # grouped under the default (model-name) labels.
    querylist = (
        {'queryset': Play.objects.all(), 'serializer_class': PlaySerializer},
        {'queryset': Poem.objects.filter(style="Sonnet"), 'serializer_class': PoemSerializer},
    )
class CustomLabelView(ObjectMultipleModelAPIView):
    # Same data as the basic case, but grouped under explicit 'label' keys
    # ('Drama'/'Poetry') instead of the default model names.
    querylist = (
        {
            'queryset': Play.objects.all(),
            'serializer_class': PlaySerializer,
            'label': 'Drama',
        },
        {
            'queryset': Poem.objects.filter(style="Sonnet"),
            'serializer_class': PoemSerializer,
            'label': 'Poetry',
        },
    )
class DynamicQueryView(ObjectMultipleModelAPIView):
    # Builds the querylist per-request from the 'play' URL kwarg;
    # hyphens in the kwarg stand in for spaces in the play title.
    def get_querylist(self):
        title = self.kwargs['play'].replace('-', ' ')
        querylist = (
            {'queryset': Play.objects.filter(title=title), 'serializer_class': PlaySerializer},
            {'queryset': Poem.objects.filter(style="Sonnet"), 'serializer_class': PoemSerializer},
        )
        return querylist
class SearchFilterView(BasicObjectView):
    # Applies DRF's SearchFilter (searching 'title') to every queryset.
    filter_backends = (filters.SearchFilter,)
    search_fields = ('title',)
# Testing filter_fn
def title_without_letter(queryset, request, *args, **kwargs):
    """querylist ``filter_fn``: drop rows whose title contains the letter
    given in the request's ``letter`` query parameter (case-insensitive)."""
    return queryset.exclude(title__icontains=request.query_params['letter'])
class FilterFnView(ObjectMultipleModelAPIView):
    # Per-entry filtering: only the Play queryset carries a 'filter_fn'
    # (title_without_letter); the Poem queryset is served unfiltered.
    querylist = (
        {
            'queryset': Play.objects.all(),
            'serializer_class': PlaySerializer,
            'filter_fn': title_without_letter,
        },
        {
            'queryset': Poem.objects.filter(style="Sonnet"),
            'serializer_class': PoemSerializer,
        },
    )
class CachedQueryView(ObjectMultipleModelAPIView):
    # Demonstrates overriding load_queryset() to serve querysets from the
    # Django cache instead of hitting the database on every request.
    querylist = (
        {'queryset': Play.objects.all(), 'serializer_class': PlaySerializer},
        {'queryset': Poem.objects.filter(style="Sonnet"), 'serializer_class': PoemSerializer},
    )
    def load_queryset(self, query_data, request, *args, **kwargs):
        # Cache key is derived from the model name, e.g. "Play-queryset".
        queryset = cache.get('{}-queryset'.format(query_data['queryset'].model.__name__))
        if not queryset:
            # Cache miss: evaluate and store. NOTE(review): an empty queryset
            # is falsy, so empty results are re-fetched on every request —
            # presumably acceptable for this test fixture.
            queryset = query_data['queryset'].all()
            cache.set('{}-queryset'.format(query_data['queryset'].model.__name__), queryset)
        return queryset
# Broken Views
class NoQuerylistView(ObjectMultipleModelAPIView):
    # Intentionally broken: no querylist and no get_querylist() override;
    # used by the tests to assert the view's AssertionError message.
    pass
class NoQuerysetView(ObjectMultipleModelAPIView):
    # Intentionally broken: entries lack the required 'queryset' key;
    # used by the tests to assert the ValidationError message.
    querylist = [
        {'serializer_class': PlaySerializer},
        {'serializer_class': PoemSerializer},
    ]
class NoSerializerClassView(ObjectMultipleModelAPIView):
    # Intentionally broken: entries lack the required 'serializer_class'
    # key; used by the tests to assert the ValidationError message.
    querylist = [
        {'queryset': Play.objects.all()},
        {'queryset': Poem.objects.all()},
    ]
urlpatterns = [
url(r'^$', BasicObjectView.as_view()),
]
# TESTS
@override_settings(ROOT_URLCONF=__name__)
class TestMMObjectViews(MultipleModelTestCase):
maxDiff = None
def test_post(self):
"""
POST requests should throw a 405 Error
"""
view = BasicObjectView.as_view()
data = {'fake': 'data'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(0):
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "POST" not allowed.'})
def test_put(self):
"""
PUT requests should throw a 405 Error
"""
view = BasicObjectView.as_view()
data = {'fake': 'data'}
request = factory.put('/', data, format='json')
with self.assertNumQueries(0):
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "PUT" not allowed.'})
def test_delete(self):
"""
DELETE requests should throw a 405 Error
"""
view = BasicObjectView.as_view()
request = factory.delete('/')
with self.assertNumQueries(0):
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertEqual(response.data, {"detail": 'Method "DELETE" not allowed.'})
def test_no_querylist(self):
"""
A view with no querylist and no `get_querylist` overwrite should raise
an assertion error with the appropriate message
"""
view = NoQuerylistView.as_view()
request = factory.get('/')
with self.assertRaises(AssertionError) as error:
view(request).render()
self.assertEqual(str(error.exception), (
'NoQuerylistView should either include a `querylist` attribute, '
'or override the `get_querylist()` method.'
))
def test_no_queryset(self):
"""
A querylist with no `queryset` key should raise a ValidationError with the
appropriate message
"""
view = NoQuerysetView.as_view()
request = factory.get('/')
with self.assertRaises(ValidationError) as error:
view(request).render()
self.assertEqual(error.exception.message, (
'All items in the NoQuerysetView querylist attribute '
'should contain a `queryset` key'
))
def test_no_serializer_class(self):
"""
A querylist with no `serializer_class` key should raise a ValidationError with the
appropriate message
"""
view = NoSerializerClassView.as_view()
request = factory.get('/')
with self.assertRaises(ValidationError) as error:
view(request).render()
self.assertEqual(error.exception.message, (
'All items in the NoSerializerClassView querylist attribute '
'should contain a `serializer_class` key'
))
def test_basic_object_view(self):
"""
The default setting for the `ObjectMultipleModelView` should return
the serialized objects in querylist order
"""
view = BasicObjectView.as_view()
request = factory.get('/')
with self.assertNumQueries(2):
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertEqual(response.data, {
'Play': [
{'title': 'Romeo And Juliet', 'genre': 'Tragedy', 'year': 1597},
{'title': "A Midsummer Night's Dream", 'genre': 'Comedy', 'year': 1600},
{'title': '<NAME>', 'genre': 'Tragedy', 'year': 1623},
{'title': 'As You Like It', 'genre': 'Comedy', 'year': 1623},
],
'Poem': [
{'title': "Shall I compare thee to a summer's day?", 'style': 'Sonnet'},
{'title': "As a decrepit father takes delight", 'style': 'Sonnet'}
]
})
def test_new_labels(self):
"""
Adding the 'label' key to querylist elements should use those labels
instead of the model names
"""
view = CustomLabelView.as_view()
request = factory.get('/')
with self.assertNumQueries(2):
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertEqual(response.data, {
'Drama': [
{'title': 'Romeo And Juliet', 'genre': 'Tragedy', 'year': 1597},
{'title': "A Midsummer Night's Dream", 'genre': 'Comedy', 'year': 1600},
{'title': '<NAME>', 'genre': 'Tragedy', 'year': 1623},
{'title': 'As You Like It', 'genre': 'Comedy', 'year': 1623},
],
'Poetry': [
{'title': "Shall I compare thee to a summer's day?", 'style': 'Sonnet'},
{'title': "As a decrepit father takes delight", 'style': 'Sonnet'}
]
})
def test_filter_fn_view(self):
"""
The filter function is useful if you want to apply filtering to one query
but not another (unlike adding view level filtering, which will filter all the
querysets), but that filtering can't be provided at the beginning (for example, it
needs to access a query_param). This is testing the filter_fn.
"""
view = FilterFnView.as_view()
request = factory.get('/', {'letter': 'o'})
with self.assertNumQueries(2):
response = view(request).render()
# Check that the plays have been filter to remove those with the letter 'o'
# But the poems haven't been affected
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'Play': [
{'title': "A Midsummer Night's Dream", 'genre': 'Comedy', 'year': 1600},
{'title': '<NAME>', 'genre': 'Tragedy', 'year': 1623},
],
'Poem': [
{'title': "Shall I compare thee to a summer's day?", 'style': 'Sonnet'},
{'title': "As a decrepit father takes delight", 'style': 'Sonnet'}
]
})
def test_dynamic_querylist(self):
"""
using get_querylist allows the construction of dynamic queryLists
"""
view = DynamicQueryView.as_view()
request = factory.get('/Julius-Caesar')
with self.assertNumQueries(2):
response = view(request, play="Julius-Caesar")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertEqual(response.data, {
'Play': [
{'title': '<NAME>', 'genre': 'Tragedy', 'year': 1623},
],
'Poem': [
{'title': "Shall I compare thee to a summer's day?", 'style': 'Sonnet'},
{'title': "As a decrepit father takes delight", 'style': 'Sonnet'}
]
})
def test_cached_querylist(self):
view = CachedQueryView.as_view()
request = factory.get('/Julius-Caesar')
with self.assertNumQueries(2):
response = view(request)
with self.assertNumQueries(0):
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'Play': [
{'title': '<NAME>', 'genre': 'Tragedy', 'year': 1597},
{'title': "A Midsummer Night's Dream", 'genre': 'Comedy', 'year': 1600},
{'title': '<NAME>', 'genre': 'Tragedy', 'year': 1623},
{'title': 'As You Like It', 'genre': 'Comedy', 'year': 1623},
],
'Poem': [
{'title': "Shall I compare thee to a summer's day?", 'style': 'Sonnet'},
{'title': "As a decrepit father takes delight", 'style': 'Sonnet'}
]
})
def test_search_filter_view(self):
"""
Tests use of built in DRF filtering with ObjectMultipleModelAPIView
"""
view = SearchFilterView.as_view()
request = factory.get('/', {'search': 'as'})
with self.assertNumQueries(2):
response = view(request).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'Play': [
{'title': 'As You Like It', 'genre': 'Comedy', 'year': 1623},
],
'Poem': [
{'title': "As a decrepit father takes delight", 'style': 'Sonnet'}
]
})
def test_url_endpoint(self):
"""
DRF 3.3 broke the MultipleModelAPIView with a load_queryset call
This test is to replicate (and then fix) that problem
"""
client = APIClient()
response = client.get('/', format='api')
self.assertEqual(response.status_code, status.HTTP_200_OK)
| 5,549 |
652 | <filename>Lib/parallel/http/server.py
from async.http.server import *
| 23 |
3,227 | #include <CGAL/Simple_cartesian.h>
#include <CGAL/Surface_mesh.h>
#include <CGAL/boost/graph/graph_traits_Surface_mesh.h>
#include <CGAL/subdivision_method_3.h>
#include <CGAL/Timer.h>
#include <boost/lexical_cast.hpp>
#include <iostream>
#include <fstream>
typedef CGAL::Simple_cartesian<double> Kernel;
typedef CGAL::Surface_mesh<Kernel::Point_3> PolygonMesh;
using namespace std;
using namespace CGAL;
namespace params = CGAL::parameters;
// Runs CGAL's sqrt(3) subdivision on an input .off mesh and writes the
// refined mesh to an output .off file.
// Fixes: the usage text claimed "default: 1" for the depth while the code
// defaults to 2, and quoted a stale default input path; the output stream
// is now checked before writing.
int main(int argc, char **argv) {
  if (argc > 4) {
    cerr << "Usage: Sqrt3_subdivision [d] [filename_in] [filename_out] \n";
    cerr << "         d -- the depth of the subdivision (default: 2) \n";
    cerr << "         filename_in -- the input mesh (.off) (default: meshes/quint_tris.off) \n";
    cerr << "         filename_out -- the output mesh (.off) (default: result.off)" << endl;
    return 1;
  }
  // Subdivision depth: number of sqrt(3) iterations to apply.
  int d = (argc > 1) ? boost::lexical_cast<int>(argv[1]) : 2;
  const std::string in_file = (argc > 2) ? argv[2] : CGAL::data_file_path("meshes/quint_tris.off");
  const char* out_file = (argc > 3) ? argv[3] : "result.off";
  PolygonMesh pmesh;
  std::ifstream in(in_file);
  if(in.fail()) {
    std::cerr << "Could not open input file " << in_file << std::endl;
    return 1;
  }
  in >> pmesh;
  Timer t;
  t.start();
  Subdivision_method_3::Sqrt3_subdivision(pmesh, params::number_of_iterations(d));
  std::cerr << "Done (" << t.time() << " s)" << std::endl;
  std::ofstream out(out_file);
  // Robustness: report an unwritable destination instead of silently
  // producing no output.
  if (out.fail()) {
    std::cerr << "Could not open output file " << out_file << std::endl;
    return 1;
  }
  out << pmesh;
  return 0;
}
| 633 |
1,350 | /**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
*/
package com.microsoft.azure.cognitiveservices.search.entitysearch.models;
import java.util.Collection;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.microsoft.rest.ExpandableStringEnum;
/**
* Defines values for EntityScenario.
*/
public final class EntityScenario extends ExpandableStringEnum<EntityScenario> {
/** Static value DominantEntity for EntityScenario. */
public static final EntityScenario DOMINANT_ENTITY = fromString("DominantEntity");
/** Static value DisambiguationItem for EntityScenario. */
public static final EntityScenario DISAMBIGUATION_ITEM = fromString("DisambiguationItem");
/** Static value ListItem for EntityScenario. */
public static final EntityScenario LIST_ITEM = fromString("ListItem");
/**
* Creates or finds a EntityScenario from its string representation.
* @param name a name to look for
* @return the corresponding EntityScenario
*/
@JsonCreator
public static EntityScenario fromString(String name) {
return fromString(name, EntityScenario.class);
}
/**
* @return known EntityScenario values
*/
public static Collection<EntityScenario> values() {
return values(EntityScenario.class);
}
}
| 447 |
652 | <gh_stars>100-1000
{
"name": "dependency-with-global",
"version": "0.0.0",
"description": "Example of using browserify-shim with a dependency that requires a separately loaded global.",
"main": "./js/entry.js",
"browserify-shim": {
"jquery": "global:$"
},
"browserify": {
"transform": [
"browserify-shim"
]
},
"repository": "",
"author": "<NAME>",
"license": "BSD",
"dependencies": {
"request": "~2.88.0"
},
"devDependencies": {
"browserify": "~2.36.1",
"browserify-shim": "~3.2.0"
}
}
| 235 |
20,995 | // Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
#define V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
// Deserializes the read-only blob, creating the read-only roots and the
// Read-only object cache used by the other deserializers.
class ReadOnlyDeserializer final : public Deserializer<Isolate> {
 public:
  // |data| is the read-only snapshot payload; |can_rehash| allows hash
  // seeds to be recomputed after deserialization and is forwarded to the
  // base Deserializer. NOTE(review): the hard-coded 'false' presumably
  // corresponds to the base class's deserializing-user-code flag — confirm
  // against Deserializer's constructor signature.
  explicit ReadOnlyDeserializer(Isolate* isolate, const SnapshotData* data,
                                bool can_rehash)
      : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), false,
                     can_rehash) {}
  // Deserialize the snapshot into an empty heap.
  void DeserializeIntoIsolate();
};
} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
| 387 |
1,444 | <gh_stars>1000+
package mage.cards.l;
import java.util.UUID;
import mage.abilities.common.DiscardsACardOpponentTriggeredAbility;
import mage.abilities.effects.common.LoseLifeTargetEffect;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.SetTargetPointer;
/**
 * Liliana's Caress — {1}{B} enchantment.
 * "Whenever an opponent discards a card, that player loses 2 life."
 *
 * @author <EMAIL>
 */
public final class LilianasCaress extends CardImpl {
    public LilianasCaress(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.ENCHANTMENT}, "{1}{B}");
        // Whenever an opponent discards a card, that player loses 2 life.
        // SetTargetPointer.PLAYER points the life loss at the discarding
        // player; the boolean flag is presumably the "optional" trigger
        // switch — confirm against DiscardsACardOpponentTriggeredAbility.
        this.addAbility(new DiscardsACardOpponentTriggeredAbility(new LoseLifeTargetEffect(2), false, SetTargetPointer.PLAYER));
    }
    // Copy constructor used by the engine when cloning game state.
    private LilianasCaress(final LilianasCaress card) {
        super(card);
    }
    @Override
    public LilianasCaress copy() {
        return new LilianasCaress(this);
    }
}
| 342 |
960 | /**
* Copyright (2018) Baidu Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Auth: <NAME>(<EMAIL>)
* Desc: wifi config related report log
*/
#ifndef BAIDU_DUER_LIGHTDUER_DS_LOG_WIFI_CONFIG_H
#define BAIDU_DUER_LIGHTDUER_DS_LOG_WIFI_CONFIG_H
#include "baidu_json.h"
#include "lightduer_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Log codes reported during wifi configuration.
 * NOTE(review): 0x1xx values appear to be progress events and 0x4xx values
 * failures — confirm against the reporting backend's code ranges.
 */
typedef enum _duer_ds_log_wifi_cfg_code {
    DUER_DS_LOG_WIFI_CFG_START = 0x101,
    DUER_DS_LOG_WIFI_CFG_SCAN_RESLUTS = 0x102,
    DUER_DS_LOG_WIFI_CFG_LOCKED = 0x103,
    DUER_DS_LOG_WIFI_CFG_CONNECTING = 0x104,
    DUER_DS_LOG_WIFI_CFG_CONNECTED = 0x105,
    DUER_DS_LOG_WIFI_CFG_FINISHED = 0x106,
    DUER_DS_LOG_WIFI_CFG_TIMEOUT = 0x401,
    DUER_DS_LOG_WIFI_CFG_FAILED = 0x402,
} duer_ds_log_wifi_cfg_code_t;
/* Result/error codes for a wifi configuration session. */
typedef enum _duer_wifi_cfg_err_code {
    DUER_WIFI_CFG_SUCCESS = 0,
    DUER_WIFI_CFG_STOP = 1,
    DUER_WIFI_CFG_ERR_NORMAL = 2,
    DUER_WIFI_CFG_ERR_NO_MEMORY = 3,
    DUER_WIFI_CFG_ERR_SSID_INVALID = 4,
    DUER_WIFI_CFG_ERR_HANDSHARK = 5,
} duer_wifi_cfg_err_code_t;
/* Method used to provision wifi credentials to the device. */
typedef enum _duer_wifi_cfg_method {
    DUER_WIFI_CFG_UNKOWN = 0,
    DUER_WIFI_CFG_AIRKISS = 1,
    DUER_WIFI_CFG_SMART_CONFIG = 2,
    DUER_WIFI_CFG_SONIC = 3,
    DUER_WIFI_CFG_BLUE_TOOTH = 4,
} duer_wifi_cfg_method_t;
/*
 * Guard macro: bail out of the enclosing function with DUER_ERR_FAILED if
 * duer_ds_log_wifi_cfg_start() has not been called yet (session id == 0).
 */
#define CHECK_WIFI_CFG_ID() \
    if (duer_ds_log_wifi_cfg_get_id() == 0) { \
        DUER_LOGE("should call duer_ds_log_wifi_cfg_start first"); \
        return DUER_ERR_FAILED; \
    }
/**
* common function to report the log of wifi config
*/
duer_status_t duer_ds_log_wifi_cfg(duer_ds_log_wifi_cfg_code_t log_code,
const baidu_json *message);
/**
* report the log that wifi config start
*/
duer_status_t duer_ds_log_wifi_cfg_start(void);
/**
* report the message for log code DUER_DS_LOG_WIFI_CFG_SCAN_RESLUTS
* {
* "list": [
* "SSID名称1", "SSID名称2", ...
* ],
* }
*/
duer_status_t duer_ds_log_wifi_cfg_scan_resluts(const char **ssids, duer_size_t num);
/**
* report the log that wifi config finished
*/
duer_status_t duer_ds_log_wifi_cfg_finished(duer_wifi_cfg_err_code_t code,
duer_wifi_cfg_method_t method);
/**
* report the log that wifi config failed
*/
duer_status_t duer_ds_log_wifi_cfg_failed(duer_wifi_cfg_err_code_t err_code, int rssi);
/**
* get the id of wifi config
*/
int duer_ds_log_wifi_cfg_get_id(void);
#ifdef __cplusplus
}
#endif
#endif // BAIDU_DUER_LIGHTDUER_DS_LOG_WIFI_CONFIG_H
| 1,580 |
3,783 | package contests.facebookHC;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
/**
* Created by sherxon on 1/6/17.
*/
/**
 * Facebook Hacker Cup "Progress Pie": for each query (p, x, y), decide
 * whether the point (x, y) lies inside the filled sector of a pie that is
 * p percent complete — a circle of radius 50 centred at (50, 50), with the
 * sector swept clockwise from 12 o'clock.
 */
public class ProgressPie {
    public static void main(String[] args) {
        FastReader fastReader= new FastReader();
        // First line of stdin: number of test cases.
        int n=Integer.parseInt(fastReader.nextLine());
        for (int i = 1; i <=n; i++) {
            // Each case is one line: "P X Y".
            String[] s=fastReader.nextLine().split(" ");
            int p=Integer.parseInt(s[0]);
            int x=Integer.parseInt(s[1]);
            int y=Integer.parseInt(s[2]);
            calc(p, x, y, i);
        }
    }
    /**
     * Prints "Case #i: black" when (x, y) is inside the filled sector,
     * "Case #i: white" otherwise.
     */
    private static void calc(int p, int x, int y, int i) {
        // Total sector sweep in degrees: p percent of a full 360.
        double startAnkle = p*3.6;
        // atan2(50, 0) == pi/2, i.e. 90 degrees (straight up from centre).
        double angle1 = Math.atan2(50, 0);
        double angle2 = Math.atan2(y - 50, x - 50);
        double radius = 50;
        // Angle of (x, y) measured clockwise from 12 o'clock, normalised to
        // [0, 360). Assumes the y-axis grows upward.
        float calculatedAngle = (float) Math.toDegrees(angle1 - angle2);
        if (calculatedAngle < 0) calculatedAngle += 360;
        // Euclidean distance from the pie centre (50, 50).
        double ra = Math.sqrt((50-x) * (50-x) + (50-y) * (50-y));
        // NOTE(review): strict comparisons classify boundary points (on the
        // rim or exactly on the sector edge) as white — confirm this matches
        // the intended judge behaviour.
        if(startAnkle > calculatedAngle && ra < radius)
            System.out.println("Case #" + i +": black");
        else
            System.out.println("Case #" + i +": white");
    }
    /** Minimal buffered stdin reader. */
    private static class FastReader {
        BufferedReader bf;
        StringTokenizer st;
        public FastReader() {
            bf=new BufferedReader(new InputStreamReader(System.in));
        }
        /** Reads one full line ("" on IOException, null at EOF). */
        public String nextLine(){
            // NOTE: this local 'st' shadows the StringTokenizer field.
            String st="";
            try {
                st=bf.readLine();
            } catch (IOException e) {
                e.printStackTrace();
            }
            return st;
        }
        /** Returns the next whitespace-separated token, reading more lines
         *  as needed. */
        public String next(){
            while (st==null || !st.hasMoreTokens()){
                try {
                    st= new StringTokenizer(bf.readLine());
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            return st.nextToken();
        }
        /** Parses the next token as an int. */
        public int nextInt(){
            return Integer.parseInt(next());
        }
    }
}
| 1,053 |
2,151 | <reponame>zipated/src
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/app_list/views/search_result_list_view.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "ash/app_list/model/search/search_result.h"
#include "base/bind.h"
#include "base/time/time.h"
#include "ui/app_list/app_list_metrics.h"
#include "ui/app_list/app_list_view_delegate.h"
#include "ui/app_list/views/app_list_main_view.h"
#include "ui/app_list/views/search_result_view.h"
#include "ui/events/event.h"
#include "ui/gfx/animation/linear_animation.h"
#include "ui/gfx/geometry/insets.h"
#include "ui/views/background.h"
#include "ui/views/layout/box_layout.h"
namespace {
constexpr int kMaxResults = 5;
} // namespace
namespace app_list {
SearchResultListView::SearchResultListView(AppListMainView* main_view,
AppListViewDelegate* view_delegate)
: main_view_(main_view),
view_delegate_(view_delegate),
results_container_(new views::View) {
results_container_->SetLayoutManager(
std::make_unique<views::BoxLayout>(views::BoxLayout::kVertical));
for (int i = 0; i < kMaxResults; ++i) {
search_result_views_.emplace_back(
new SearchResultView(this, view_delegate_));
results_container_->AddChildView(search_result_views_.back());
}
AddChildView(results_container_);
}
SearchResultListView::~SearchResultListView() {}
bool SearchResultListView::IsResultViewSelected(
const SearchResultView* result_view) const {
if (selected_index() < 0)
return false;
return static_cast<const SearchResultView*>(
results_container_->child_at(selected_index())) == result_view;
}
SearchResultView* SearchResultListView::GetResultViewAt(size_t index) {
DCHECK(index >= 0 && index < search_result_views_.size());
return search_result_views_[index];
}
void SearchResultListView::ListItemsRemoved(size_t start, size_t count) {
size_t last = std::min(start + count, search_result_views_.size());
for (size_t i = start; i < last; ++i)
GetResultViewAt(i)->ClearResultNoRepaint();
SearchResultContainerView::ListItemsRemoved(start, count);
}
void SearchResultListView::OnContainerSelected(bool from_bottom,
bool /*directional_movement*/) {
if (num_results() == 0)
return;
SetSelectedIndex(from_bottom ? num_results() - 1 : 0);
}
void SearchResultListView::NotifyFirstResultYIndex(int y_index) {
for (size_t i = 0; i < static_cast<size_t>(num_results()); ++i)
GetResultViewAt(i)->result()->set_distance_from_origin(i + y_index);
}
int SearchResultListView::GetYSize() {
return num_results();
}
views::View* SearchResultListView::GetSelectedView() {
return IsValidSelectionIndex(selected_index())
? GetResultViewAt(selected_index())
: nullptr;
}
SearchResultBaseView* SearchResultListView::GetFirstResultView() {
DCHECK(results_container_->has_children());
return num_results() <= 0 ? nullptr : search_result_views_[0];
}
// Refreshes the pre-allocated child views from the current search results
// and returns the number of results displayed.
int SearchResultListView::DoUpdate() {
  // Keep only kList-type results, capped at the number of child views.
  std::vector<SearchResult*> display_results =
      SearchModel::FilterSearchResultsByDisplayType(
          results(), ash::SearchResultDisplayType::kList,
          results_container_->child_count());
  for (size_t i = 0; i < static_cast<size_t>(results_container_->child_count());
       ++i) {
    SearchResultView* result_view = GetResultViewAt(i);
    // Marks the view holding the final result. With an empty result list,
    // size() - 1 wraps around as size_t so no view matches — harmless,
    // since all views are hidden below.
    result_view->set_is_last_result(i == display_results.size() - 1);
    if (i < display_results.size()) {
      result_view->SetResult(display_results[i]);
      result_view->SetVisible(true);
    } else {
      // Surplus slots are cleared and hidden rather than destroyed.
      result_view->SetResult(NULL);
      result_view->SetVisible(false);
    }
  }
  // The container's score mirrors the top result's score (0 when empty).
  set_container_score(
      display_results.empty() ? 0 : display_results.front()->display_score());
  return display_results.size();
}
void SearchResultListView::UpdateSelectedIndex(int old_selected,
int new_selected) {
if (old_selected >= 0) {
SearchResultView* selected_view = GetResultViewAt(old_selected);
selected_view->ClearSelectedAction();
selected_view->SchedulePaint();
}
if (new_selected >= 0) {
SearchResultView* selected_view = GetResultViewAt(new_selected);
ScrollRectToVisible(selected_view->bounds());
selected_view->ClearSelectedAction();
selected_view->SchedulePaint();
selected_view->NotifyAccessibilityEvent(ax::mojom::Event::kSelection, true);
}
}
void SearchResultListView::Layout() {
results_container_->SetBoundsRect(GetLocalBounds());
}
gfx::Size SearchResultListView::CalculatePreferredSize() const {
return results_container_->GetPreferredSize();
}
const char* SearchResultListView::GetClassName() const {
return "SearchResultListView";
}
int SearchResultListView::GetHeightForWidth(int w) const {
return results_container_->GetHeightForWidth(w);
}
void SearchResultListView::SearchResultActivated(SearchResultView* view,
int event_flags) {
if (view_delegate_ && view->result()) {
RecordSearchResultOpenSource(view->result(), view_delegate_->GetModel(),
view_delegate_->GetSearchModel());
view_delegate_->OpenSearchResult(view->result()->id(), event_flags);
}
}
void SearchResultListView::SearchResultActionActivated(SearchResultView* view,
size_t action_index,
int event_flags) {
if (view_delegate_ && view->result()) {
view_delegate_->InvokeSearchResultAction(view->result()->id(), action_index,
event_flags);
}
}
void SearchResultListView::OnSearchResultInstalled(SearchResultView* view) {
if (main_view_ && view->result())
main_view_->OnResultInstalled(view->result());
}
} // namespace app_list
| 2,348 |
2,690 | <reponame>PushyamiKaveti/kalibr
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2008, <NAME>, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of <NAME>, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#ifndef ASLAM_TIME_IMPL_H_INCLUDED
#define ASLAM_TIME_IMPL_H_INCLUDED
/*********************************************************************
** Headers
*********************************************************************/
//#include <ros/platform.h>
#include <iostream>
#include <cmath>
//#include <ros/exception.h>
//#include <aslam/time.h>
#include <boost/date_time/posix_time/posix_time.hpp>
/*********************************************************************
** Cross Platform Headers
*********************************************************************/
#ifdef WIN32
#include <sys/timeb.h>
#else
#include <sys/time.h>
#endif
namespace aslam {
template<class T, class D>
T& TimeBase<T, D>::fromNSec(uint64_t t) {
sec = (int32_t)(t / 1000000000);
nsec = (int32_t)(t % 1000000000);
normalizeSecNSec(sec, nsec);
return *static_cast<T*>(this);
}
template<class T, class D>
D TimeBase<T, D>::operator-(const T &rhs) const {
return D((int32_t) sec - (int32_t) rhs.sec,
(int32_t) nsec - (int32_t) rhs.nsec); // carry handled in ctor
}
template<class T, class D>
T TimeBase<T, D>::operator-(const D &rhs) const {
return *static_cast<const T*>(this) + (-rhs);
}
template<class T, class D>
T TimeBase<T, D>::operator+(const D &rhs) const {
int64_t sec_sum = (int64_t) sec + (int64_t) rhs.sec;
int64_t nsec_sum = (int64_t) nsec + (int64_t) rhs.nsec;
// Throws an exception if we go out of 32-bit range
normalizeSecNSecUnsigned(sec_sum, nsec_sum);
// now, it's safe to downcast back to uint32 bits
return T((uint32_t) sec_sum, (uint32_t) nsec_sum);
}
template<class T, class D>
T& TimeBase<T, D>::operator+=(const D &rhs) {
*this = *this + rhs;
return *static_cast<T*>(this);
}
template<class T, class D>
T& TimeBase<T, D>::operator-=(const D &rhs) {
*this += (-rhs);
return *static_cast<T*>(this);
}
template<class T, class D>
bool TimeBase<T, D>::operator==(const T &rhs) const {
return sec == rhs.sec && nsec == rhs.nsec;
}
template<class T, class D>
bool TimeBase<T, D>::operator<(const T &rhs) const {
if (sec < rhs.sec)
return true;
else if (sec == rhs.sec && nsec < rhs.nsec)
return true;
return false;
}
template<class T, class D>
bool TimeBase<T, D>::operator>(const T &rhs) const {
if (sec > rhs.sec)
return true;
else if (sec == rhs.sec && nsec > rhs.nsec)
return true;
return false;
}
template<class T, class D>
bool TimeBase<T, D>::operator<=(const T &rhs) const {
if (sec < rhs.sec)
return true;
else if (sec == rhs.sec && nsec <= rhs.nsec)
return true;
return false;
}
template<class T, class D>
bool TimeBase<T, D>::operator>=(const T &rhs) const {
if (sec > rhs.sec)
return true;
else if (sec == rhs.sec && nsec >= rhs.nsec)
return true;
return false;
}
// Converts this time point to a boost::posix_time::ptime.
template<class T, class D>
boost::posix_time::ptime TimeBase<T, D>::toBoost() const {
  namespace pt = boost::posix_time;
#if defined(BOOST_DATE_TIME_HAS_NANOSECONDS)
  return pt::from_time_t(sec) + pt::nanoseconds(nsec);
#else
  // Without nanosecond resolution in boost, truncate to whole microseconds.
  return pt::from_time_t(sec) + pt::microseconds(static_cast<int32_t>(nsec / 1000.0));
#endif
}
}
#endif // ASLAM_IMPL_TIME_H_INCLUDED
| 1,752 |
353 | <reponame>rosrad/lhotse<filename>lhotse/augmentation/utils.py
import torch
try:
# Pytorch >= 1.7
from torch.fft import rfft, irfft
except ImportError:
from torch import rfft, irfft
# Implementation based on torch-audiomentations:
# https://github.com/asteroid-team/torch-audiomentations/blob/master/torch_audiomentations/utils/convolution.py
# Memo cache: maps a requested size to its next 5-smooth length.
_NEXT_FAST_LEN = {}


def next_fast_len(size):
    """
    Return the smallest number ``n >= size`` whose only prime factors are
    2, 3 and 5 (a "5-smooth" number). Such sizes are efficient for fast
    fourier transforms. Equivalent to :func:`scipy.fftpack.next_fast_len`.

    Note: This function was originally copied from the https://github.com/pyro-ppl/pyro
    repository, where the license was Apache 2.0. Any modifications to the original code can be
    found at https://github.com/asteroid-team/torch-audiomentations/commits

    :param int size: A positive number.
    :returns: A possibly larger number.
    :rtype int:
    """
    cached = _NEXT_FAST_LEN.get(size)
    if cached is not None:
        return cached
    assert isinstance(size, int) and size > 0
    candidate = size
    while True:
        residue = candidate
        for prime in (2, 3, 5):
            while residue % prime == 0:
                residue //= prime
        if residue == 1:
            # Memoize so repeated FFT-size lookups are O(1).
            _NEXT_FAST_LEN[size] = candidate
            return candidate
        candidate += 1
def convolve1d(signal: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
    """
    Computes the 1-d convolution of signal by kernel using FFTs.

    Both signal and kernel must be 1-dimensional.

    :param torch.Tensor signal: A signal to convolve.
    :param torch.Tensor kernel: A convolution kernel.
    :return: torch.Tensor Convolution of signal with kernel. Returns the full convolution, i.e.,
        the output tensor will have size m + n - 1, where m is the length of the
        signal and n is the length of the kernel.
    """
    assert (
        signal.ndim == 1 and kernel.ndim == 1
    ), "signal and kernel must be 1-dimensional"
    m = signal.size(-1)
    n = kernel.size(-1)
    # Length of the full (non-truncated) convolution.
    padded_size = m + n - 1
    # Round up to a 5-smooth length for a cheaper FFT.
    # (Renamed from the original's misspelled `fast_ftt_size`.)
    fast_fft_size = next_fast_len(padded_size)
    # Convolution theorem: multiply in the frequency domain. `n=` zero-pads
    # both inputs to the common FFT length.
    f_signal = rfft(signal, n=fast_fft_size)
    f_kernel = rfft(kernel, n=fast_fft_size)
    f_result = f_signal * f_kernel
    result = irfft(f_result, n=fast_fft_size)
    # Drop the extra samples introduced by rounding up the FFT size.
    return result[:padded_size]
| 992 |
923 | <gh_stars>100-1000
package core.languageHandler.compiler;
import java.io.File;
import java.util.logging.Level;
import java.util.logging.Logger;
import argo.jdom.JsonNode;
import argo.jdom.JsonNodeFactories;
import core.languageHandler.Language;
/**
 * Compiler backend for C# scripts. The actual compilation is delegated to a
 * remote agent via {@link AbstractRemoteNativeCompiler}; this class only
 * supplies the C#-specific metadata (language id, file extensions, logging).
 */
public class CSharpRemoteCompiler extends AbstractRemoteNativeCompiler {

    // Nominal source path; see canSetPath() — the path is not meant to be
    // user-configurable for this compiler.
    private File path;

    // Instance initializer: enable all log levels for this compiler's logger.
    {
        getLogger().setLevel(Level.ALL);
    }

    /**
     * @param objectFileDirectory directory where compiled object (.dll) files live
     */
    public CSharpRemoteCompiler(File objectFileDirectory) {
        super(objectFileDirectory);
        path = new File(".");
    }

    /** The compiler path is fixed; UIs should not offer a path setting. */
    @Override
    public boolean canSetPath() {
        return false;
    }

    // NOTE(review): stores the path and reports success even though
    // canSetPath() is false — presumably callers check canSetPath() first;
    // confirm before relying on this.
    @Override
    public boolean setPath(File file) {
        this.path = file;
        return true;
    }

    @Override
    public File getPath() {
        return path;
    }

    @Override
    public Language getName() {
        return Language.CSHARP;
    }

    /** Source file extension. */
    @Override
    public String getExtension() {
        return ".cs";
    }

    /** Compiled artifact extension (a .NET assembly). */
    @Override
    public String getObjectExtension() {
        return ".dll";
    }

    // No compiler-specific configuration: accept anything on parse, emit an
    // empty object on serialize.
    @Override
    public boolean parseCompilerSpecificArgs(JsonNode node) {
        return true;
    }

    @Override
    public JsonNode getCompilerSpecificArgs() {
        return JsonNodeFactories.object();
    }

    /** Prefix used for generated dummy/placeholder identifiers. */
    @Override
    protected String getDummyPrefix() {
        return "CS_";
    }

    @Override
    public Logger getLogger() {
        return Logger.getLogger(CSharpRemoteCompiler.class.getName());
    }

    // No local settings required for the remote compiler to operate.
    @Override
    protected boolean checkRemoteCompilerSettings() {
        return true;
    }
}
| 466 |
7,353 | /**
* @file bencryption_bench.c
* @author <NAME> <<EMAIL>>
*
* @section LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the author nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <limits.h>
#include <misc/balloc.h>
#include <security/BRandom.h>
#include <security/BEncryption.h>
#include <base/DebugObject.h>
/*
 * Prints command-line usage to stdout and terminates the process with a
 * failure status. Does not return.
 *
 * Fix: the usage line previously misspelled the placeholder as "<ciper>".
 */
static void usage (char *name)
{
    printf(
        "Usage: %s <enc/dec> <cipher> <num_blocks> <num_ops>\n"
        "       <cipher> is one of (blowfish, aes).\n",
        name
    );
    
    exit(1);
}
/*
 * Benchmark entry point: <enc/dec> <cipher> <num_blocks> <num_ops>.
 * Builds one unit of num_blocks cipher blocks with random contents and runs
 * num_ops cipher passes over it, ping-ponging between two buffers. Timing is
 * expected to be done externally (e.g. with `time`).
 */
int main (int argc, char **argv)
{
    /* argc can legitimately be 0 under exotic exec() calls; argv[0] would
       then be unusable for the usage message. */
    if (argc <= 0) {
        return 1;
    }
    
    if (argc != 5) {
        usage(argv[0]);
    }
    
    char *mode_str = argv[1];
    char *cipher_str = argv[2];
    
    int mode;
    int cipher = 0; // silence warning
    int num_blocks = atoi(argv[3]);
    int num_ops = atoi(argv[4]);
    
    /* Parse direction; usage() exits on anything unrecognized. */
    if (!strcmp(mode_str, "enc")) {
        mode = BENCRYPTION_MODE_ENCRYPT;
    }
    else if (!strcmp(mode_str, "dec")) {
        mode = BENCRYPTION_MODE_DECRYPT;
    }
    else {
        usage(argv[0]);
    }
    
    /* Parse cipher selection. */
    if (!strcmp(cipher_str, "blowfish")) {
        cipher = BENCRYPTION_CIPHER_BLOWFISH;
    }
    else if (!strcmp(cipher_str, "aes")) {
        cipher = BENCRYPTION_CIPHER_AES;
    }
    else {
        usage(argv[0]);
    }
    
    if (num_blocks < 0 || num_ops < 0) {
        usage(argv[0]);
    }
    
    /* Random key and IV sized for the chosen cipher. */
    int key_size = BEncryption_cipher_key_size(cipher);
    int block_size = BEncryption_cipher_block_size(cipher);
    
    uint8_t key[BENCRYPTION_MAX_KEY_SIZE];
    BRandom_randomize(key, key_size);
    
    uint8_t iv[BENCRYPTION_MAX_BLOCK_SIZE];
    BRandom_randomize(iv, block_size);
    
    /* Guard num_blocks * block_size against signed int overflow. */
    if (num_blocks > INT_MAX / block_size) {
        printf("too much");
        goto fail0;
    }
    int unit_size = num_blocks * block_size;
    
    printf("unit size %d\n", unit_size);
    
    uint8_t *buf1 = (uint8_t *)BAlloc(unit_size);
    if (!buf1) {
        printf("BAlloc failed");
        goto fail0;
    }
    
    uint8_t *buf2 = (uint8_t *)BAlloc(unit_size);
    if (!buf2) {
        printf("BAlloc failed");
        goto fail1;
    }
    
    BEncryption enc;
    BEncryption_Init(&enc, mode, cipher, key);
    
    /* Ping-pong buffers: each pass reads `in`, writes `out`, then they swap.
       NOTE(review): BEncryption_Encrypt is called even in "dec" mode —
       presumably the direction was fixed by `mode` at Init time; confirm. */
    uint8_t *in = buf1;
    uint8_t *out = buf2;
    BRandom_randomize(in, unit_size);
    for (int i = 0; i < num_ops; i++) {
        BEncryption_Encrypt(&enc, in, out, unit_size, iv);
        
        uint8_t *t = in;
        in = out;
        out = t;
    }
    
    BEncryption_Free(&enc);
    BFree(buf2);
fail1:
    BFree(buf1);
fail0:
    DebugObjectGlobal_Finish();
    
    /* NOTE(review): the failure paths (goto fail0/fail1) also reach this
       `return 0`, so the process exits successfully even when allocation
       fails — confirm whether a nonzero exit status is wanted. */
    return 0;
}
| 1,741 |
1,293 | /*
* Copyright (c) 2020 - Manifold Systems LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package manifold.internal.javac;
import com.sun.tools.javac.tree.JCTree;
import com.sun.tools.javac.tree.JCTree.*;
import com.sun.tools.javac.tree.TreeMaker;
import com.sun.tools.javac.tree.TreeTranslator;
import com.sun.tools.javac.util.Context;
import com.sun.tools.javac.util.List;
import com.sun.tools.javac.util.Names;
import static com.sun.tools.javac.code.Flags.*;
/**
 * Parse-phase tree translator that rewrites compound assignments
 * ({@code a += b}, {@code a -= b}, ...) into their expanded form
 * ({@code a = a + b}) so that Manifold operator overloading can later
 * resolve the binary operator to a method such as {@code plus()}.
 * <p/>
 * When the assignment target has a side-effecting subexpression (a receiver
 * or an array index), it is captured once in a temp variable via a "let"
 * expression so the expansion does not evaluate it twice.
 */
public class ParseProcessor extends TreeTranslator
{
  private final JavacPlugin _javacPlugin;

  public ParseProcessor( JavacPlugin javacPlugin )
  {
    _javacPlugin = javacPlugin;
  }

  // Monotonic counter used to generate unique temp variable names.
  private int tempVarIndex = 0;

  @Override
  public void visitAssignop( JCTree.JCAssignOp tree )
  {
    super.visitAssignop( tree );

    if( !JavacPlugin.instance().isExtensionsEnabled() )
    {
      // operator overloading requires manifold-ext
      return;
    }

    // transform a += b to a = a + b, so that operator overloading can use plus() to implement this

    TreeMaker make = _javacPlugin.getTreeMaker();

    // Non-assignment form of the operator, e.g. PLUS_ASG -> PLUS
    JCTree.Tag binop = tree.getTag().noAssignOp();

    JCExpression lhs = tree.lhs;
    if( lhs instanceof JCFieldAccess )
    {
      // expr.b += c
      // ---
      // var temp = expr;
      // temp.b = temp.b + c
      JCExpression fieldAccessLhs = ((JCFieldAccess)lhs).selected;
      List<JCTree> tempVars = List.nil();
      tempVarIndex++;
      Context context = JavacPlugin.instance().getContext();
      JCTree[] fieldAccessLhsTemp = tempify( false, fieldAccessLhs, make, fieldAccessLhs, context, "assignTransformLhsTemp" + tempVarIndex, tempVarIndex );
      if( fieldAccessLhsTemp != null )
      {
        tempVars = tempVars.append( fieldAccessLhsTemp[0] );
        fieldAccessLhs = (JCExpression)fieldAccessLhsTemp[1];
      }

      // Write side (assignment target) positioned at the original LHS...
      JCFieldAccess newFieldAccess = make.Select( fieldAccessLhs, ((JCFieldAccess)lhs).name );
      newFieldAccess.pos = tree.lhs.pos;

      // ...and the read side positioned at the RHS.
      JCFieldAccess newFieldAccess2 = make.Select( (JCExpression)fieldAccessLhs.clone(), ((JCFieldAccess)lhs).name );
      newFieldAccess2.pos = tree.rhs.pos;

      JCBinary binary = make.Binary( binop, newFieldAccess2, tree.rhs );
      binary.pos = tree.rhs.pos;

      JCAssign assign = make.Assign( newFieldAccess, binary );
      assign.pos = tree.pos;

      if( !tempVars.isEmpty() )
      {
        result = ILetExpr.makeLetExpr( make, tempVars, assign, null, tree.pos );
      }
      else
      {
        result = assign;
      }
    }
    else if( lhs instanceof JCArrayAccess )
    {
      // indexed[index] += c
      // ---
      // var temp1 = indexed;
      // var temp2 = index;
      // temp1[temp2] = temp1[temp2] + c
      Context context = JavacPlugin.instance().getContext();

      JCExpression indexed = ((JCArrayAccess)lhs).indexed;
      List<JCTree> tempVars = List.nil();
      tempVarIndex++;
      JCTree[] indexedTemp = tempify( false, indexed, make, indexed, context, "assignTransformLhsTemp" + tempVarIndex, tempVarIndex );
      if( indexedTemp != null )
      {
        tempVars = tempVars.append( indexedTemp[0] );
        indexed = (JCExpression)indexedTemp[1];
      }

      JCExpression index = ((JCArrayAccess)lhs).index;
      tempVarIndex++;
      JCTree[] indexTemp = tempify( false, index, make, index, context, "assignTransformLhsTemp" + tempVarIndex, tempVarIndex );
      if( indexTemp != null )
      {
        tempVars = tempVars.append( indexTemp[0] );
        index = (JCExpression)indexTemp[1];
      }

      JCArrayAccess newArrayAccess = make.Indexed( indexed, index );
      newArrayAccess.pos = tree.lhs.pos;

      JCArrayAccess newArrayAccess2 = make.Indexed( (JCExpression)indexed.clone(), (JCExpression)index.clone() );
      // Bug fix: this previously reassigned newArrayAccess.pos, leaving
      // newArrayAccess2 (the read side of the expansion) without a source
      // position. Mirrors the JCFieldAccess branch above.
      newArrayAccess2.pos = tree.rhs.pos;

      JCBinary binary = make.Binary( binop, newArrayAccess2, tree.rhs );
      binary.pos = tree.rhs.pos;

      JCAssign assign = make.Assign( newArrayAccess, binary );
      assign.pos = tree.pos;

      if( !tempVars.isEmpty() )
      {
        result = ILetExpr.makeLetExpr( make, tempVars, assign, null, tree.pos );
      }
      else
      {
        result = assign;
      }
    }
    else //if( lhs instanceof JCIdent )
    {
      // Simple name: a += b  ->  a = a + b (no temp vars needed)
      JCBinary binary = make.Binary( binop, tree.lhs, tree.rhs );
      binary.pos = tree.rhs.pos;
      JCAssign assign = make.Assign( (JCExpression)tree.lhs.clone(), binary );
      assign.pos = tree.pos;
      result = assign;
    }
  }

  /**
   * Captures {@code expr} in a final temp variable when it may have side
   * effects. Literals and plain identifiers are side-effect free and are left
   * in place unless {@code force} is set.
   *
   * @return {@code null} when no temp is needed; otherwise a pair of
   *         {temp var decl, ident referencing the temp}
   */
  public static JCTree[] tempify( boolean force, JCTree.JCExpression tree, TreeMaker make, JCExpression expr, Context ctx, String varName, int tempVarIndex )
  {
    switch( expr.getTag() )
    {
      case LITERAL:
      case IDENT:
        if( !force )
        {
          return null;
        }
        // fall through...
      default:
        Names names = Names.instance( JavacPlugin.instance().getContext() );
        JCTree.JCVariableDecl tempVar = make.VarDef( make.Modifiers( FINAL /*| SYNTHETIC*/ ),
          Names.instance( ctx ).fromString( varName + tempVarIndex ), make.Ident( names.fromString( Object.class.getSimpleName() ) ), expr );
        tempVar.pos = tree.pos;
        JCExpression ident = make.Ident( tempVar.name );
        ident.pos = tree.pos;
        return new JCTree[]{tempVar, ident};
    }
  }

  // NOTE(review): the memberAccess helpers are not referenced within this
  // class; kept (they are private) in case out-of-tree patches rely on them.
  private static JCTree.JCExpression memberAccess( TreeMaker make, String path )
  {
    return memberAccess( make, path.split( "\\." ) );
  }

  /** Builds a dotted access expression, e.g. {"a","b","c"} -> a.b.c */
  private static JCTree.JCExpression memberAccess( TreeMaker make, String... components )
  {
    Names names = Names.instance( JavacPlugin.instance().getContext() );
    JCTree.JCExpression expr = make.Ident( names.fromString( ( components[0] ) ) );
    for( int i = 1; i < components.length; i++ )
    {
      expr = make.Select( expr, names.fromString( components[i] ) );
    }
    return expr;
  }
}
| 2,584 |
319 | <gh_stars>100-1000
{
"name": "gtable",
"author": "rstudio",
"license": "CC0",
"raster": "https://github.com/rstudio/hex-stickers/blob/master/PNG/gtable.png",
"vector": "https://github.com/rstudio/hex-stickers/blob/master/SVG/gtable.svg",
"description": "gtable: The Layout Engine for ggplot2",
"filename": "meta/gtable.json"
}
| 142 |
3,262 | <reponame>JLouhela/halley
#pragma once
#include <functional>
#include <optional>
#include <vector>
#include "halley/text/halleystring.h"
namespace Halley {
class ConfigNode;
	// Handle to one field inside a component's ConfigNode data.
	// getSubIndex/getSubKey derive handles that drill into sequence elements
	// or map entries of the same field.
	class ComponentDataRetriever {
	public:
		// Callback returning a reference to the target ConfigNode — presumably
		// re-resolved on each call so the handle survives tree rebuilds
		// (TODO confirm against the .cpp implementation).
		using Retriever = std::function<ConfigNode&()>;

		ComponentDataRetriever(ConfigNode& componentData, String fieldName, String labelName);

		// Derive a handle for element `index` of this field (sequence access).
		ComponentDataRetriever getSubIndex(size_t index) const;
		// Derive a handle for entry `key` of this field (map access).
		ComponentDataRetriever getSubKey(const String& key) const;

		// Resolves and returns the field's ConfigNode.
		ConfigNode& getFieldData() const;
		// Field identifier.
		const String& getName() const;
		// Human-readable label for UI display.
		const String& getLabelName() const;

	private:
		ConfigNode& componentData;
		String labelName;
		String name;
		Retriever retriever;

		// Internal constructor used by getSubIndex/getSubKey to pass a
		// pre-built retriever.
		ComponentDataRetriever(ConfigNode& componentData, String fieldName, String labelName, Retriever retriever);
	};
	// Bundle of everything needed to edit one component field: the owning
	// component's name, a retriever for the field data, textual default
	// value(s), and type parameters (presumably element types for container
	// fields — confirm against callers).
	class ComponentFieldParameters {
	public:
		ComponentFieldParameters(String componentName, ComponentDataRetriever data, std::vector<String> defaultValue = {}, std::vector<String> typeParameters = {});

		// Derive parameters for a sequence element / map entry of this field.
		ComponentFieldParameters withSubIndex(size_t index, std::vector<String> defaultValue, std::vector<String> typeParameters = {}) const;
		ComponentFieldParameters withSubKey(const String& key, std::vector<String> defaultValue, std::vector<String> typeParameters = {}) const;
		// Single-default-value conveniences for the two overloads above.
		ComponentFieldParameters withSubIndex(size_t index, String defaultValue = "", std::vector<String> typeParameters = {}) const;
		ComponentFieldParameters withSubKey(const String& key, String defaultValue = "", std::vector<String> typeParameters = {}) const;

		// Accessors for the nth default value, interpreted as the given type.
		String getStringDefaultParameter(size_t n = 0) const;
		bool getBoolDefaultParameter(size_t n = 0) const;
		int getIntDefaultParameter(size_t n = 0) const;
		float getFloatDefaultParameter(size_t n = 0) const;

		String componentName;
		ComponentDataRetriever data;
		std::vector<String> defaultValue;
		std::vector<String> typeParameters;
	};
}
| 582 |
3,200 | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_AFFINE_OP_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_AFFINE_OP_H_
#include <memory>
#include <string>
#include <vector>
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/kernels/image/affine_op.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
// TensorOp that applies a randomly sampled affine transform to an input
// tensor. The rotation, translation, scale and shear parameters are drawn
// from the configured ranges each time Compute() runs (see rnd_).
class RandomAffineOp : public AffineOp {
 public:
  /// Default values, also used by python_bindings.cc
  static const std::vector<float_t> kDegreesRange;
  static const std::vector<float_t> kTranslationPercentages;
  static const std::vector<float_t> kScaleRange;
  static const std::vector<float_t> kShearRanges;
  static const InterpolationMode kDefInterpolation;
  static const std::vector<uint8_t> kFillValue;

  /// \param degrees {min, max} rotation range in degrees (required).
  /// \param translate_range max translation fractions (see member comment).
  /// \param scale_range {min_scale, max_scale}.
  /// \param shear_ranges x/y shear ranges (see member comment).
  /// \param interpolation resampling mode for the warp.
  /// \param fill_value per-channel value for pixels mapped from outside.
  explicit RandomAffineOp(std::vector<float_t> degrees, std::vector<float_t> translate_range = kTranslationPercentages,
                          std::vector<float_t> scale_range = kScaleRange,
                          std::vector<float_t> shear_ranges = kShearRanges,
                          InterpolationMode interpolation = kDefInterpolation,
                          std::vector<uint8_t> fill_value = kFillValue);

  ~RandomAffineOp() override = default;

  std::string Name() const override { return kRandomAffineOp; }

  /// Samples transform parameters and applies the affine warp to `input`.
  Status Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) override;

 private:
  std::vector<float_t> degrees_range_;   // min_degree, max_degree
  std::vector<float_t> translate_range_; // maximum x translation percentage, maximum y translation percentage
  std::vector<float_t> scale_range_;     // min_scale, max_scale
  std::vector<float_t> shear_ranges_;    // min_x_shear, max_x_shear, min_y_shear, max_y_shear
  std::mt19937 rnd_;                     // random device
};
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_AFFINE_OP_H_
| 949 |
1,127 | <reponame>si-eun-kim/openvino
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/eye.hpp"
using namespace ov;
using namespace reference_tests;
namespace reference_tests {
namespace {
// Parameter bundle for one 3-input Eye test case: tensors holding num_rows,
// num_columns and diagonal_index, the requested output element type, and the
// expected output tensor.
struct EyeParams {
    EyeParams(const reference_tests::Tensor& num_rows,
              const reference_tests::Tensor& num_columns,
              const reference_tests::Tensor& diagonal_index,
              const element::Type& output_type,
              const reference_tests::Tensor& expected_tensor,
              const std::string& test_case_name,
              bool is_dyn_shape_test = false)
        : num_rows(num_rows),
          num_columns(num_columns),
          diagonal_index(diagonal_index),
          output_type(output_type),
          expected_tensor(expected_tensor),
          test_case_name(test_case_name),
          set_dynamic_shape(is_dyn_shape_test) {}

    reference_tests::Tensor num_rows;
    reference_tests::Tensor num_columns;
    reference_tests::Tensor diagonal_index;
    element::Type output_type;
    reference_tests::Tensor expected_tensor;
    std::string test_case_name;
    // When true, the model's Parameters are built with dynamic shapes.
    bool set_dynamic_shape = false;
};
// Parameter bundle for one 4-input Eye test case; like EyeParams but with an
// additional batch_shape input that prepends batch dimensions to the output.
struct EyeBatchShapeParams {
    EyeBatchShapeParams(const reference_tests::Tensor& num_rows,
                        const reference_tests::Tensor& num_columns,
                        const reference_tests::Tensor& diagonal_index,
                        const reference_tests::Tensor& batch_shape,
                        const element::Type& output_type,
                        const reference_tests::Tensor& expected_tensor,
                        const std::string& test_case_name,
                        bool is_dyn_shape_test = false)
        : num_rows(num_rows),
          num_columns(num_columns),
          diagonal_index(diagonal_index),
          batch_shape(batch_shape),
          output_type(output_type),
          expected_tensor(expected_tensor),
          test_case_name(test_case_name),
          set_dynamic_shape(is_dyn_shape_test) {}

    reference_tests::Tensor num_rows;
    reference_tests::Tensor num_columns;
    reference_tests::Tensor diagonal_index;
    reference_tests::Tensor batch_shape;
    element::Type output_type;
    reference_tests::Tensor expected_tensor;
    std::string test_case_name;
    // When true, the model's Parameters are built with dynamic shapes.
    bool set_dynamic_shape = false;
};
// Fixture for the 3-input form of Eye (num_rows, num_columns,
// diagonal_index): builds the model, feeds the inputs, and compares the
// output against the hard-coded expectation.
class ReferenceEyeLayerTest : public testing::TestWithParam<EyeParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.num_rows,
                                  params.num_columns,
                                  params.diagonal_index,
                                  params.output_type,
                                  params.set_dynamic_shape);
        inputData = {params.num_rows.data, params.num_columns.data, params.diagonal_index.data};
        refOutData = {params.expected_tensor.data};
    }

    // Suffix distinguishes the dynamic-shape variant of each case.
    static std::string getTestCaseName(const testing::TestParamInfo<EyeParams>& obj) {
        return obj.param.test_case_name + (obj.param.set_dynamic_shape ? "_dyn_shape_inputs" : "");
    }

private:
    // Builds Parameter x3 -> Eye-9 -> Result. With set_dynamic_shape the
    // Parameters get fully dynamic shapes to exercise shape inference.
    static std::shared_ptr<Model> CreateFunction(const reference_tests::Tensor& num_rows,
                                                 const reference_tests::Tensor& num_columns,
                                                 const reference_tests::Tensor& diagonal_index,
                                                 const element::Type& output_type,
                                                 bool set_dynamic_shape = false) {
        const auto in1 = std::make_shared<op::v0::Parameter>(num_rows.type, set_dynamic_shape ? PartialShape::dynamic() : num_rows.shape);
        const auto in2 = std::make_shared<op::v0::Parameter>(num_columns.type, set_dynamic_shape ? PartialShape::dynamic() : num_columns.shape);
        const auto in3 = std::make_shared<op::v0::Parameter>(diagonal_index.type, set_dynamic_shape ? PartialShape::dynamic() : diagonal_index.shape);
        const auto Eye = std::make_shared<op::v9::Eye>(in1, in2, in3, output_type);
        return std::make_shared<Model>(NodeVector{Eye}, ParameterVector{in1, in2, in3});
    }
};
// Fixture for the 4-input form of Eye, which adds a batch_shape input that
// prepends batch dimensions to the output.
class ReferenceEyeBatchShapeLayerTest : public testing::TestWithParam<EyeBatchShapeParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.num_rows,
                                  params.num_columns,
                                  params.diagonal_index,
                                  params.batch_shape,
                                  params.output_type,
                                  params.set_dynamic_shape);
        inputData = {params.num_rows.data, params.num_columns.data, params.diagonal_index.data, params.batch_shape.data};
        refOutData = {params.expected_tensor.data};
    }

    // Suffix distinguishes the dynamic-shape variant of each case.
    static std::string getTestCaseName(const testing::TestParamInfo<EyeBatchShapeParams>& obj) {
        return obj.param.test_case_name + (obj.param.set_dynamic_shape ? "_dyn_shape_inputs" : "");
    }

private:
    // Builds Parameter x4 -> Eye-9 -> Result. With set_dynamic_shape the
    // Parameters get fully dynamic shapes to exercise shape inference.
    static std::shared_ptr<Model> CreateFunction(const reference_tests::Tensor& num_rows,
                                                 const reference_tests::Tensor& num_columns,
                                                 const reference_tests::Tensor& diagonal_index,
                                                 const reference_tests::Tensor& batch_shape,
                                                 const element::Type& output_type,
                                                 bool set_dynamic_shape = false) {
        const auto in1 = std::make_shared<op::v0::Parameter>(num_rows.type, set_dynamic_shape ? PartialShape::dynamic() : num_rows.shape);
        const auto in2 = std::make_shared<op::v0::Parameter>(num_columns.type, set_dynamic_shape ? PartialShape::dynamic() : num_columns.shape);
        const auto in3 = std::make_shared<op::v0::Parameter>(diagonal_index.type, set_dynamic_shape ? PartialShape::dynamic() : diagonal_index.shape);
        const auto in4 = std::make_shared<op::v0::Parameter>(batch_shape.type, set_dynamic_shape ? PartialShape::dynamic() : batch_shape.shape);
        const auto Eye = std::make_shared<op::v9::Eye>(in1, in2, in3, in4, output_type);
        return std::make_shared<Model>(NodeVector{Eye}, ParameterVector{in1, in2, in3, in4});
    }
};
// Generates the 3-input Eye cases: square/rectangular outputs, a negative
// diagonal shift, and a diagonal far outside the matrix (all zeros).
// Each case is emitted twice overall — once per is_dyn_shape_test value.
std::vector<EyeParams> generateEyeParams(bool is_dyn_shape_test = false) {
    std::vector<EyeParams> test_params {
        EyeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                  reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                  reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{0}},
                  element::Type_t::f32,
                  reference_tests::Tensor{{3, 2}, element::f32, std::vector<float>{1, 0,
                                                                                  0, 1,
                                                                                  0, 0}},
                  "float32_default_3x2", is_dyn_shape_test),
        // Scalar (rank-0) inputs are also accepted.
        EyeParams(reference_tests::Tensor{{}, element::i32, std::vector<int32_t>{2}},
                  reference_tests::Tensor{{}, element::i32, std::vector<int32_t>{4}},
                  reference_tests::Tensor{{}, element::i32, std::vector<int32_t>{2}},
                  element::Type_t::i8,
                  reference_tests::Tensor{{2, 4}, element::i8, std::vector<int8_t>{0, 0, 1, 0,
                                                                                  0, 0, 0, 1}},
                  "int8_diag=2_2x4", is_dyn_shape_test),
        EyeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                  reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                  reference_tests::Tensor{{}, element::i32, std::vector<int32_t>{-3}},
                  element::Type_t::i64,
                  reference_tests::Tensor{{4, 2}, element::i64, std::vector<int64_t>{0, 0,
                                                                                    0, 0,
                                                                                    0, 0,
                                                                                    1, 0}},
                  "int64_diag=-3_4x2", is_dyn_shape_test),
        // Diagonal index beyond the matrix extent yields an all-zero output.
        EyeParams(reference_tests::Tensor{{}, element::i32, std::vector<int32_t>{1}},
                  reference_tests::Tensor{{}, element::i32, std::vector<int32_t>{4}},
                  reference_tests::Tensor{{}, element::i32, std::vector<int32_t>{10}},
                  element::Type_t::i8,
                  reference_tests::Tensor{{1, 4}, element::i8, std::vector<int8_t>{0, 0, 0, 0}},
                  "int8_empty_1x4", is_dyn_shape_test)};
    return test_params;
}
// Generates the 4-input (batched) Eye cases. For Eye, the output shape is
// batch_shape + [num_rows, num_columns]; three expected-tensor shapes below
// were fixed to satisfy that invariant (and to match their own data sizes).
std::vector<EyeBatchShapeParams> generateEyeBatchShapeParams(bool is_dyn_shape_test = false) {
    std::vector<EyeBatchShapeParams> test_params {
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{0}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::f32,
                            reference_tests::Tensor{{2, 3, 3}, element::f32, std::vector<float>{1, 0, 0,
                                                                                               0, 1, 0,
                                                                                               0, 0, 1,
                                                                                               1, 0, 0,
                                                                                               0, 1, 0,
                                                                                               0, 0, 1}},
                            "f32_2x3x3_diag0", is_dyn_shape_test),
        // Fix: expected shape was {2, 3, 3} (18 elements) but the data holds
        // 2 * 4 * 4 = 32 values (a batch of two 4x4 identities).
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{0}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::f32,
                            reference_tests::Tensor{{2, 4, 4}, element::f32, std::vector<float>{1, 0, 0, 0,
                                                                                               0, 1, 0, 0,
                                                                                               0, 0, 1, 0,
                                                                                               0, 0, 0, 1,
                                                                                               1, 0, 0, 0,
                                                                                               0, 1, 0, 0,
                                                                                               0, 0, 1, 0,
                                                                                               0, 0, 0, 1}},
                            "f32_2x4x4_diag0", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{0}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::f32,
                            reference_tests::Tensor{{2, 3, 4}, element::f32, std::vector<float>{1, 0, 0, 0,
                                                                                               0, 1, 0, 0,
                                                                                               0, 0, 1, 0,
                                                                                               1, 0, 0, 0,
                                                                                               0, 1, 0, 0,
                                                                                               0, 0, 1, 0}},
                            "f32_2x3x4_diag0", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{0}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::i8,
                            reference_tests::Tensor{{2, 4, 3}, element::i8, std::vector<int8_t>{1, 0, 0,
                                                                                               0, 1, 0,
                                                                                               0, 0, 1,
                                                                                               0, 0, 0,
                                                                                               1, 0, 0,
                                                                                               0, 1, 0,
                                                                                               0, 0, 1,
                                                                                               0, 0, 0}},
                            "f32_2x4x3_diag0", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{1}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::f32,
                            reference_tests::Tensor{{2, 3, 4}, element::f32, std::vector<float>{0, 1, 0, 0,
                                                                                               0, 0, 1, 0,
                                                                                               0, 0, 0, 1,
                                                                                               0, 1, 0, 0,
                                                                                               0, 0, 1, 0,
                                                                                               0, 0, 0, 1}},
                            "f32_2x3x4_diag1", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{1}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::f32,
                            reference_tests::Tensor{{2, 4, 3}, element::f32, std::vector<float>{0, 1, 0,
                                                                                               0, 0, 1,
                                                                                               0, 0, 0,
                                                                                               0, 0, 0,
                                                                                               0, 1, 0,
                                                                                               0, 0, 1,
                                                                                               0, 0, 0,
                                                                                               0, 0, 0}},
                            "f32_2x4x3_diag1", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::i8,
                            reference_tests::Tensor{{2, 3, 4}, element::i8, std::vector<int8_t>{0, 0, 1, 0,
                                                                                               0, 0, 0, 1,
                                                                                               0, 0, 0, 0,
                                                                                               0, 0, 1, 0,
                                                                                               0, 0, 0, 1,
                                                                                               0, 0, 0, 0}},
                            "i8_2x3x4_diag2", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::i8,
                            reference_tests::Tensor{{2, 4, 3}, element::i8, std::vector<int8_t>{0, 0, 1,
                                                                                               0, 0, 0,
                                                                                               0, 0, 0,
                                                                                               0, 0, 0,
                                                                                               0, 0, 1,
                                                                                               0, 0, 0,
                                                                                               0, 0, 0,
                                                                                               0, 0, 0}},
                            "i8_2x4x3_diag2", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{-1}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::i8,
                            reference_tests::Tensor{{2, 3, 4}, element::i8, std::vector<int8_t>{0, 0, 0, 0,
                                                                                               1, 0, 0, 0,
                                                                                               0, 1, 0, 0,
                                                                                               0, 0, 0, 0,
                                                                                               1, 0, 0, 0,
                                                                                               0, 1, 0, 0}},
                            "i8_2x3x4_diag-1", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{-1}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::i8,
                            reference_tests::Tensor{{2, 4, 3}, element::i8, std::vector<int8_t>{0, 0, 0,
                                                                                               1, 0, 0,
                                                                                               0, 1, 0,
                                                                                               0, 0, 1,
                                                                                               0, 0, 0,
                                                                                               1, 0, 0,
                                                                                               0, 1, 0,
                                                                                               0, 0, 1}},
                            "i8_2x4x3_diag-1", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{-2}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::i8,
                            reference_tests::Tensor{{2, 3, 4}, element::i8, std::vector<int8_t>{0, 0, 0, 0,
                                                                                               0, 0, 0, 0,
                                                                                               1, 0, 0, 0,
                                                                                               0, 0, 0, 0,
                                                                                               0, 0, 0, 0,
                                                                                               1, 0, 0, 0}},
                            "i8_2x3x4_diag-2", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{-2}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::i8,
                            reference_tests::Tensor{{2, 4, 3}, element::i8, std::vector<int8_t>{0, 0, 0,
                                                                                               0, 0, 0,
                                                                                               1, 0, 0,
                                                                                               0, 1, 0,
                                                                                               0, 0, 0,
                                                                                               0, 0, 0,
                                                                                               1, 0, 0,
                                                                                               0, 1, 0}},
                            "i8_2x4x3_diag-2", is_dyn_shape_test),
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{6}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{5}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{1}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            element::Type_t::i32,
                            reference_tests::Tensor{{2, 6, 5}, element::i32, std::vector<int32_t>{0, 1, 0, 0, 0,
                                                                                                 0, 0, 1, 0, 0,
                                                                                                 0, 0, 0, 1, 0,
                                                                                                 0, 0, 0, 0, 1,
                                                                                                 0, 0, 0, 0, 0,
                                                                                                 0, 0, 0, 0, 0,
                                                                                                 0, 1, 0, 0, 0,
                                                                                                 0, 0, 1, 0, 0,
                                                                                                 0, 0, 0, 1, 0,
                                                                                                 0, 0, 0, 0, 1,
                                                                                                 0, 0, 0, 0, 0,
                                                                                                 0, 0, 0, 0, 0}},
                            "int32_2x6x5_diag1", is_dyn_shape_test),
        // Fix: expected shape was {2, 2, 4, 2}; Eye(num_rows=2, num_columns=4)
        // with batch_shape {2, 2} produces {2, 2, 2, 4}, matching the data
        // (four 2x4 matrices with the diagonal shifted by +2).
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{4}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            reference_tests::Tensor{{2}, element::i32, std::vector<int32_t>{2, 2}},
                            element::Type_t::i64,
                            reference_tests::Tensor{{2, 2, 2, 4}, element::i64, std::vector<int64_t>{0, 0, 1, 0,
                                                                                                    0, 0, 0, 1,
                                                                                                    0, 0, 1, 0,
                                                                                                    0, 0, 0, 1,
                                                                                                    0, 0, 1, 0,
                                                                                                    0, 0, 0, 1,
                                                                                                    0, 0, 1, 0,
                                                                                                    0, 0, 0, 1}},
                            "int64_2x2x3x3", is_dyn_shape_test),
        // Fix: expected shape was {2, 3, 3} (18 elements) but the data holds
        // 1 * 3 * 2 * 2 = 12 zeros, matching the test name "uint8_1x3x2x2"
        // (diagonal index 3 lies outside a 2x2 matrix -> all zeros).
        EyeBatchShapeParams(reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{2}},
                            reference_tests::Tensor{{1}, element::i32, std::vector<int32_t>{3}},
                            reference_tests::Tensor{{2}, element::i32, std::vector<int32_t>{1, 3}},
                            element::Type_t::u8,
                            reference_tests::Tensor{{1, 3, 2, 2}, element::u8, std::vector<uint8_t>{0, 0,
                                                                                                   0, 0,
                                                                                                   0, 0,
                                                                                                   0, 0,
                                                                                                   0, 0,
                                                                                                   0, 0}},
                            "uint8_1x3x2x2", is_dyn_shape_test)};
    return test_params;
}
// All 3-input cases: the static-shape set followed by its dynamic-shape twins.
std::vector<EyeParams> generateEyeCombinedParams() {
    auto combined_params = generateEyeParams(false);
    const auto dyn_shape_params = generateEyeParams(true);
    combined_params.insert(combined_params.end(), dyn_shape_params.begin(), dyn_shape_params.end());
    return combined_params;
}
// All 4-input cases: the static-shape set followed by its dynamic-shape twins.
std::vector<EyeBatchShapeParams> generateEyeBatchShapeCombinedParams() {
    auto combined_params = generateEyeBatchShapeParams(false);
    const auto dyn_shape_params = generateEyeBatchShapeParams(true);
    combined_params.insert(combined_params.end(), dyn_shape_params.begin(), dyn_shape_params.end());
    return combined_params;
}
// Runs one parameterized 3-input Eye case end to end (build, infer, compare).
TEST_P(ReferenceEyeLayerTest, EyeWithHardcodedRefs) {
    Exec();
}

// Runs one parameterized 4-input (batched) Eye case end to end.
TEST_P(ReferenceEyeBatchShapeLayerTest, EyeRectangleBatchShapeWithHardcodedRefs) {
    Exec();
}
} // namespace
// Instantiates each suite over the combined static + dynamic-shape case lists.
INSTANTIATE_TEST_SUITE_P(
    smoke_Eye_With_Hardcoded_Refs,
    ReferenceEyeLayerTest,
    // Generate params (3 inputs) with static and dynamic shapes
    ::testing::ValuesIn(generateEyeCombinedParams()),
    ReferenceEyeLayerTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(
    smoke_EyeBatchShape_With_Hardcoded_Refs,
    ReferenceEyeBatchShapeLayerTest,
    // Generate params (4 inputs) with static and dynamic shapes
    ::testing::ValuesIn(generateEyeBatchShapeCombinedParams()),
    ReferenceEyeBatchShapeLayerTest::getTestCaseName);
} // namespace reference_tests
| 20,654 |
14,443 | <gh_stars>1000+
/*
* Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.material.catalog.adaptive;
import io.material.catalog.R;
import android.os.Bundle;
import androidx.fragment.app.Fragment;
import androidx.core.view.ViewCompat;
import androidx.recyclerview.widget.LinearLayoutManager;
import androidx.recyclerview.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.constraintlayout.widget.ConstraintLayout;
import androidx.constraintlayout.widget.ConstraintSet;
/**
 * A Fragment that hosts a hero demo, choosing between three constraint-based layouts
 * (small / medium / large) according to the current screen width.
 */
public class AdaptiveHeroDemoFragment extends Fragment {

  @Nullable
  @Override
  public View onCreateView(
      @NonNull LayoutInflater layoutInflater,
      @Nullable ViewGroup viewGroup,
      @Nullable Bundle bundle) {
    View root = layoutInflater.inflate(R.layout.cat_adaptive_hero_fragment, viewGroup, false);

    // Side content list: vertical, non-nested-scrolling, backed by a placeholder adapter.
    RecyclerView sideList = root.findViewById(R.id.hero_side_content);
    sideList.setLayoutManager(
        new LinearLayoutManager(getContext(), LinearLayoutManager.VERTICAL, false));
    sideList.setAdapter(new HeroAdapter());
    ViewCompat.setNestedScrollingEnabled(sideList, /* enabled= */ false);

    // Build the three constraint sets. The small layout is the one defined in XML;
    // medium derives from it and large derives from medium.
    ConstraintLayout heroLayout = root.findViewById(R.id.hero_constraint_layout);
    ConstraintSet smallLayout = new ConstraintSet();
    smallLayout.clone(heroLayout);
    ConstraintSet mediumLayout = getMediumLayout(heroLayout);
    ConstraintSet largeLayout = getLargeLayout(mediumLayout);

    // Apply the set that matches the current screen width class.
    int widthDp = getResources().getConfiguration().screenWidthDp;
    if (widthDp < AdaptiveUtils.MEDIUM_SCREEN_WIDTH_SIZE) {
      smallLayout.applyTo(heroLayout);
    } else if (widthDp < AdaptiveUtils.LARGE_SCREEN_WIDTH_SIZE) {
      mediumLayout.applyTo(heroLayout);
    } else {
      largeLayout.applyTo(heroLayout);
    }
    return root;
  }

  /** Builds the constraint set used for medium-width screens. */
  private ConstraintSet getMediumLayout(@NonNull ConstraintLayout constraintLayout) {
    int verticalMargin =
        getResources().getDimensionPixelOffset(R.dimen.cat_adaptive_margin_vertical);
    int horizontalMargin =
        getResources().getDimensionPixelOffset(R.dimen.cat_adaptive_margin_horizontal);
    ConstraintSet set = new ConstraintSet();
    set.clone(constraintLayout);
    // Reveal the hero image at the top.
    set.setVisibility(R.id.hero_top_content, View.VISIBLE);
    // Main content sits below the hero image, to the left of the side content,
    // and stretches to the bottom of the parent.
    set.connect(
        R.id.hero_main_content, ConstraintSet.TOP, R.id.hero_top_content, ConstraintSet.BOTTOM);
    set.connect(
        R.id.hero_main_content,
        ConstraintSet.END,
        R.id.hero_side_content_container,
        ConstraintSet.START);
    set.connect(
        R.id.hero_main_content,
        ConstraintSet.BOTTOM,
        ConstraintSet.PARENT_ID,
        ConstraintSet.BOTTOM);
    set.setMargin(R.id.hero_main_content, ConstraintSet.TOP, verticalMargin);
    set.setMargin(R.id.hero_main_content, ConstraintSet.END, horizontalMargin);
    // Side content occupies 40% of the width, below the hero image.
    set.connect(
        R.id.hero_side_content_container,
        ConstraintSet.TOP,
        R.id.hero_top_content,
        ConstraintSet.BOTTOM);
    set.connect(
        R.id.hero_side_content_container,
        ConstraintSet.START,
        R.id.hero_main_content,
        ConstraintSet.END);
    set.constrainPercentWidth(R.id.hero_side_content_container, 0.4f);
    return set;
  }

  /** Builds the constraint set used for large-width screens, derived from the medium set. */
  private ConstraintSet getLargeLayout(@NonNull ConstraintSet mediumLayout) {
    int horizontalMargin =
        getResources().getDimensionPixelOffset(R.dimen.cat_adaptive_margin_horizontal);
    ConstraintSet set = new ConstraintSet();
    set.clone(mediumLayout);
    // On large screens the hero image shares the top row with the side content.
    set.connect(
        R.id.hero_top_content,
        ConstraintSet.END,
        R.id.hero_side_content_container,
        ConstraintSet.START);
    set.setMargin(R.id.hero_top_content, ConstraintSet.END, horizontalMargin);
    // Side content now starts at the very top of the parent.
    set.connect(
        R.id.hero_side_content_container,
        ConstraintSet.TOP,
        ConstraintSet.PARENT_ID,
        ConstraintSet.TOP);
    return set;
  }

  /** RecyclerView adapter that supplies static placeholder rows for the side content list. */
  private static final class HeroAdapter extends RecyclerView.Adapter<HeroAdapter.HeroViewHolder> {
    private static final int ITEM_COUNT = 10;

    HeroAdapter() {}

    /** Holder for a single (static) side-content row. */
    static class HeroViewHolder extends RecyclerView.ViewHolder {
      public HeroViewHolder(@NonNull View view) {
        super(view);
      }
    }

    @NonNull
    @Override
    public HeroViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) {
      View row =
          LayoutInflater.from(parent.getContext())
              .inflate(R.layout.cat_adaptive_hero_item, parent, false);
      return new HeroViewHolder(row);
    }

    @Override
    public void onBindViewHolder(@NonNull HeroViewHolder holder, int position) {
      // Rows are static placeholders; nothing to bind.
    }

    @Override
    public int getItemCount() {
      return ITEM_COUNT;
    }
  }
}
| 2,203 |
743 | <reponame>ddstreet/corosync
/*
* Copyright (c) 2015-2017 Red Hat, Inc.
*
* All rights reserved.
*
* Author: <NAME> <<EMAIL>>
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <limits.h>
#include <ctype.h>
#include <syslog.h>
#include <stdarg.h>
#include <inttypes.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <pthread.h>
#include <zlib.h>
#include <libgen.h>
#include <getopt.h>
#include <corosync/corotypes.h>
#include <corosync/cpg.h>
static cpg_handle_t handle;
static pthread_t thread;
#ifndef timersub
#define timersub(a, b, result) \
do { \
(result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
(result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
if ((result)->tv_usec < 0) { \
--(result)->tv_sec; \
(result)->tv_usec += 1000000; \
} \
} while (0)
#endif /* timersub */
static int alarm_notice;
#define MAX_NODEID 65536
#define ONE_MEG 1048576
#define DATASIZE (ONE_MEG*20)
static char data[DATASIZE];
static int send_counter = 0;
static int do_syslog = 0;
static int quiet = 0;
static int report_rtt = 0;
static int abort_on_error = 0;
static int machine_readable = 0;
static char delimiter = ',';
static int to_stderr = 0;
static unsigned int g_our_nodeid;
static volatile int stopped;
static unsigned int flood_start = 64;
static unsigned int flood_multiplier = 5;
static unsigned long flood_max = (ONE_MEG - 100);
// stats
static unsigned int length_errors=0;
static unsigned int crc_errors=0;
static unsigned int sequence_errors=0;
static unsigned int packets_sent=0;
static unsigned int packets_recvd=0;
static unsigned int packets_recvd1=0; /* For flood intermediates */
static unsigned int send_retries=0;
static unsigned int send_fails=0;
static unsigned long avg_rtt=0;
static unsigned long max_rtt=0;
static unsigned long min_rtt=LONG_MAX;
static unsigned long interim_avg_rtt=0;
static unsigned long interim_max_rtt=0;
static unsigned long interim_min_rtt=LONG_MAX;
struct cpghum_header {
unsigned int counter;
unsigned int crc;
unsigned int size;
struct timeval timestamp;
};
/*
 * CPG membership (configuration) change callback.  This tool does not track
 * membership, so the body is intentionally empty; the callback exists only
 * because the cpg callback structures require one.
 */
static void cpg_bm_confchg_fn (
	cpg_handle_t handle_in,
	const struct cpg_name *group_name,
	const struct cpg_address *member_list, size_t member_list_entries,
	const struct cpg_address *left_list, size_t left_list_entries,
	const struct cpg_address *joined_list, size_t joined_list_entries)
{
}
static unsigned int g_recv_count;
static unsigned int g_recv_length;
static int g_recv_start[MAX_NODEID+1];
static int g_recv_counter[MAX_NODEID+1];
static int g_recv_size[MAX_NODEID+1];
static int g_log_mask = 0xFFFF;
typedef enum
{
CPGH_LOG_INFO = 1,
CPGH_LOG_PERF = 2,
CPGH_LOG_RTT = 4,
CPGH_LOG_STATS = 8,
CPGH_LOG_ERR = 16
} log_type_t;
static void cpgh_print_message(int syslog_level, const char *facility_name, const char *format, va_list ap)
__attribute__((format(printf, 3, 0)));
static void cpgh_log_printf(log_type_t type, const char *format, ...)
__attribute__((format(printf, 2, 3)));
/*
 * Format a message and route it to stdout (or stderr for errors / when -E
 * is given), optionally also to syslog.  In machine-readable mode the
 * facility tag and delimiter are prepended.
 */
static void cpgh_print_message(int syslog_level, const char *facility_name, const char *format, va_list ap)
{
	char msg[1024];
	int start = 0;
	int written;

	if (machine_readable) {
		snprintf(msg, sizeof(msg), "%s%c", facility_name, delimiter);
		start = strlen(msg);
	}

	/* Format outside of assert(): with NDEBUG defined the assert expression
	 * is not evaluated at all, which would silently drop every message.
	 * Also check for encoding errors (negative return) and truncation
	 * explicitly, with a signed-safe comparison. */
	written = vsnprintf(msg+start, sizeof(msg)-start, format, ap);
	assert(written >= 0 && (size_t)written < sizeof(msg)-start);

	if (to_stderr || (syslog_level <= LOG_ERR)) {
		fprintf(stderr, "%s", msg);
	}
	else {
		printf("%s", msg);
	}
	if (do_syslog) {
		syslog(syslog_level, "%s", msg);
	}
}
/*
 * Log-type wrapper around cpgh_print_message(): drops messages filtered out
 * by the global log mask, then tags the rest with a facility name and the
 * matching syslog severity.
 */
static void cpgh_log_printf(log_type_t type, const char *format, ...)
{
	const char *facility;
	int level;
	va_list ap;

	if (!(type & g_log_mask)) {
		return;
	}

	switch (type) {
	case CPGH_LOG_INFO:
		facility = "[Info]";
		level = LOG_INFO;
		break;
	case CPGH_LOG_PERF:
		facility = "[Perf]";
		level = LOG_INFO;
		break;
	case CPGH_LOG_RTT:
		facility = "[RTT]";
		level = LOG_INFO;
		break;
	case CPGH_LOG_STATS:
		facility = "[Stats]";
		level = LOG_INFO;
		break;
	case CPGH_LOG_ERR:
		facility = "[Err]";
		level = LOG_ERR;
		break;
	default:
		/* Unknown type: print nothing, matching the original behaviour */
		return;
	}

	va_start(ap, format);
	cpgh_print_message(level, facility, format, ap);
	va_end(ap);
}
/*
 * Compute the round-trip time in microseconds from a packet's send
 * timestamp to "now" and fold it into the caller's running min/avg/max
 * statistics.  packet_count is the number of samples already contained in
 * the average.  Returns this packet's RTT.
 */
static unsigned long update_rtt(struct timeval *header_timestamp, int packet_count,
				unsigned long *rtt_min, unsigned long *rtt_avg, unsigned long *rtt_max)
{
	struct timeval now;
	struct timeval delta;
	unsigned long usecs;

	gettimeofday (&now, NULL);
	timersub(&now, header_timestamp, &delta);
	usecs = delta.tv_usec + delta.tv_sec*1000000;

	if (usecs > *rtt_max) {
		*rtt_max = usecs;
	}
	if (usecs < *rtt_min) {
		*rtt_min = usecs;
	}
	/* Seed the average with the first sample rather than diluting it with 0 */
	*rtt_avg = (*rtt_avg == 0)
	    ? usecs
	    : ((*rtt_avg * packet_count) + usecs) / (packet_count + 1);

	return usecs;
}
/*
 * CPG message delivery callback.  Validates each received cpghum packet
 * (declared size vs. delivered length, per-node sequence counter, payload
 * CRC), updates the global receive statistics, and — for packets this node
 * sent itself — updates the RTT statistics.
 */
static void cpg_bm_deliver_fn (
	cpg_handle_t handle_in,
	const struct cpg_name *group_name,
	uint32_t nodeid,
	uint32_t pid,
	void *msg,
	size_t msg_len)
{
	uLong crc=0;
	struct cpghum_header *header = (struct cpghum_header *)msg;
	uLong recv_crc = header->crc & 0xFFFFFFFF;
	unsigned int *dataint = (unsigned int *)((char*)msg + sizeof(struct cpghum_header));
	unsigned int datalen;
	/* Stats arrays are indexed by nodeid; anything above MAX_NODEID would
	 * overrun them, so bail out hard. */
	if (nodeid > MAX_NODEID) {
		cpgh_log_printf(CPGH_LOG_ERR, "Got message from invalid nodeid " CS_PRI_NODE_ID " (too high for us). Quitting\n", nodeid);
		exit(1);
	}
	packets_recvd++;
	packets_recvd1++;
	g_recv_length = msg_len;
	/* NOTE(review): datalen is derived from the sender-supplied header->size,
	 * not from msg_len.  If the two disagree (the length_errors case below,
	 * when -a is not given) the crc32() further down may read past the end
	 * of the delivered buffer -- confirm whether upstream guarantees make
	 * this safe before relying on it. */
	datalen = header->size - sizeof(struct cpghum_header);
	// Report RTT first in case abort_on_error is set
	if (nodeid == g_our_nodeid) {
		unsigned long rtt_usecs;
		// For flood
		update_rtt(&header->timestamp, packets_recvd1, &interim_min_rtt, &interim_avg_rtt, &interim_max_rtt);
		rtt_usecs = update_rtt(&header->timestamp, g_recv_counter[nodeid], &min_rtt, &avg_rtt, &max_rtt);
		if (report_rtt) {
			if (machine_readable) {
				cpgh_log_printf(CPGH_LOG_RTT, "%ld%c%ld%c%ld%c%ld\n", rtt_usecs, delimiter, min_rtt, delimiter, avg_rtt, delimiter, max_rtt);
			}
			else {
				cpgh_log_printf(CPGH_LOG_RTT, "%s: RTT %ld uS (min/avg/max): %ld/%ld/%ld\n", group_name->value, rtt_usecs, min_rtt, avg_rtt, max_rtt);
			}
		}
	}
	// Basic check, packets should all be the right size
	if (msg_len != header->size) {
		length_errors++;
		cpgh_log_printf(CPGH_LOG_ERR, "%s: message sizes don't match. got %zu, expected %u from node " CS_PRI_NODE_ID "\n", group_name->value, msg_len, header->size, nodeid);
		if (abort_on_error) {
			exit(2);
		}
	}
	g_recv_size[nodeid] = msg_len;
	// Sequence counters are incrementing in step?
	if (header->counter != g_recv_counter[nodeid]) {
		/* Don't report the first mismatch or a newly restarted sender, we're just catching up */
		if (g_recv_counter[nodeid] && header->counter) {
			sequence_errors++;
			cpgh_log_printf(CPGH_LOG_ERR, "%s: counters don't match. got %d, expected %d from node " CS_PRI_NODE_ID "\n", group_name->value, header->counter, g_recv_counter[nodeid], nodeid);
			if (abort_on_error) {
				exit(2);
			}
		}
		else {
			g_recv_start[nodeid] = header->counter;
		}
		/* Catch up or we'll be printing errors for ever */
		g_recv_counter[nodeid] = header->counter+1;
	}
	else {
		g_recv_counter[nodeid]++;
	}
	/* Check crc */
	crc = crc32(0, NULL, 0);
	crc = crc32(crc, (Bytef *)dataint, datalen) & 0xFFFFFFFF;
	if (crc != recv_crc) {
		crc_errors++;
		cpgh_log_printf(CPGH_LOG_ERR, "%s: CRCs don't match. got %lx, expected %lx from nodeid " CS_PRI_NODE_ID "\n", group_name->value, recv_crc, crc, nodeid);
		if (abort_on_error) {
			exit(2);
		}
	}
	g_recv_count++;
}
static cpg_model_v1_data_t model1_data = {
.cpg_deliver_fn = cpg_bm_deliver_fn,
.cpg_confchg_fn = cpg_bm_confchg_fn,
};
static cpg_callbacks_t callbacks = {
.cpg_deliver_fn = cpg_bm_deliver_fn,
.cpg_confchg_fn = cpg_bm_confchg_fn
};
static struct cpg_name group_name = {
.value = "cpghum",
.length = 7
};
/*
 * Fill the global send buffer with a cpghum packet of write_size bytes:
 * sequence counter, random payload, payload CRC, and a send timestamp
 * (used by the receiver to compute RTT).
 */
static void set_packet(int write_size, int counter)
{
	struct cpghum_header *header = (struct cpghum_header *)data;
	int i;
	unsigned int *dataint = (unsigned int *)(data + sizeof(struct cpghum_header));
	unsigned int datalen = write_size - sizeof(struct cpghum_header);
	struct timeval tv1;
	uLong crc;
	header->counter = counter;
	/* Randomise the payload one 32-bit word at a time.  Any tail bytes
	 * (datalen not a multiple of 4) keep whatever was in the static buffer,
	 * but the CRC below covers them too, so sender and receiver agree. */
	for (i=0; i<(datalen/4); i++) {
		dataint[i] = rand();
	}
	crc = crc32(0, NULL, 0);
	header->crc = crc32(crc, (Bytef*)&dataint[0], datalen);
	header->size = write_size;
	gettimeofday (&tv1, NULL);
	memcpy(&header->timestamp, &tv1, sizeof(struct timeval));
}
/* Basically this is cpgbench.c */
static void cpg_flood (
cpg_handle_t handle_in,
int write_size)
{
struct timeval tv1, tv2, tv_elapsed;
struct iovec iov;
unsigned int res = CS_OK;
alarm_notice = 0;
iov.iov_base = data;
iov.iov_len = write_size;
alarm (10);
packets_recvd1 = 0;
interim_avg_rtt = 0;
interim_max_rtt = 0;
interim_min_rtt = LONG_MAX;
gettimeofday (&tv1, NULL);
do {
if (res == CS_OK) {
set_packet(write_size, send_counter);
}
res = cpg_mcast_joined (handle_in, CPG_TYPE_AGREED, &iov, 1);
if (res == CS_OK) {
/* Only increment the packet counter if it was sucessfully sent */
packets_sent++;
send_counter++;
}
else {
if (res == CS_ERR_TRY_AGAIN) {
send_retries++;
}
else {
send_fails++;
}
}
} while (!stopped && alarm_notice == 0 && (res == CS_OK || res == CS_ERR_TRY_AGAIN));
gettimeofday (&tv2, NULL);
timersub (&tv2, &tv1, &tv_elapsed);
if (!quiet) {
if (machine_readable) {
cpgh_log_printf (CPGH_LOG_PERF, "%d%c%d%c%f%c%f%c%f%c%ld%c%ld%c%ld\n", packets_recvd1, delimiter, write_size, delimiter,
(tv_elapsed.tv_sec + (tv_elapsed.tv_usec / 1000000.0)), delimiter,
((float)packets_recvd1) / (tv_elapsed.tv_sec + (tv_elapsed.tv_usec / 1000000.0)), delimiter,
((float)packets_recvd1) * ((float)write_size) / ((tv_elapsed.tv_sec + (tv_elapsed.tv_usec / 1000000.0)) * 1000000.0), delimiter,
interim_min_rtt, delimiter, interim_avg_rtt, delimiter, interim_max_rtt);
}
else {
cpgh_log_printf (CPGH_LOG_PERF, "%5d messages received ", packets_recvd1);
cpgh_log_printf (CPGH_LOG_PERF, "%5d bytes per write ", write_size);
cpgh_log_printf (CPGH_LOG_PERF, "%7.3f Seconds runtime ",
(tv_elapsed.tv_sec + (tv_elapsed.tv_usec / 1000000.0)));
cpgh_log_printf (CPGH_LOG_PERF, "%9.3f TP/s ",
((float)packets_recvd1) / (tv_elapsed.tv_sec + (tv_elapsed.tv_usec / 1000000.0)));
cpgh_log_printf (CPGH_LOG_PERF, "%7.3f MB/s ",
((float)packets_recvd1) * ((float)write_size) / ((tv_elapsed.tv_sec + (tv_elapsed.tv_usec / 1000000.0)) * 1000000.0));
cpgh_log_printf (CPGH_LOG_PERF, "RTT for this size (min/avg/max) %ld/%ld/%ld\n",
interim_min_rtt, interim_avg_rtt, interim_max_rtt);
}
}
}
static void cpg_test (
cpg_handle_t handle_in,
int write_size,
int delay_time,
int print_time)
{
struct iovec iov;
unsigned int res;
alarm_notice = 0;
iov.iov_base = data;
iov.iov_len = write_size;
g_recv_count = 0;
alarm (print_time);
do {
send_counter++;
resend:
set_packet(write_size, send_counter);
res = cpg_mcast_joined (handle_in, CPG_TYPE_AGREED, &iov, 1);
if (res == CS_ERR_TRY_AGAIN) {
usleep(10000);
send_retries++;
goto resend;
}
if (res != CS_OK) {
cpgh_log_printf(CPGH_LOG_ERR, "send failed: %d\n", res);
send_fails++;
}
else {
packets_sent++;
}
usleep(delay_time*1000);
} while (alarm_notice == 0 && (res == CS_OK || res == CS_ERR_TRY_AGAIN) && stopped == 0);
if (!quiet) {
if (machine_readable) {
cpgh_log_printf(CPGH_LOG_RTT, "%d%c%ld%c%ld%c%ld\n", 0, delimiter, min_rtt, delimiter, avg_rtt, delimiter, max_rtt);
}
else {
cpgh_log_printf(CPGH_LOG_PERF, "%s: %5d message%s received, ", group_name.value, g_recv_count, g_recv_count==1?"":"s");
cpgh_log_printf(CPGH_LOG_PERF, "%5d bytes per write. ", write_size);
cpgh_log_printf(CPGH_LOG_RTT, "RTT min/avg/max: %ld/%ld/%ld\n", min_rtt, avg_rtt, max_rtt);
}
}
}
/* SIGALRM: ends the current timed send window (polled by the send loops). */
static void sigalrm_handler (int num)
{
	alarm_notice = 1;
}
/* SIGINT: requests a clean shutdown of all loops. */
static void sigint_handler (int num)
{
	stopped = 1;
}
/* Background thread: blocks in cpg_dispatch() delivering CPG callbacks. */
static void* dispatch_thread (void *arg)
{
	cpg_dispatch (handle, CS_DISPATCH_BLOCKING);
	return NULL;
}
/*
 * Print command-line help to stderr.
 * Fix: the long option names for -w/-W were swapped relative to the
 * long_options[] table in main() (-w is --size-kb, -W is --size-bytes).
 */
static void usage(char *cmd)
{
	fprintf(stderr, "%s [OPTIONS]\n", cmd);
	fprintf(stderr, "\n");
	fprintf(stderr, "%s sends CPG messages to all registered users of the CPG.\n", cmd);
	fprintf(stderr, "The messages have a sequence number and a CRC so that missing or\n");
	fprintf(stderr, "corrupted messages will be detected and reported.\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "%s can also be asked to simply listen for (and check) packets\n", cmd);
	fprintf(stderr, "so that there is another node in the cluster connected to the CPG.\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "Multiple copies, in different CPGs, can also be run on the same or\n");
	fprintf(stderr, "different nodes by using the -n option.\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "%s can handle more than 1 sender in the same CPG provided they are on\n", cmd);
	fprintf(stderr, "different nodes.\n");
	fprintf(stderr, "\n");
	fprintf(stderr, " -w<num>, --size-kb     Write size in Kbytes, default 4\n");
	fprintf(stderr, " -W<num>, --size-bytes  Write size in bytes, default 4096\n");
	fprintf(stderr, " -n<name>, --name       CPG name to use, default 'cpghum'\n");
	fprintf(stderr, " -M                     Write machine-readable results\n");
	fprintf(stderr, " -D<char>               Delimiter for machine-readable results (default ',')\n");
	fprintf(stderr, " -E                     Send normal output to stderr instead of stdout\n");
	fprintf(stderr, " -d<num>, --delay       Delay between sending packets (mS), default 1000\n");
	fprintf(stderr, " -r<num>                Number of repetitions, default 100\n");
	fprintf(stderr, " -p<num>                Delay between printing output (seconds), default 10s\n");
	fprintf(stderr, " -l, --listen           Listen and check CRCs only, don't send (^C to quit)\n");
	fprintf(stderr, " -t, --rtt              Report Round Trip Times for each packet.\n");
	fprintf(stderr, " -m<num>                cpg_initialise() model. Default 1.\n");
	fprintf(stderr, " -s                     Also send errors to syslog.\n");
	fprintf(stderr, " -f, --flood            Flood test CPG (cpgbench). see --flood-* long options\n");
	fprintf(stderr, " -a                     Abort on crc/length/sequence error\n");
	fprintf(stderr, " -q, --quiet            Quiet. Don't print messages every 10s (see also -p)\n");
	fprintf(stderr, " -qq                    Very quiet. Don't print stats at the end\n");
	fprintf(stderr, " --flood-start=bytes    Start value for --flood\n");
	fprintf(stderr, " --flood-mult=value     Packet size multiplier value for --flood\n");
	fprintf(stderr, " --flood-max=bytes      Maximum packet size for --flood\n");
	fprintf(stderr, "\n");
	fprintf(stderr, " values for --flood* and -W can have K or M suffixes to indicate\n");
	fprintf(stderr, " Kilobytes or Megabytes\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "%s exit code is 0 if no error happened, 1 on generic error and 2 on\n", cmd);
	fprintf(stderr, "send/crc/length/sequence error");
	fprintf(stderr, "\n");
}
/*
 * Parse a byte count with an optional, case-insensitive 'K' (KiB) or
 * 'M' (MiB) suffix, e.g. "512", "64K", "20m".
 * Returns the value in bytes, or 0 if the string is not valid.
 */
static long parse_bytes(const char *valstring)
{
	unsigned int value;
	int multiplier = 1;
	char suffix = '\0';
	int have_suffix = 0;

	/* Suffix is optional.  Check for < 1 rather than == 0: sscanf()
	 * returns EOF for an empty string, in which case 'value' would
	 * otherwise be used uninitialized. */
	if (sscanf(valstring, "%u%c", &value, &suffix) < 1) {
		return 0;
	}

	if (toupper(suffix) == 'M') {
		multiplier = 1024*1024;
		have_suffix = 1;
	}
	if (toupper(suffix) == 'K') {
		multiplier = 1024;
		have_suffix = 1;
	}

	if (!have_suffix && suffix != '\0') {
		fprintf(stderr, "Invalid suffix '%c', only K or M supported\n", suffix);
		return 0;
	}
	return value * multiplier;
}
int main (int argc, char *argv[]) {
int i;
unsigned int res;
uint32_t maxsize;
int opt;
int bs;
int write_size = 4096;
int delay_time = 1000;
int repetitions = 100;
int print_time = 10;
int have_size = 0;
int listen_only = 0;
int flood = 0;
int model = 1;
int option_index = 0;
struct option long_options[] = {
{"flood-start", required_argument, 0, 0 },
{"flood-mult", required_argument, 0, 0 },
{"flood-max", required_argument, 0, 0 },
{"size-kb", required_argument, 0, 'w' },
{"size-bytes", required_argument, 0, 'W' },
{"name", required_argument, 0, 'n' },
{"rtt", no_argument, 0, 't' },
{"flood", no_argument, 0, 'f' },
{"quiet", no_argument, 0, 'q' },
{"listen", no_argument, 0, 'l' },
{"help", no_argument, 0, '?' },
{0, 0, 0, 0 }
};
while ( (opt = getopt_long(argc, argv, "qlstafMEn:d:r:p:m:w:W:D:",
long_options, &option_index)) != -1 ) {
switch (opt) {
case 0: // Long-only options
if (strcmp(long_options[option_index].name, "flood-start") == 0) {
flood_start = parse_bytes(optarg);
if (flood_start == 0) {
fprintf(stderr, "flood-start value invalid\n");
exit(1);
}
}
if (strcmp(long_options[option_index].name, "flood-mult") == 0) {
flood_multiplier = parse_bytes(optarg);
if (flood_multiplier == 0) {
fprintf(stderr, "flood-mult value invalid\n");
exit(1);
}
}
if (strcmp(long_options[option_index].name, "flood-max") == 0) {
flood_max = parse_bytes(optarg);
if (flood_max == 0) {
fprintf(stderr, "flood-max value invalid\n");
exit(1);
}
}
break;
case 'w': // Write size in K
bs = atoi(optarg);
if (bs > 0) {
write_size = bs*1024;
have_size = 1;
}
break;
case 'W': // Write size in bytes (or with a suffix)
bs = parse_bytes(optarg);
if (bs > 0) {
write_size = bs;
have_size = 1;
}
break;
case 'n':
if (strlen(optarg) >= CPG_MAX_NAME_LENGTH) {
fprintf(stderr, "CPG name too long\n");
exit(1);
}
strcpy(group_name.value, optarg);
group_name.length = strlen(group_name.value);
break;
case 't':
report_rtt = 1;
break;
case 'E':
to_stderr = 1;
break;
case 'M':
machine_readable = 1;
break;
case 'f':
flood = 1;
break;
case 'a':
abort_on_error = 1;
break;
case 'd':
delay_time = atoi(optarg);
break;
case 'D':
delimiter = optarg[0];
break;
case 'r':
repetitions = atoi(optarg);
break;
case 'p':
print_time = atoi(optarg);
break;
case 'l':
listen_only = 1;
break;
case 's':
do_syslog = 1;
break;
case 'q':
quiet++;
break;
case 'm':
model = atoi(optarg);
if (model < 0 || model > 1) {
fprintf(stderr, "%s: Model must be 0-1\n", argv[0]);
exit(1);
}
break;
case '?':
usage(basename(argv[0]));
exit(1);
}
}
if (!have_size && flood) {
write_size = flood_start;
}
signal (SIGALRM, sigalrm_handler);
signal (SIGINT, sigint_handler);
switch (model) {
case 0:
res = cpg_initialize (&handle, &callbacks);
break;
case 1:
res = cpg_model_initialize (&handle, CPG_MODEL_V1, (cpg_model_data_t *)&model1_data, NULL);
break;
default:
res=999; // can't get here but it keeps the compiler happy
break;
}
if (res != CS_OK) {
cpgh_log_printf(CPGH_LOG_ERR, "cpg_initialize failed with result %d\n", res);
exit (1);
}
res = cpg_local_get(handle, &g_our_nodeid);
if (res != CS_OK) {
cpgh_log_printf(CPGH_LOG_ERR, "cpg_local_get failed with result %d\n", res);
exit (1);
}
pthread_create (&thread, NULL, dispatch_thread, NULL);
res = cpg_join (handle, &group_name);
if (res != CS_OK) {
cpgh_log_printf(CPGH_LOG_ERR, "cpg_join failed with result %d\n", res);
exit (1);
}
if (listen_only) {
int secs = 0;
while (!stopped) {
sleep(1);
if (++secs > print_time && !quiet) {
int nodes_printed = 0;
if (!machine_readable) {
for (i=1; i<MAX_NODEID; i++) {
if (g_recv_counter[i]) {
cpgh_log_printf(CPGH_LOG_INFO, "%s: %5d message%s of %d bytes received from node " CS_PRI_NODE_ID "\n",
group_name.value, g_recv_counter[i] - g_recv_start[i],
g_recv_counter[i]==1?"":"s",
g_recv_size[i], i);
nodes_printed++;
}
}
}
/* Separate list of nodes if more than one */
if (nodes_printed > 1) {
cpgh_log_printf(CPGH_LOG_INFO, "\n");
}
secs = 0;
}
}
}
else {
cpg_max_atomic_msgsize_get (handle, &maxsize);
if (write_size > maxsize) {
fprintf(stderr, "INFO: packet size (%d) is larger than the maximum atomic size (%d), libcpg will fragment\n",
write_size, maxsize);
}
/* The main job starts here */
if (flood) {
for (i = 0; i < 10; i++) { /* number of repetitions - up to 50k */
cpg_flood (handle, write_size);
signal (SIGALRM, sigalrm_handler);
write_size *= flood_multiplier;
if (write_size > flood_max) {
break;
}
}
}
else {
send_counter = -1; /* So we start from zero to allow listeners to sync */
for (i = 0; i < repetitions && !stopped; i++) {
cpg_test (handle, write_size, delay_time, print_time);
signal (SIGALRM, sigalrm_handler);
}
}
}
res = cpg_finalize (handle);
if (res != CS_OK) {
cpgh_log_printf(CPGH_LOG_ERR, "cpg_finalize failed with result %d\n", res);
exit (1);
}
if (quiet < 2) {
/* Don't print LONG_MAX for min_rtt if we don't have a value */
if (min_rtt == LONG_MAX) {
min_rtt = 0L;
}
if (machine_readable) {
cpgh_log_printf(CPGH_LOG_STATS, "%d%c%d%c%d%c%d%c%d%c%d%c%d%c%ld%c%ld%c%ld\n",
packets_sent, delimiter,
send_fails, delimiter,
send_retries, delimiter,
length_errors, delimiter,
packets_recvd, delimiter,
sequence_errors, delimiter,
crc_errors, delimiter,
min_rtt, delimiter,
avg_rtt, delimiter,
max_rtt);
}
else {
cpgh_log_printf(CPGH_LOG_STATS, "\n");
cpgh_log_printf(CPGH_LOG_STATS, "Stats:\n");
if (!listen_only) {
cpgh_log_printf(CPGH_LOG_STATS, " packets sent: %d\n", packets_sent);
cpgh_log_printf(CPGH_LOG_STATS, " send failures: %d\n", send_fails);
cpgh_log_printf(CPGH_LOG_STATS, " send retries: %d\n", send_retries);
}
cpgh_log_printf(CPGH_LOG_STATS, " length errors: %d\n", length_errors);
cpgh_log_printf(CPGH_LOG_STATS, " packets recvd: %d\n", packets_recvd);
cpgh_log_printf(CPGH_LOG_STATS, " sequence errors: %d\n", sequence_errors);
cpgh_log_printf(CPGH_LOG_STATS, " crc errors: %d\n", crc_errors);
if (!listen_only) {
cpgh_log_printf(CPGH_LOG_STATS, " min RTT: %ld\n", min_rtt);
cpgh_log_printf(CPGH_LOG_STATS, " max RTT: %ld\n", max_rtt);
cpgh_log_printf(CPGH_LOG_STATS, " avg RTT: %ld\n", avg_rtt);
}
cpgh_log_printf(CPGH_LOG_STATS, "\n");
}
}
res = 0;
if (send_fails > 0 || (have_size && length_errors > 0) || sequence_errors > 0 || crc_errors > 0) {
res = 2;
}
return (res);
}
| 10,990 |
16,461 | // Copyright 2019-present 650 Industries. All rights reserved.
#import <ExpoModulesCore/EXExportedModule.h>
#import <ExpoModulesCore/EXModuleRegistryConsumer.h>
#import <Foundation/Foundation.h>
// Expo universal module exposing Firebase Analytics; consumes the module
// registry to resolve its dependencies at runtime.
@interface EXFirebaseAnalytics : EXExportedModule <EXModuleRegistryConsumer>
@end
| 86 |
471 | <reponame>madanagopaltcomcast/pxCore
/////////////////////////////////////////////////////////////////////////////
// Name: src/os2/palette.cpp
// Purpose: wxPalette
// Author: AUTHOR
// Modified by:
// Created: ??/??/98
// Copyright: (c) AUTHOR
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#ifndef WX_PRECOMP
#include <stdio.h>
#include "wx/string.h"
#include "wx/os2/private.h"
#include "wx/palette.h"
#include "wx/app.h"
#endif
#define INCL_PM
#define INCL_GPI
#include "assert.h"
IMPLEMENT_DYNAMIC_CLASS(wxPalette, wxGDIObject)
/*
* Palette
*
*/
// Default ref-data: no GPI palette or presentation space attached yet.
wxPaletteRefData::wxPaletteRefData()
{
    m_hPalette = NULLHANDLE;
    m_hPS = NULLHANDLE;
} // end of wxPaletteRefData::wxPaletteRefData
wxPaletteRefData::~wxPaletteRefData()
{
    // NOTE(review): this early return is a no-op -- nothing follows it and
    // the GPI palette is not deleted here.  Cleanup happens only via
    // wxPalette::FreeResource(); confirm that is always invoked, otherwise
    // the HPAL handle leaks.
    if ( m_hPalette )
        return;
} // end of wxPaletteRefData::~wxPaletteRefData
wxPalette::wxPalette()
{
} // end of wxPalette::wxPalette
wxPalette::wxPalette(
int n
, const unsigned char* pRed
, const unsigned char* pGreen
, const unsigned char* pBlue
)
{
Create( n
,pRed
,pGreen
,pBlue
);
} // end of wxPalette::wxPalette
wxPalette::~wxPalette()
{
} // end of wxPalette::~wxPalette
// Release the GPI palette owned by this object: deselect it from the stored
// presentation space, then delete the palette handle.  Always reports success.
bool wxPalette::FreeResource( bool WXUNUSED(bForce) )
{
    if ( M_PALETTEDATA && M_PALETTEDATA->m_hPalette)
    {
        ::GpiSelectPalette(M_PALETTEDATA->m_hPS, NULLHANDLE);
        ::GpiDeletePalette((HPAL)M_PALETTEDATA->m_hPalette);
    }
    return true;
} // end of wxPalette::FreeResource
// Build an n-entry GPI palette from parallel red/green/blue arrays.
// Returns false only if the temporary colour table cannot be allocated.
bool wxPalette::Create( int n,
                        const unsigned char* pRed,
                        const unsigned char* pGreen,
                        const unsigned char* pBlue )
{
    PULONG pualTable;

    UnRef();
    m_refData = new wxPaletteRefData;
    pualTable = new ULONG[n];
    if (!pualTable)
        return false;
    // Pack each entry as 0xFFRRGGBB: PC_RESERVED in the top byte
    // (16777216 == 1 << 24), then red, green, blue.
    for (int i = 0; i < n; i ++)
    {
        pualTable[i] = (PC_RESERVED * 16777216) + ((int)pRed[i] * 65536) + ((int)pGreen[i] * 256) + (int)pBlue[i];
    }
    M_PALETTEDATA->m_hPalette = (WXHPALETTE)::GpiCreatePalette( vHabmain
                                                               ,LCOL_PURECOLOR
                                                               ,LCOLF_CONSECRGB
                                                               ,(LONG)n
                                                               ,pualTable
                                                              );
    delete [] pualTable;
    return true;
} // end of wxPalette::Create
// Ref-counting support: create fresh, empty ref-data.
wxGDIRefData *wxPalette::CreateGDIRefData() const
{
    return new wxPaletteRefData;
}

// Ref-counting support: deep-copy existing ref-data (copy-on-write).
wxGDIRefData *wxPalette::CloneGDIRefData(const wxGDIRefData *data) const
{
    return new wxPaletteRefData(*static_cast<const wxPaletteRefData *>(data));
}
int wxPalette::GetPixel( unsigned char cRed,
unsigned char cGreen,
unsigned char cBlue) const
{
bool bFound = false;
PULONG pualTable = NULL;
ULONG ulNumEntries;
ULONG ulRGB = (PC_RESERVED * 16777216) +
((int)cRed * 65536) +
((int)cGreen * 256) +
(int)cBlue;
if (!m_refData)
return wxNOT_FOUND;
//
// Get number of entries first
//
ulNumEntries = ::GpiQueryPaletteInfo( M_PALETTEDATA->m_hPalette
,M_PALETTEDATA->m_hPS
,0 // No options
,0 // No start index
,0 // Force return of number entries
,NULL // No array
);
pualTable = new ULONG[ulNumEntries];
//
// Now get the entries
//
ulNumEntries = ::GpiQueryPaletteInfo( M_PALETTEDATA->m_hPalette
,M_PALETTEDATA->m_hPS
,0 // No options
,0 // start at 0
,ulNumEntries // Force return of number entries
,pualTable // Palette entry array with RGB values
);
//
// Now loop through and find the matching entry
//
ULONG i;
for (i = 0; i < ulNumEntries; i++)
{
if (pualTable[i] == ulRGB)
{
bFound = true;
break;
}
}
if (!bFound)
return wxNOT_FOUND;
return (i + 1);
} // end of wxPalette::GetPixel
bool wxPalette::GetRGB( int nIndex,
unsigned char* pRed,
unsigned char* pGreen,
unsigned char* pBlue) const
{
PULONG pualTable = NULL;
RGB2 vRGB;
ULONG ulNumEntries;
if (!m_refData)
return false;
if (nIndex < 0 || nIndex > 255)
return false;
//
// Get number of entries first
//
ulNumEntries = ::GpiQueryPaletteInfo( M_PALETTEDATA->m_hPalette
,M_PALETTEDATA->m_hPS
,0 // No options
,0 // No start index
,0 // Force return of number entries
,NULL // No array
);
pualTable = new ULONG[ulNumEntries];
//
// Now get the entries
//
ulNumEntries = ::GpiQueryPaletteInfo( M_PALETTEDATA->m_hPalette
,M_PALETTEDATA->m_hPS
,0 // No options
,0 // start at 0
,ulNumEntries // Force return of number entries
,pualTable // Palette entry array with RGB values
);
memcpy(&vRGB, &pualTable[nIndex], sizeof(RGB2));
*pBlue = vRGB.bBlue;
*pGreen = vRGB.bGreen;
*pRed = vRGB.bRed;
return true;
} // end of wxPalette::GetRGB
// Attaches an existing GPI palette handle to this wxPalette.
void wxPalette::SetHPALETTE(
  WXHPALETTE hPal
)
{
    if ( !m_refData )
        m_refData = new wxPaletteRefData;
    M_PALETTEDATA->m_hPalette = hPal;
} // end of wxPalette::SetHPALETTE

// Associates this palette with a presentation space.
void wxPalette::SetPS(
  HPS hPS
)
{
    if ( !m_refData )
        m_refData = new wxPaletteRefData;
    // NOTE(review): the palette is selected into the *previous* m_hPS before
    // the new handle is stored -- confirm this ordering is intentional.
    ::GpiSelectPalette(M_PALETTEDATA->m_hPS, M_PALETTEDATA->m_hPalette);
    M_PALETTEDATA->m_hPS = hPS;
} // end of wxPalette::SetPS
| 4,294 |
3,200 | <gh_stars>1000+
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import EXCLUDE, fields
from polyaxon.deploy.schemas.celery import CelerySchema
from polyaxon.k8s import k8s_schemas
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.swagger import SwaggerField
class ServiceSchema(BaseCamelSchema):
    """Camel-cased schema for a generic deployable service's settings."""
    enabled = fields.Bool(allow_none=True)
    image = fields.Str(allow_none=True)
    image_tag = fields.Str(allow_none=True)
    image_pull_policy = fields.Str(allow_none=True)
    replicas = fields.Int(allow_none=True)
    concurrency = fields.Int(allow_none=True)
    resources = SwaggerField(cls=k8s_schemas.V1ResourceRequirements, allow_none=True)
    class Meta:
        # Silently ignore unknown keys instead of failing validation.
        unknown = EXCLUDE
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return Service
class Service(BaseConfig):
    """Config object for a generic service (image, replicas, resources, ...)."""
    SCHEMA = ServiceSchema
    # Attributes omitted from the serialized form when unset (camelCase keys).
    REDUCED_ATTRIBUTES = [
        "enabled",
        "image",
        "imageTag",
        "imagePullPolicy",
        "replicas",
        "concurrency",
        "resources",
    ]
    def __init__(
        self,
        enabled=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        concurrency=None,
        resources=None,
    ):
        self.enabled = enabled
        self.image = image
        self.image_tag = image_tag
        self.image_pull_policy = image_pull_policy
        self.replicas = replicas
        self.concurrency = concurrency
        self.resources = resources
class WorkerServiceSchema(ServiceSchema):
    """Schema for the async worker service; adds a nested celery section."""
    celery = fields.Nested(CelerySchema, allow_none=True)
    @staticmethod
    def schema_config():
        return WorkerServiceConfig
class WorkerServiceConfig(Service):
    """Config for the worker service: base Service plus ``celery`` tuning."""
    SCHEMA = WorkerServiceSchema
    REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + ["celery"]
    def __init__(
        self,
        enabled=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        concurrency=None,
        resources=None,
        celery=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            concurrency=concurrency,
            resources=resources,
        )
        self.celery = celery
class HelperServiceSchema(ServiceSchema):
    """Schema for the helper/sidecar service; adds polling intervals."""
    sleep_interval = fields.Int(allow_none=True)
    sync_interval = fields.Int(allow_none=True)
    @staticmethod
    def schema_config():
        return HelperServiceConfig
class HelperServiceConfig(Service):
    """Config for the helper service with sleep/sync interval settings."""
    SCHEMA = HelperServiceSchema
    REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + [
        "sleepInterval",
        "syncInterval",
    ]
    def __init__(
        self,
        enabled=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        concurrency=None,
        resources=None,
        sleep_interval=None,
        sync_interval=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            concurrency=concurrency,
            resources=resources,
        )
        self.sleep_interval = sleep_interval
        self.sync_interval = sync_interval
class AgentServiceSchema(ServiceSchema):
    """Schema for the agent service; adds identity/token and replica flags."""
    instance = fields.String(allow_none=True)
    token = fields.String(allow_none=True)
    is_replica = fields.Bool(allow_none=True)
    compressed_logs = fields.Bool(allow_none=True)
    @staticmethod
    def schema_config():
        return AgentServiceConfig
class AgentServiceConfig(Service):
    """Config for the agent service (instance id, auth token, flags)."""
    SCHEMA = AgentServiceSchema
    REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + [
        "instance",
        "token",
        "isReplica",
        "compressedLogs",
    ]
    def __init__(
        self,
        enabled=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        concurrency=None,
        resources=None,
        instance=None,
        token=None,
        is_replica=None,
        compressed_logs=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            concurrency=concurrency,
            resources=resources,
        )
        self.instance = instance
        self.token = token
        self.is_replica = is_replica
        self.compressed_logs = compressed_logs
class OperatorServiceSchema(ServiceSchema):
    """Schema for the operator service; CRD-related flags use explicit keys."""
    skip_crd = fields.Bool(allow_none=True, data_key="skipCRD")
    use_crd_v1beta1 = fields.Bool(allow_none=True, data_key="useCRDV1Beta1")
    @staticmethod
    def schema_config():
        return OperatorServiceConfig
class OperatorServiceConfig(Service):
    """Config for the operator service with CRD installation options."""
    SCHEMA = OperatorServiceSchema
    REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + ["skipCRD", "useCRDV1Beta1"]
    def __init__(
        self,
        enabled=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        concurrency=None,
        resources=None,
        skip_crd=None,
        use_crd_v1beta1=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            concurrency=concurrency,
            resources=resources,
        )
        self.skip_crd = skip_crd
        self.use_crd_v1beta1 = use_crd_v1beta1
class ApiServiceSchema(ServiceSchema):
    """Schema for the API service; adds a free-form k8s service dict."""
    service = fields.Dict(allow_none=True)
    @staticmethod
    def schema_config():
        return ApiServiceConfig
class ApiServiceConfig(Service):
    """Config for the API service.

    Note: ``service`` is intentionally not in REDUCED_ATTRIBUTES, so it is
    always present in the serialized output.
    """
    SCHEMA = ApiServiceSchema
    def __init__(
        self,
        enabled=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        concurrency=None,
        resources=None,
        service=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            concurrency=concurrency,
            resources=resources,
        )
        self.service = service
class HooksSchema(ServiceSchema):
    """Schema for deployment hooks; can request fixture loading."""
    load_fixtures = fields.Bool(allow_none=True)
    @staticmethod
    def schema_config():
        return HooksConfig
class HooksConfig(Service):
    """Config for deployment hooks."""
    SCHEMA = HooksSchema
    REDUCED_ATTRIBUTES = Service.REDUCED_ATTRIBUTES + ["loadFixtures"]
    def __init__(
        self,
        enabled=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        concurrency=None,
        resources=None,
        load_fixtures=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            concurrency=concurrency,
            resources=resources,
        )
        self.load_fixtures = load_fixtures
class ThirdPartyServiceSchema(ServiceSchema):
    """Schema for bundled third-party services; adds scheduling/storage keys."""
    enabled = fields.Bool(allow_none=True)
    persistence = fields.Dict(allow_none=True)
    node_selector = fields.Dict(allow_none=True)
    affinity = fields.Dict(allow_none=True)
    tolerations = fields.List(fields.Dict(allow_none=True), allow_none=True)
    @staticmethod
    def schema_config():
        return ThirdPartyService
class ThirdPartyService(Service):
    """Base config for third-party dependencies (DB, broker, cache).

    Note: ``concurrency`` stays in REDUCED_ATTRIBUTES although it is not a
    constructor parameter here; it is defaulted to None by Service.__init__.
    """
    SCHEMA = ThirdPartyServiceSchema
    REDUCED_ATTRIBUTES = [
        "enabled",
        "image",
        "imageTag",
        "imagePullPolicy",
        "replicas",
        "concurrency",
        "resources",
        "persistence",
        "nodeSelector",
        "affinity",
        "tolerations",
    ]
    def __init__(
        self,
        enabled=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        resources=None,
        persistence=None,
        node_selector=None,
        affinity=None,
        tolerations=None,
    ):
        super().__init__(
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            resources=resources,
        )
        self.enabled = enabled
        self.persistence = persistence
        self.node_selector = node_selector
        self.affinity = affinity
        self.tolerations = tolerations
class PostgresqlSchema(ThirdPartyServiceSchema):
    """Schema for the in-cluster PostgreSQL dependency."""
    postgres_user = fields.Str(allow_none=True)
    postgres_password = fields.Str(allow_none=True)
    postgres_database = fields.Str(allow_none=True)
    conn_max_age = fields.Int(allow_none=True)

    @staticmethod
    def schema_config():
        return PostgresqlConfig


class PostgresqlConfig(ThirdPartyService):
    """Config for the in-cluster PostgreSQL dependency.

    FIX: the checked-in source contained literal ``<PASSWORD>`` placeholders
    (invalid Python) for the password default and its assignment; restored to
    a ``None`` default stored unchanged on the instance.
    """
    SCHEMA = PostgresqlSchema
    REDUCED_ATTRIBUTES = ThirdPartyService.REDUCED_ATTRIBUTES + [
        "postgresUser",
        "postgresPassword",
        "postgresDatabase",
        "connMaxAge",
    ]

    def __init__(
        self,
        enabled=None,
        postgres_user=None,
        postgres_password=None,
        postgres_database=None,
        conn_max_age=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        resources=None,
        persistence=None,
        node_selector=None,
        affinity=None,
        tolerations=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            resources=resources,
            persistence=persistence,
            node_selector=node_selector,
            affinity=affinity,
            tolerations=tolerations,
        )
        self.postgres_user = postgres_user
        self.postgres_password = postgres_password
        self.postgres_database = postgres_database
        self.conn_max_age = conn_max_age
class RedisSchema(ThirdPartyServiceSchema):
    """Schema for the in-cluster Redis dependency."""
    image = fields.Raw(allow_none=True)
    non_broker = fields.Bool(allow_none=True)
    use_password = fields.Bool(allow_none=True)
    password = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        return RedisConfig


class RedisConfig(ThirdPartyService):
    """Config for the in-cluster Redis dependency.

    FIX: the checked-in source contained literal ``<PASSWORD>`` placeholders
    (invalid Python) for ``use_password``/``password`` defaults and the
    ``use_password`` assignment; restored to ``None`` defaults stored
    unchanged on the instance.
    """
    SCHEMA = RedisSchema
    REDUCED_ATTRIBUTES = ThirdPartyService.REDUCED_ATTRIBUTES + [
        "nonBroker",
        "usePassword",
        "password",
    ]

    def __init__(
        self,
        enabled=None,
        non_broker=None,
        use_password=None,
        password=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        resources=None,
        persistence=None,
        node_selector=None,
        affinity=None,
        tolerations=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            resources=resources,
            persistence=persistence,
            node_selector=node_selector,
            affinity=affinity,
            tolerations=tolerations,
        )
        self.non_broker = non_broker
        self.use_password = use_password
        self.password = password
class RabbitmqSchema(ThirdPartyServiceSchema):
    """Schema for the in-cluster RabbitMQ dependency."""
    rabbitmq_username = fields.Str(allow_none=True)
    rabbitmq_password = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        return RabbitmqConfig


class RabbitmqConfig(ThirdPartyService):
    """Config for the in-cluster RabbitMQ dependency.

    FIX: the checked-in source assigned a literal ``<PASSWORD>`` placeholder
    (invalid Python) to ``self.rabbitmq_password``; restored to store the
    constructor argument unchanged.
    """
    SCHEMA = RabbitmqSchema
    REDUCED_ATTRIBUTES = ThirdPartyService.REDUCED_ATTRIBUTES + [
        "rabbitmqUsername",
        "rabbitmqPassword",
    ]

    def __init__(
        self,
        enabled=None,
        rabbitmq_username=None,
        rabbitmq_password=None,
        image=None,
        image_tag=None,
        image_pull_policy=None,
        replicas=None,
        resources=None,
        persistence=None,
        node_selector=None,
        affinity=None,
        tolerations=None,
    ):
        super().__init__(
            enabled=enabled,
            image=image,
            image_tag=image_tag,
            image_pull_policy=image_pull_policy,
            replicas=replicas,
            resources=resources,
            persistence=persistence,
            node_selector=node_selector,
            affinity=affinity,
            tolerations=tolerations,
        )
        self.rabbitmq_username = rabbitmq_username
        self.rabbitmq_password = rabbitmq_password
class ExternalServiceSchema(BaseCamelSchema):
    """Schema describing a connection to an externally-managed service."""
    user = fields.Str(allow_none=True)
    password = fields.Str(allow_none=True)
    host = fields.Str(allow_none=True)
    port = fields.Int(allow_none=True)
    database = fields.Str(allow_none=True)
    use_password = fields.Bool(allow_none=True)
    conn_max_age = fields.Int(allow_none=True)
    pgbouncer = fields.Dict(allow_none=True)
    options = fields.Dict(allow_none=True)
    @staticmethod
    def schema_config():
        return ExternalService
class ExternalService(BaseConfig):
    """Connection settings for an external service (DB, broker, gateway)."""
    SCHEMA = ExternalServiceSchema
    # Attributes omitted from the serialized form when unset (camelCase keys).
    REDUCED_ATTRIBUTES = [
        "user",
        "password",
        "host",
        "port",
        "database",
        "usePassword",
        "connMaxAge",
        "pgbouncer",
        "options",
    ]
    def __init__(
        self,
        user=None,
        password=None,
        host=None,
        port=None,
        database=None,
        use_password=None,
        conn_max_age=None,
        pgbouncer=None,
        options=None,
    ):
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.database = database
        self.use_password = use_password
        self.conn_max_age = conn_max_age
        self.pgbouncer = pgbouncer
        self.options = options
class ExternalBackendSchema(BaseCamelSchema):
    """Schema for a pluggable external backend (name + free-form options)."""
    enabled = fields.Bool(allow_none=True)
    backend = fields.Str(allow_none=True)
    options = fields.Dict(allow_none=True)
    @staticmethod
    def schema_config():
        return ExternalBackend
class ExternalBackend(BaseConfig):
    """Config for a pluggable external backend."""
    SCHEMA = ExternalBackendSchema
    REDUCED_ATTRIBUTES = [
        "enabled",
        "backend",
        "options",
    ]
    def __init__(
        self,
        enabled=None,
        backend=None,
        options=None,
    ):
        self.enabled = enabled
        self.backend = backend
        self.options = options
class AuthServicesSchema(BaseCamelSchema):
    """Schema grouping the supported SSO/auth providers."""
    github = fields.Nested(ExternalBackendSchema, allow_none=True)
    gitlab = fields.Nested(ExternalBackendSchema, allow_none=True)
    bitbucket = fields.Nested(ExternalBackendSchema, allow_none=True)
    google = fields.Nested(ExternalBackendSchema, allow_none=True)
    saml = fields.Nested(ExternalBackendSchema, allow_none=True)
    @staticmethod
    def schema_config():
        return AuthServicesConfig
class AuthServicesConfig(BaseConfig):
    """Config holding one ExternalBackend per auth provider."""
    SCHEMA = AuthServicesSchema
    REDUCED_ATTRIBUTES = [
        "github",
        "gitlab",
        "bitbucket",
        "google",
        "saml",
    ]
    def __init__(
        self,
        github=None,
        gitlab=None,
        bitbucket=None,
        google=None,
        saml=None,
    ):
        self.github = github
        self.gitlab = gitlab
        self.bitbucket = bitbucket
        self.google = google
        self.saml = saml
class ExternalServicesSchema(BaseCamelSchema):
    """Schema grouping every externally-managed dependency and backend."""
    redis = fields.Nested(ExternalServiceSchema, allow_none=True)
    rabbitmq = fields.Nested(ExternalServiceSchema, allow_none=True)
    postgresql = fields.Nested(ExternalServiceSchema, allow_none=True)
    gateway = fields.Nested(ExternalServiceSchema, allow_none=True)
    api = fields.Nested(ExternalServiceSchema, allow_none=True)
    transactions = fields.Nested(ExternalBackendSchema, allow_none=True)
    analytics = fields.Nested(ExternalBackendSchema, allow_none=True)
    metrics = fields.Nested(ExternalBackendSchema, allow_none=True)
    errors = fields.Nested(ExternalBackendSchema, allow_none=True)
    auth = fields.Nested(AuthServicesSchema, allow_none=True)
    allowed_versions = fields.List(fields.Str(), allow_none=True)
    @staticmethod
    def schema_config():
        return ExternalServicesConfig
class ExternalServicesConfig(BaseConfig):
    """Config aggregating external service connections and backends."""
    SCHEMA = ExternalServicesSchema
    REDUCED_ATTRIBUTES = [
        "redis",
        "rabbitmq",
        "postgresql",
        "gateway",
        "api",
        "transactions",
        "analytics",
        "metrics",
        "errors",
        "auth",
        "allowedVersions",
    ]
    def __init__(
        self,
        redis=None,
        rabbitmq=None,
        postgresql=None,
        gateway=None,
        api=None,
        transactions=None,
        analytics=None,
        metrics=None,
        errors=None,
        auth=None,
        allowed_versions=None,
    ):
        self.redis = redis
        self.rabbitmq = rabbitmq
        self.postgresql = postgresql
        self.gateway = gateway
        self.api = api
        self.transactions = transactions
        self.analytics = analytics
        self.metrics = metrics
        self.errors = errors
        self.auth = auth
        self.allowed_versions = allowed_versions
| 8,381 |
1,603 | <gh_stars>1000+
package com.linkedin.metadata.entity;
import com.linkedin.common.urn.Urn;
import com.linkedin.data.template.DataTemplateUtil;
import com.linkedin.data.template.RecordTemplate;
import com.linkedin.identity.CorpUserInfo;
import com.linkedin.metadata.EbeanTestUtils;
import com.linkedin.metadata.entity.ebean.EbeanAspectDao;
import com.linkedin.metadata.entity.ebean.EbeanRetentionService;
import com.linkedin.metadata.event.EventProducer;
import com.linkedin.metadata.key.CorpUserKey;
import com.linkedin.metadata.models.registry.EntityRegistryException;
import com.linkedin.metadata.query.ListUrnsResult;
import com.linkedin.metadata.utils.PegasusUtils;
import com.linkedin.mxe.SystemMetadata;
import io.ebean.EbeanServer;
import io.ebean.Transaction;
import io.ebean.TxScope;
import io.ebean.annotation.TxIsolation;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import static org.mockito.Mockito.mock;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
/**
* A class that knows how to configure {@link EntityServiceTest} to run integration tests against a relational database.
*
* This class also contains all the test methods where realities of an underlying storage leak into the
* {@link EntityService} in the form of subtle behavior differences. Ideally that should never happen, and it'd be
* great to address captured differences.
*/
public class EbeanEntityServiceTest extends EntityServiceTest<EbeanAspectDao, EbeanRetentionService> {

    public EbeanEntityServiceTest() throws EntityRegistryException {
    }

    @BeforeMethod
    public void setupTest() {
        // Fresh in-memory Ebean server and collaborators per test method,
        // so each test runs against isolated state.
        EbeanServer server = EbeanTestUtils.createTestServer();
        _mockProducer = mock(EventProducer.class);
        _aspectDao = new EbeanAspectDao(server);
        _aspectDao.setConnectionValidated(true);
        _entityService = new EntityService(_aspectDao, _mockProducer, _testEntityRegistry);
        _retentionService = new EbeanRetentionService(_entityService, server, 1000);
        _entityService.setRetentionService(_retentionService);
    }

    /**
     * Ideally, all tests would be in the base class, so they're reused between all implementations.
     * When that's the case - test runner will ignore this class (and its base!) so we keep this dummy test
     * to make sure this class will always be discovered.
     */
    @Test
    public void obligatoryTest() throws Exception {
        Assert.assertTrue(true);
    }

    // Verifies paging over the latest aspects: page 1 returns the first two
    // ingested aspects in order, page 2 the remaining one.
    @Override
    @Test
    public void testIngestListLatestAspects() throws Exception {
        // TODO: If you're modifying this test - match your changes in sibling implementations.
        // TODO: Move this test into the base class,
        //       If you can find a way for Cassandra and relational databases to share result ordering rules.
        Urn entityUrn1 = Urn.createFromString("urn:li:corpuser:test1");
        Urn entityUrn2 = Urn.createFromString("urn:li:corpuser:test2");
        Urn entityUrn3 = Urn.createFromString("urn:li:corpuser:test3");
        SystemMetadata metadata1 = new SystemMetadata();
        metadata1.setLastObserved(1625792689);
        metadata1.setRunId("run-123");
        String aspectName = PegasusUtils.getAspectNameFromSchema(new CorpUserInfo().schema());
        // Ingest CorpUserInfo Aspect #1
        CorpUserInfo writeAspect1 = createCorpUserInfo("<EMAIL>");
        _entityService.ingestAspect(entityUrn1, aspectName, writeAspect1, TEST_AUDIT_STAMP, metadata1);
        // Ingest CorpUserInfo Aspect #2
        CorpUserInfo writeAspect2 = createCorpUserInfo("<EMAIL>");
        _entityService.ingestAspect(entityUrn2, aspectName, writeAspect2, TEST_AUDIT_STAMP, metadata1);
        // Ingest CorpUserInfo Aspect #3
        CorpUserInfo writeAspect3 = createCorpUserInfo("<EMAIL>");
        _entityService.ingestAspect(entityUrn3, aspectName, writeAspect3, TEST_AUDIT_STAMP, metadata1);
        // List aspects
        ListResult<RecordTemplate> batch1 = _entityService.listLatestAspects(entityUrn1.getEntityType(), aspectName, 0, 2);
        assertEquals(batch1.getNextStart(), 2);
        assertEquals(batch1.getPageSize(), 2);
        assertEquals(batch1.getTotalCount(), 3);
        assertEquals(batch1.getTotalPageCount(), 2);
        assertEquals(batch1.getValues().size(), 2);
        assertTrue(DataTemplateUtil.areEqual(writeAspect1, batch1.getValues().get(0)));
        assertTrue(DataTemplateUtil.areEqual(writeAspect2, batch1.getValues().get(1)));
        ListResult<RecordTemplate> batch2 = _entityService.listLatestAspects(entityUrn1.getEntityType(), aspectName, 2, 2);
        assertEquals(batch2.getValues().size(), 1);
        assertTrue(DataTemplateUtil.areEqual(writeAspect3, batch2.getValues().get(0)));
    }

    // Verifies paging over entity urns after ingesting three key aspects.
    @Override
    @Test
    public void testIngestListUrns() throws Exception {
        // TODO: If you're modifying this test - match your changes in sibling implementations.
        // TODO: Move this test into the base class,
        //       If you can find a way for Cassandra and relational databases to share result ordering rules.
        Urn entityUrn1 = Urn.createFromString("urn:li:corpuser:test1");
        Urn entityUrn2 = Urn.createFromString("urn:li:corpuser:test2");
        Urn entityUrn3 = Urn.createFromString("urn:li:corpuser:test3");
        SystemMetadata metadata1 = new SystemMetadata();
        metadata1.setLastObserved(1625792689);
        metadata1.setRunId("run-123");
        String aspectName = PegasusUtils.getAspectNameFromSchema(new CorpUserKey().schema());
        // Ingest CorpUserInfo Aspect #1
        RecordTemplate writeAspect1 = createCorpUserKey(entityUrn1);
        _entityService.ingestAspect(entityUrn1, aspectName, writeAspect1, TEST_AUDIT_STAMP, metadata1);
        // Ingest CorpUserInfo Aspect #2
        RecordTemplate writeAspect2 = createCorpUserKey(entityUrn2);
        _entityService.ingestAspect(entityUrn2, aspectName, writeAspect2, TEST_AUDIT_STAMP, metadata1);
        // Ingest CorpUserInfo Aspect #3
        RecordTemplate writeAspect3 = createCorpUserKey(entityUrn3);
        _entityService.ingestAspect(entityUrn3, aspectName, writeAspect3, TEST_AUDIT_STAMP, metadata1);
        // List aspects urns
        ListUrnsResult batch1 = _entityService.listUrns(entityUrn1.getEntityType(), 0, 2);
        assertEquals((int) batch1.getStart(), 0);
        assertEquals((int) batch1.getCount(), 2);
        assertEquals((int) batch1.getTotal(), 3);
        assertEquals(batch1.getEntities().size(), 2);
        assertEquals(entityUrn1.toString(), batch1.getEntities().get(0).toString());
        assertEquals(entityUrn2.toString(), batch1.getEntities().get(1).toString());
        ListUrnsResult batch2 = _entityService.listUrns(entityUrn1.getEntityType(), 2, 2);
        assertEquals((int) batch2.getStart(), 2);
        assertEquals((int) batch2.getCount(), 1);
        assertEquals((int) batch2.getTotal(), 3);
        assertEquals(batch2.getEntities().size(), 1);
        assertEquals(entityUrn3.toString(), batch2.getEntities().get(0).toString());
    }

    // Smoke-tests that nested REPEATABLE_READ transactions commit cleanly
    // on the Ebean test server (no assertions beyond "does not throw").
    @Override
    @Test
    public void testNestedTransactions() throws Exception {
        EbeanServer server = _aspectDao.getServer();
        try (Transaction transaction = server.beginTransaction(TxScope.requiresNew()
            .setIsolation(TxIsolation.REPEATABLE_READ))) {
            transaction.setBatchMode(true);
            // Work 1
            try (Transaction transaction2 = server.beginTransaction(TxScope.requiresNew()
                .setIsolation(TxIsolation.REPEATABLE_READ))) {
                transaction2.setBatchMode(true);
                // Work 2
                transaction2.commit();
            }
            transaction.commit();
        } catch (Exception e) {
            System.out.printf("Top level catch %s%n", e);
            e.printStackTrace();
            throw e;
        }
        System.out.println("done");
    }
}
| 2,566 |
1,347 | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class RKDLoss(nn.Module):
    """Relational Knowledge Distillation (CVPR 2019).

    Combines a pairwise-distance term (weighted by ``w_d``) with a
    triple-angle term (weighted by ``w_a``) between teacher and student
    embeddings; teacher relations are treated as constants (no grad).
    """

    def __init__(self, w_d=25, w_a=50):
        super(RKDLoss, self).__init__()
        self.w_d = w_d  # weight of the distance-matching term
        self.w_a = w_a  # weight of the angle-matching term

    def forward(self, f_s, f_t):
        # Flatten each sample to a vector: (batch, -1).
        student = f_s.view(f_s.shape[0], -1)
        teacher = f_t.view(f_t.shape[0], -1)

        # Distance term: pairwise distances normalized by their positive mean.
        with torch.no_grad():
            dist_t = self.pdist(teacher, squared=False)
            dist_t = dist_t / dist_t[dist_t > 0].mean()
        dist_s = self.pdist(student, squared=False)
        dist_s = dist_s / dist_s[dist_s > 0].mean()
        loss_d = F.smooth_l1_loss(dist_s, dist_t)

        # Angle term: cosines of angles spanned by every ordered point triple.
        with torch.no_grad():
            diff_t = teacher.unsqueeze(0) - teacher.unsqueeze(1)
            unit_t = F.normalize(diff_t, p=2, dim=2)
            angle_t = torch.bmm(unit_t, unit_t.transpose(1, 2)).view(-1)
        diff_s = student.unsqueeze(0) - student.unsqueeze(1)
        unit_s = F.normalize(diff_s, p=2, dim=2)
        angle_s = torch.bmm(unit_s, unit_s.transpose(1, 2)).view(-1)
        loss_a = F.smooth_l1_loss(angle_s, angle_t)

        return self.w_d * loss_d + self.w_a * loss_a

    @staticmethod
    def pdist(e, squared=False, eps=1e-12):
        # ||a - b||^2 = |a|^2 + |b|^2 - 2<a, b>, clamped for stability.
        sq_norms = e.pow(2).sum(dim=1)
        gram = e @ e.t()
        res = (sq_norms.unsqueeze(1) + sq_norms.unsqueeze(0) - 2 * gram).clamp(min=eps)
        if not squared:
            res = res.sqrt()
        # Zero the diagonal (distance of each point to itself).
        res = res.clone()
        res[range(len(e)), range(len(e))] = 0
        return res
| 893 |
3,640 | <reponame>bTest2018/Store
package com.nytimes.android.external.store3.base;
import javax.annotation.Nonnull;
import io.reactivex.Maybe;
/**
 * Contract for reading a raw (unparsed) value for a key from the disk layer.
 *
 * @param <Raw> type of the stored raw representation
 * @param <Key> type of the lookup key
 */
public interface DiskRead<Raw, Key> {
    /**
     * @param key non-null lookup key
     * @return a {@link Maybe} for the stored raw value
     */
    @Nonnull
    Maybe<Raw> read(@Nonnull Key key);
}
| 88 |
5,766 | //
// TextConverter.cpp
//
// Library: Foundation
// Package: Text
// Module: TextConverter
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/TextConverter.h"
#include "Poco/TextIterator.h"
#include "Poco/TextEncoding.h"
namespace {
	// Identity transform used by the convenience convert() overloads
	// that take no per-character transformation.
	int nullTransform(int ch)
	{
		return ch;
	}
}
namespace Poco {
// Creates a converter between the given encodings; defaultChar is
// substituted for characters that cannot be decoded or re-encoded.
// The encoding references must outlive this converter.
TextConverter::TextConverter(const TextEncoding& inEncoding, const TextEncoding& outEncoding, int defaultChar):
	_inEncoding(inEncoding),
	_outEncoding(outEncoding),
	_defaultChar(defaultChar)
{
}

TextConverter::~TextConverter()
{
}
// Converts source from the input to the output encoding, applying trans to
// each character, and appends the result to destination.  Returns the
// number of invalid input sequences encountered.
int TextConverter::convert(const std::string& source, std::string& destination, Transform trans)
{
	int errors = 0;
	TextIterator it(source, _inEncoding);
	TextIterator end(source);
	unsigned char buffer[TextEncoding::MAX_SEQUENCE_LENGTH];
	while (it != end)
	{
		int c = *it;
		// -1 marks an invalid input sequence: count it and substitute.
		if (c == -1) { ++errors; c = _defaultChar; }
		c = trans(c);
		int n = _outEncoding.convert(c, buffer, sizeof(buffer));
		// 0 means the character is not representable in the target encoding.
		if (n == 0) n = _outEncoding.convert(_defaultChar, buffer, sizeof(buffer));
		poco_assert (n <= sizeof(buffer));
		destination.append((const char*) buffer, n);
		++it;
	}
	return errors;
}
// Converts length bytes starting at source from the input to the output
// encoding, applying trans to each character, and appends the result to
// destination.  Returns the number of invalid/incomplete input sequences.
int TextConverter::convert(const void* source, int length, std::string& destination, Transform trans)
{
	poco_check_ptr (source);
	int errors = 0;
	const unsigned char* it = (const unsigned char*) source;
	const unsigned char* end = (const unsigned char*) source + length;
	unsigned char buffer[TextEncoding::MAX_SEQUENCE_LENGTH];
	while (it < end)
	{
		// Probe with one byte; queryConvert() returns either the decoded
		// character, or a value n < -1 meaning "-n bytes are needed".
		int n = _inEncoding.queryConvert(it, 1);
		int uc;
		int read = 1;
		// Re-probe with the requested byte count until the sequence is
		// complete or the input buffer runs out.
		while (-1 > n && (end - it) >= -n)
		{
			read = -n;
			n = _inEncoding.queryConvert(it, read);
		}
		if (-1 > n)
		{
			// Sequence truncated at the end of the buffer: stop consuming.
			it = end;
		}
		else
		{
			it += read;
		}
		if (-1 >= n)
		{
			// Invalid or incomplete input: count an error, use default char.
			uc = _defaultChar;
			++errors;
		}
		else
		{
			uc = n;
		}
		uc = trans(uc);
		n = _outEncoding.convert(uc, buffer, sizeof(buffer));
		// Unrepresentable in the target encoding: emit the default char.
		if (n == 0) n = _outEncoding.convert(_defaultChar, buffer, sizeof(buffer));
		poco_assert (n <= sizeof(buffer));
		destination.append((const char*) buffer, n);
	}
	return errors;
}
// Convenience overloads: convert without any per-character transformation.
int TextConverter::convert(const std::string& source, std::string& destination)
{
	return convert(source, destination, nullTransform);
}

int TextConverter::convert(const void* source, int length, std::string& destination)
{
	return convert(source, length, destination, nullTransform);
}
} // namespace Poco
| 949 |
576 | <reponame>kovzol/CindyJS
package cindyjs;
import com.google.gwt.core.client.JavaScriptObject;
import com.google.gwt.core.client.JsArray;
/**
 * GWT overlay type over a CindyScript runtime value: a plain JS object of
 * the shape {@code {ctype, value}} accessed via JSNI.
 */
public class CjsValue extends JavaScriptObject {
    // GWT overlay types require a protected, empty constructor.
    protected CjsValue() {
    }

    /** Returns the raw {@code ctype} tag (e.g. {@code "number"}, {@code "list"}). */
    public final native String ctype() /*-{
    return this.ctype;
  }-*/;

    public final native boolean isList() /*-{
    return this.ctype === "list";
  }-*/;

    public final native boolean isNumber() /*-{
    return this.ctype === "number";
  }-*/;

    /** Returns the backing array for a list value, or {@code null} for other ctypes. */
    public final native JsArray<CjsValue> listValue() /*-{
    return this.ctype === "list" ? this.value : null;
  }-*/;

    /** Returns the real part of a number value, or {@code NaN} for other ctypes. */
    public final native double realValue() /*-{
    return this.ctype === "number" ? this.value.real : NaN;
  }-*/;

    public static native CjsValue makeNumber(double real, double imag) /*-{
    return {ctype: "number", value: {real: real, imag: imag}};
  }-*/;

    /** Builds a real number value (imaginary part zero). */
    public static native CjsValue makeNumber(double real) /*-{
    return {ctype: "number", value: {real: real, imag: 0}};
  }-*/;

    public static native CjsValue makeList(JsArray<CjsValue> value) /*-{
    return {ctype: "list", value: value};
  }-*/;
}
| 466 |
592 | """user roles
Revision ID: f<PASSWORD>
Revises: <PASSWORD>
Create Date: 2020-06-12 21:54:20.905401
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "f<PASSWORD>"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
    """Apply: create the ``role`` table and the ``user_role`` association
    table, and make ``user.email`` NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "role",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("date_created", sa.DateTime(), nullable=True),
        sa.Column("date_modified", sa.DateTime(), nullable=True),
        sa.Column("name", sa.String(length=256), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("name"),
    )
    op.create_table(
        "user_role",
        sa.Column("role_id", sa.Integer(), nullable=False),
        sa.Column("user_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["role_id"],
            ["role.id"],
        ),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["user.id"],
        ),
        sa.PrimaryKeyConstraint("role_id", "user_id"),
    )
    op.alter_column(
        "user", "email", existing_type=mysql.VARCHAR(length=256), nullable=False
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert: make ``user.email`` nullable again and drop the role tables
    (``user_role`` first, because of its foreign keys)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "user", "email", existing_type=mysql.VARCHAR(length=256), nullable=True
    )
    op.drop_table("user_role")
    op.drop_table("role")
    # ### end Alembic commands ###
| 701 |
348 | {"nom":"Gordes","circ":"5ème circonscription","dpt":"Vaucluse","inscrits":1689,"abs":673,"votants":1016,"blancs":23,"nuls":4,"exp":989,"res":[{"nuance":"REM","nom":"<NAME>","voix":375},{"nuance":"LR","nom":"M. <NAME>","voix":301},{"nuance":"FN","nom":"Mme <NAME>","voix":155},{"nuance":"FI","nom":"M. <NAME>","voix":59},{"nuance":"ECO","nom":"Mme <NAME>","voix":27},{"nuance":"DLF","nom":"Mme <NAME>","voix":20},{"nuance":"ECO","nom":"Mme <NAME>","voix":15},{"nuance":"RDG","nom":"<NAME>","voix":11},{"nuance":"DVD","nom":"M. <NAME>","voix":8},{"nuance":"DIV","nom":"Mme <NAME>","voix":6},{"nuance":"DVD","nom":"Mme <NAME>","voix":6},{"nuance":"DVG","nom":"<NAME>","voix":3},{"nuance":"EXG","nom":"M. <NAME>","voix":3}]} | 292 |
306 | <gh_stars>100-1000
import tempfile
from pathlib import Path
import imageio.v2 as iio
import numpy
import pytest
from scipy import sparse
import matplotx
def test_show():
    # Render a random sparse matrix with matplotx.spy and display it.
    matrix = sparse.rand(20, 20, density=0.1)
    figure = matplotx.spy(matrix)
    figure.show()
@pytest.mark.parametrize(
    "ref, kwargs",
    [
        (6875310, {}),
        (7524085, {"border_width": 1}),
        (21306270, {"border_width": 1, "border_color": "red"}),
        (4981037, {"colormap": "viridis"}),
        (7101351, {"colormap": "viridis", "border_width": 1}),
    ],
)
def test_png(ref, kwargs):
    """Render a fixed sparse matrix to PNG and verify a pixel checksum.

    The checksum is the dot product of the flattened image with a
    deterministic random vector (seed 123), compared against ``ref``.
    Note: the call order here is checksum-sensitive; do not reorder.
    """
    M = sparse.rand(20, 30, density=0.1, random_state=123)
    numpy.random.seed(123)
    with tempfile.TemporaryDirectory() as temp_dir:
        filepath = Path(temp_dir) / "test.png"
        matplotx.spy(M, filename=filepath, **kwargs)
        im = iio.imread(filepath)
        y = numpy.random.randint(0, 100, size=numpy.prod(im.shape))
        assert numpy.dot(y, im.flatten()) == ref
def test_readme_images():
    """Assemble a Laplace stiffness matrix with scikit-fem and spy-plot it
    (used to regenerate the README image)."""
    import meshzoo
    from skfem import BilinearForm, ElementTriP2, InteriorBasis, MeshTri
    from skfem.helpers import dot, grad
    @BilinearForm
    def laplace(u, v, _):
        # Weak form of the Laplacian: grad(u) . grad(v).
        return dot(grad(u), grad(v))
    points, cells = meshzoo.rectangle_tri((-1.0, 1.0, 20), (-1.0, 1.0, 20))
    mesh = MeshTri(points.T, cells.T)
    basis = InteriorBasis(mesh, ElementTriP2())
    A = laplace.assemble(basis)
    with tempfile.TemporaryDirectory() as temp_dir:
        filepath = Path(temp_dir) / "test.png"
        matplotx.spy(A, border_width=2, filename=filepath)
    # betterspy.write_png(
    #     'ATA.png', M, border_width=2,
    #     colormap='viridis'
    #     )
def test_cli():
    # Exercise the "spy" subcommand with and without an output filename.
    data_dir = Path(__file__).resolve().parent / "data"
    matrix_file = (data_dir / "gre_343_343_crg.mm").as_posix()
    matplotx.cli(["spy", matrix_file])
    matplotx.cli(["spy", matrix_file, "out.png"])
# Allow running this module directly to regenerate the README image.
if __name__ == "__main__":
    test_readme_images()
| 896 |
1,587 | <gh_stars>1000+
// use every strict math function
package classes.test;
import java.util.Arrays;
import java.lang.StrictMath;
/**
 * Exercises a broad slice of the {@link StrictMath} API over ordinary and
 * boundary values (MIN/MAX constants, infinities, zero) and prints every
 * result so the output can be diffed against a reference JVM.
 *
 * <p>NaN inputs are deliberately excluded so the reference output stays
 * stable across VMs/browsers.
 */
public class StrictMathTest {

    /** Prints a labelled line for an int-array result set. */
    private static void results(String name, int[] results) {
        System.out.print(name + ": ");
        System.out.println(Arrays.toString(results));
    }

    /** Prints a labelled line for a long-array result set. */
    private static void results(String name, long[] results) {
        System.out.print(name + ": ");
        System.out.println(Arrays.toString(results));
    }

    /** Prints a labelled line for a float-array result set. */
    private static void results(String name, float[] results) {
        System.out.print(name + ": ");
        System.out.println(Arrays.toString(results));
    }

    /**
     * Prints a labelled line for a double-array result set. Doubles are
     * formatted with %.13g to paper over precision issues in Chrome, see #181.
     */
    private static void results(String name, double[] results) {
        System.out.print(name + ": ");
        boolean first = true;
        String result = "";
        for (double d : results) {
            if (first)
                first = false;
            else
                result += " ";
            result += String.format("%.13g", d);
        }
        System.out.println(result);
    }

    public static void main(String[] args) {
        double[] d_vals = { 2.12345,
                            -3.256,
                            0, 3, 4,
                            Double.MAX_VALUE,
                            Double.MIN_VALUE,
                            Double.MIN_NORMAL,
                            Double.MAX_EXPONENT,
                            Double.MIN_EXPONENT,
                            //Double.NaN,
                            Double.POSITIVE_INFINITY,
                            Double.NEGATIVE_INFINITY
                          };
        float[] f_vals = { 2,
                           -5,
                           0,
                           Float.MAX_VALUE,
                           Float.MIN_VALUE,
                           Float.MIN_NORMAL,
                           Float.MAX_EXPONENT,
                           Float.MIN_EXPONENT,
                           //Float.NaN,
                           Float.POSITIVE_INFINITY,
                           Float.NEGATIVE_INFINITY
                         };
        int[] i_vals = { 245, -20, 0, Integer.MAX_VALUE, Integer.MIN_VALUE };
        long[] l_vals = { 12345678, -1235, 0, Long.MAX_VALUE, Long.MIN_VALUE };

        double[] d_results = new double[d_vals.length];
        double[] d_results_2d = new double[d_vals.length * d_vals.length];
        int[] i_results = new int[i_vals.length];
        int[] i_results_2d = new int[i_vals.length * i_vals.length];
        float[] f_results = new float[f_vals.length];
        float[] f_results_2d = new float[f_vals.length * f_vals.length];
        long[] l_results = new long[l_vals.length];
        long[] l_results_2d = new long[l_vals.length * l_vals.length];

        // Loop iterators
        int i, j;

        // BUG FIX: every two-argument section below now stores each (i, j)
        // combination at results_2d[i * vals.length + j]. The original wrote
        // all of row i's results to index i, discarding all but the last j.

        // static int abs(int a)
        for (i = 0; i < i_vals.length; i++) {
            i_results[i] = StrictMath.abs(i_vals[i]);
        }
        results("int abs(int a)", i_results);

        // static double abs(double a)
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.abs(d_vals[i]);
        }
        results("double abs(double a)", d_results);

        // static float abs(float a)
        for (i = 0; i < f_vals.length; i++) {
            f_results[i] = StrictMath.abs(f_vals[i]);
        }
        results("float abs(float a)", f_results);

        // static long abs(long a)
        for (i = 0; i < l_vals.length; i++) {
            l_results[i] = StrictMath.abs(l_vals[i]);
        }
        results("long abs(long a)", l_results);

        // static double acos(double a)
        // Returns the arc cosine of a value; the returned angle is in the range 0.0 through pi.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.acos(d_vals[i]);
        }
        results("double acos(double a)", d_results);

        // static double asin(double a)
        // Returns the arc sine of a value; the returned angle is in the range -pi/2 through pi/2.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.asin(d_vals[i]);
        }
        results("double asin(double a)", d_results);

        // static double atan(double a)
        // Returns the arc tangent of a value; the returned angle is in the range -pi/2 through pi/2.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.atan(d_vals[i]);
        }
        results("double atan(double a)", d_results);

        // static double atan2(double y, double x)
        // Returns the angle theta from the conversion of rectangular coordinates (x, y) to polar coordinates (r, theta).
        for (i = 0; i < d_vals.length; i++) {
            for (j = 0; j < d_vals.length; j++) {
                d_results_2d[i * d_vals.length + j] = StrictMath.atan2(d_vals[i], d_vals[j]);
            }
        }
        results("double atan2(double a, double b)", d_results_2d);

        // static double cbrt(double a)
        // Returns the cube root of a double value.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.cbrt(d_vals[i]);
        }
        results("double cbrt(double a)", d_results);

        // static double ceil(double a)
        // Returns the smallest (closest to negative infinity) double value that is greater than or equal to the argument and is equal to a mathematical integer.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.ceil(d_vals[i]);
        }
        results("double ceil(double a)", d_results);

        // static double cos(double a)
        // Returns the trigonometric cosine of an angle.
        for (i = 0; i < d_vals.length; i++) {
            // Some browsers don't return consistent values for this operation
            // (e.g. Firefox on Travis-CI)
            if (d_vals[i] == Double.MAX_VALUE) {
                continue;
            }
            d_results[i] = StrictMath.cos(d_vals[i]);
        }
        results("double cos(double a)", d_results);

        // static double cosh(double x)
        // Returns the hyperbolic cosine of a double value.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.cosh(d_vals[i]);
        }
        results("double cosh(double a)", d_results);

        // static double exp(double a)
        // Returns Euler's number e raised to the power of a double value.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.exp(d_vals[i]);
        }
        results("double exp(double a)", d_results);

        // static double expm1(double x)
        // Returns e^x - 1.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.expm1(d_vals[i]);
        }
        results("double expm1(double a)", d_results);

        // static double floor(double a)
        // Returns the largest (closest to positive infinity) double value that is less than or equal to the argument and is equal to a mathematical integer.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.floor(d_vals[i]);
        }
        results("double floor(double a)", d_results);

        // static double IEEEremainder(double f1, double f2)
        // Computes the remainder operation on two arguments as prescribed by the IEEE 754 standard.
        for (i = 0; i < d_vals.length; i++) {
            for (j = 0; j < d_vals.length; j++) {
                d_results_2d[i * d_vals.length + j] = StrictMath.IEEEremainder(d_vals[i], d_vals[j]);
            }
        }
        results("double IEEEremainder(double a, double b)", d_results_2d);

        // static double log(double a)
        // Returns the natural logarithm (base e) of a double value.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.log(d_vals[i]);
        }
        results("double log(double a)", d_results);

        // static double log10(double a)
        // Returns the base 10 logarithm of a double value.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.log10(d_vals[i]);
        }
        results("double log10(double a)", d_results);

        // static double log1p(double x)
        // Returns the natural logarithm of the sum of the argument and 1.
        // BUG FIX: this function was documented here but never actually exercised.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.log1p(d_vals[i]);
        }
        results("double log1p(double a)", d_results);

        // static double max(double a, double b)
        // Returns the greater of two double values.
        for (i = 0; i < d_vals.length; i++) {
            for (j = 0; j < d_vals.length; j++) {
                d_results_2d[i * d_vals.length + j] = StrictMath.max(d_vals[i], d_vals[j]);
            }
        }
        results("double max(double a, double b)", d_results_2d);

        // static float max(float a, float b)
        // Returns the greater of two float values.
        for (i = 0; i < f_vals.length; i++) {
            for (j = 0; j < f_vals.length; j++) {
                f_results_2d[i * f_vals.length + j] = StrictMath.max(f_vals[i], f_vals[j]);
            }
        }
        results("float max(float a, float b)", f_results_2d);

        // static int max(int a, int b)
        // Returns the greater of two int values.
        for (i = 0; i < i_vals.length; i++) {
            for (j = 0; j < i_vals.length; j++) {
                i_results_2d[i * i_vals.length + j] = StrictMath.max(i_vals[i], i_vals[j]);
            }
        }
        results("int max(int a, int b)", i_results_2d);

        // static long max(long a, long b)
        // Returns the greater of two long values.
        for (i = 0; i < l_vals.length; i++) {
            for (j = 0; j < l_vals.length; j++) {
                l_results_2d[i * l_vals.length + j] = StrictMath.max(l_vals[i], l_vals[j]);
            }
        }
        results("long max(long a, long b)", l_results_2d);

        // static double min(double a, double b)
        // Returns the smaller of two double values.
        for (i = 0; i < d_vals.length; i++) {
            for (j = 0; j < d_vals.length; j++) {
                d_results_2d[i * d_vals.length + j] = StrictMath.min(d_vals[i], d_vals[j]);
            }
        }
        results("double min(double a, double b)", d_results_2d);

        // static float min(float a, float b)
        // Returns the smaller of two float values.
        for (i = 0; i < f_vals.length; i++) {
            for (j = 0; j < f_vals.length; j++) {
                f_results_2d[i * f_vals.length + j] = StrictMath.min(f_vals[i], f_vals[j]);
            }
        }
        results("float min(float a, float b)", f_results_2d);

        // static int min(int a, int b)
        // Returns the smaller of two int values.
        for (i = 0; i < i_vals.length; i++) {
            for (j = 0; j < i_vals.length; j++) {
                i_results_2d[i * i_vals.length + j] = StrictMath.min(i_vals[i], i_vals[j]);
            }
        }
        results("int min(int a, int b)", i_results_2d);

        // static long min(long a, long b)
        // Returns the smaller of two long values.
        for (i = 0; i < l_vals.length; i++) {
            for (j = 0; j < l_vals.length; j++) {
                l_results_2d[i * l_vals.length + j] = StrictMath.min(l_vals[i], l_vals[j]);
            }
        }
        results("long min(long a, long b)", l_results_2d);

        // static double pow(double a, double b)
        // Returns the value of the first argument raised to the power of the second argument.
        // BUG FIX: the original wrote into d_results here while printing d_results_2d.
        for (i = 0; i < d_vals.length; i++) {
            for (j = 0; j < d_vals.length; j++) {
                d_results_2d[i * d_vals.length + j] = StrictMath.pow(d_vals[i], d_vals[j]);
            }
        }
        results("double pow(double a, double b)", d_results_2d);

        // static double hypot(double x, double y)
        // Returns sqrt(x^2 + y^2) without intermediate overflow or underflow.
        // BUG FIX: the original wrote into d_results here while printing d_results_2d.
        for (i = 0; i < d_vals.length; i++) {
            for (j = 0; j < d_vals.length; j++) {
                d_results_2d[i * d_vals.length + j] = StrictMath.hypot(d_vals[i], d_vals[j]);
            }
        }
        results("double hypot(double a, double b)", d_results_2d);

        // static double random()
        // Returns a double value with a positive sign, greater than or equal to 0.0 and less than 1.0.
        // Only range violations are printed, since the value itself is nondeterministic.
        for (i = 0; i < 10; i++) {
            double rand = StrictMath.random();
            if (rand < 0 || rand >= 1) {
                System.out.println("StrictMath.random(): Return value outside of [0,1): " + rand);
            }
        }

        // static double rint(double a)
        // Returns the double value that is closest in value to the argument and is equal to a mathematical integer.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.rint(d_vals[i]);
        }
        results("double rint(double a)", d_results);

        // static long round(double a)
        // Returns the closest long to the argument.
        long[] l_for_d_results = new long[d_results.length];
        for (i = 0; i < d_vals.length; i++) {
            l_for_d_results[i] = StrictMath.round(d_vals[i]);
        }
        results("long round(double a)", l_for_d_results);

        // static int round(float a)
        // Returns the closest int to the argument.
        int[] i_for_f_results = new int[f_results.length];
        for (i = 0; i < f_vals.length; i++) {
            i_for_f_results[i] = StrictMath.round(f_vals[i]);
        }
        results("int round(float a)", i_for_f_results);

        // static double sin(double a)
        // Returns the trigonometric sine of an angle.
        for (i = 0; i < d_vals.length; i++) {
            // Some browsers don't return consistent values for this operation
            // (e.g. Firefox on Travis-CI)
            if (d_vals[i] == Double.MAX_VALUE) {
                continue;
            }
            d_results[i] = StrictMath.sin(d_vals[i]);
        }
        results("double sin(double a)", d_results);

        // static double sinh(double x)
        // Returns the hyperbolic sine of a double value.
        for (i = 0; i < d_vals.length; i++) {
            if (d_vals[i] == Double.MIN_VALUE || d_vals[i] == Double.MIN_NORMAL) {
                continue;
            }
            d_results[i] = StrictMath.sinh(d_vals[i]);
        }
        results("double sinh(double a)", d_results);

        // static double sqrt(double a)
        // Returns the correctly rounded positive square root of a double value.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.sqrt(d_vals[i]);
        }
        results("double sqrt(double a)", d_results);

        // static double tan(double a)
        // Returns the trigonometric tangent of an angle.
        for (i = 0; i < d_vals.length; i++) {
            // Some browsers don't return consistent values for this operation
            // (e.g. Firefox on Travis-CI)
            if (d_vals[i] == Double.MAX_VALUE) {
                continue;
            }
            d_results[i] = StrictMath.tan(d_vals[i]);
        }
        results("double tan(double a)", d_results);

        // static double tanh(double x)
        // Returns the hyperbolic tangent of a double value.
        // BUG FIX: this function was documented here but never actually exercised.
        for (i = 0; i < d_vals.length; i++) {
            d_results[i] = StrictMath.tanh(d_vals[i]);
        }
        results("double tanh(double a)", d_results);
    }
}
| 5,951 |
921 | <filename>src/sqlancer/cockroachdb/CockroachDBBugs.java
package sqlancer.cockroachdb;
/**
 * Switchboard of known CockroachDB bugs. While a flag is {@code true}, query
 * generation is expected to avoid patterns that would trigger the linked
 * upstream issue; flip a flag to {@code false} once the issue is fixed in the
 * CockroachDB version under test.
 */
public final class CockroachDBBugs {
    // https://github.com/cockroachdb/cockroach/issues/46915
    public static boolean bug46915 = true;
    // https://github.com/cockroachdb/cockroach/issues/45703
    public static boolean bug45703 = true;
    // https://github.com/cockroachdb/cockroach/issues/44757
    public static boolean bug44757 = true;
    // Static flag holder only; never instantiated.
    private CockroachDBBugs() {
    }
}
| 166 |
375 | <reponame>alex729/RED<filename>src/Eclipse-IDE/org.robotframework.ide.eclipse.main.plugin.tests/src/org/robotframework/ide/eclipse/main/plugin/preferences/FieldEditorPreferencePageHelper.java
/*
* Copyright 2017 Nokia Solutions and Networks
* Licensed under the Apache License, Version 2.0,
* see license.txt file for details.
*/
package org.robotframework.ide.eclipse.main.plugin.preferences;
import static java.util.stream.Collectors.toList;
import java.lang.reflect.Field;
import java.util.List;
import org.eclipse.jface.preference.FieldEditor;
import org.eclipse.jface.preference.FieldEditorPreferencePage;
/**
 * Test-only helper that exposes the {@link FieldEditor}s registered on a
 * {@link FieldEditorPreferencePage}.
 *
 * <p>{@code FieldEditorPreferencePage} keeps its editors in a private
 * {@code fields} list with no accessor; reflection is the least invasive way
 * to read it without changing production code just for testing.
 */
class FieldEditorPreferencePageHelper {

    private FieldEditorPreferencePageHelper() {
        // static helpers only
    }

    /**
     * Returns all field editors registered on the given page.
     *
     * @throws Exception if the private {@code fields} member cannot be read
     */
    @SuppressWarnings("unchecked")
    static List<FieldEditor> getEditors(final FieldEditorPreferencePage page) throws Exception {
        final Field field = FieldEditorPreferencePage.class.getDeclaredField("fields");
        field.setAccessible(true);
        return (List<FieldEditor>) field.get(page);
    }

    /**
     * Returns the field editors of the given type registered on the page.
     *
     * @throws Exception if the private {@code fields} member cannot be read
     */
    static <T extends FieldEditor> List<T> getEditorsOfType(final FieldEditorPreferencePage page,
            final Class<T> editorType) throws Exception {
        // Reuse getEditors so the reflective access lives in exactly one place.
        return getEditors(page).stream()
                .filter(editorType::isInstance)
                .map(editorType::cast)
                .collect(toList());
    }
}
| 680 |
1,444 | <gh_stars>1000+
package mage.cards.s;
import java.util.UUID;
import mage.abilities.Ability;
import mage.abilities.common.SimpleStaticAbility;
import mage.abilities.effects.Effect;
import mage.abilities.effects.common.AttachEffect;
import mage.abilities.effects.common.continuous.BecomesCreatureIfVehicleEffect;
import mage.abilities.effects.common.continuous.BoostEnchantedEffect;
import mage.abilities.effects.common.continuous.GainAbilityAttachedEffect;
import mage.abilities.keyword.EnchantAbility;
import mage.abilities.keyword.FirstStrikeAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.*;
import mage.filter.FilterPermanent;
import mage.filter.predicate.Predicates;
import mage.target.TargetPermanent;
/**
* @author JRHerlehy
*/
/**
 * Siege Modification — {1}{R}{R} Aura.
 *
 * <p>Enchants a creature or a Vehicle; while it enchants a Vehicle, the
 * Vehicle is additionally a creature; the enchanted creature gets +3/+0 and
 * has first strike.
 *
 * @author JRHerlehy
 */
public final class SiegeModification extends CardImpl {

    // Target filter: any permanent that is a creature or has the Vehicle subtype.
    private static final FilterPermanent filter = new FilterPermanent("creature or vehicle");

    static {
        filter.add(Predicates.or(CardType.CREATURE.getPredicate(),
                SubType.VEHICLE.getPredicate()));
    }

    public SiegeModification(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.ENCHANTMENT}, "{1}{R}{R}");
        this.subtype.add(SubType.AURA);

        // Enchant creature or Vehicle
        TargetPermanent auraTarget = new TargetPermanent(filter);
        this.getSpellAbility().addTarget(auraTarget);
        this.getSpellAbility().addEffect(new AttachEffect(Outcome.Benefit));
        Ability ability = new EnchantAbility(auraTarget.getTargetName());
        this.addAbility(ability);

        // As long as enchanted permanent is a Vehicle, it's a creature in addition to its other types.
        this.addAbility(new SimpleStaticAbility(Zone.BATTLEFIELD, new BecomesCreatureIfVehicleEffect()));

        // Enchanted creature gets +3/+0 and has first strike.
        // Both effects hang off one static ability so the rules text renders as one sentence.
        Effect effect = new BoostEnchantedEffect(3, 0);
        effect.setText("Enchanted creature gets +3/+0");
        ability = new SimpleStaticAbility(Zone.BATTLEFIELD, effect);
        effect = new GainAbilityAttachedEffect(FirstStrikeAbility.getInstance(), AttachmentType.AURA);
        effect.setText(" and has first strike");
        ability.addEffect(effect);
        this.addAbility(ability);
    }

    // Copy constructor used by the game engine when duplicating game state.
    private SiegeModification(final SiegeModification card) {
        super(card);
    }

    @Override
    public SiegeModification copy() {
        return new SiegeModification(this);
    }
}
| 835 |
8,092 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Taskfail tracks the failed run durations of each task instance"""
from sqlalchemy import Column, ForeignKeyConstraint, Integer
from sqlalchemy.orm import relationship
from airflow.models.base import Base, StringID
from airflow.utils.sqlalchemy import UtcDateTime
class TaskFail(Base):
    """TaskFail tracks the failed run durations of each task instance."""

    __tablename__ = "task_fail"

    # Surrogate primary key; a task instance can fail more than once.
    id = Column(Integer, primary_key=True)
    task_id = Column(StringID(), nullable=False)
    dag_id = Column(StringID(), nullable=False)
    run_id = Column(StringID(), nullable=False)
    map_index = Column(Integer, nullable=False)
    start_date = Column(UtcDateTime)
    end_date = Column(UtcDateTime)
    # Whole seconds between start_date and end_date; None if either is missing.
    duration = Column(Integer)

    # Rows are owned by their task instance: deleting the TI cascades here.
    __table_args__ = (
        ForeignKeyConstraint(
            [dag_id, task_id, run_id, map_index],
            [
                "task_instance.dag_id",
                "task_instance.task_id",
                "task_instance.run_id",
                "task_instance.map_index",
            ],
            name='task_fail_ti_fkey',
            ondelete="CASCADE",
        ),
    )

    # We don't need a DB level FK here, as we already have that to TI (which has one to DR) but by defining
    # the relationship we can more easily find the execution date for these rows
    dag_run = relationship(
        "DagRun",
        primaryjoin="""and_(
            TaskFail.dag_id == foreign(DagRun.dag_id),
            TaskFail.run_id == foreign(DagRun.run_id),
        )""",
        viewonly=True,
    )

    def __init__(self, ti):
        # Snapshot the identifying fields and timing of the failed task instance.
        self.dag_id = ti.dag_id
        self.task_id = ti.task_id
        self.run_id = ti.run_id
        self.map_index = ti.map_index
        self.start_date = ti.start_date
        self.end_date = ti.end_date
        if self.end_date and self.start_date:
            self.duration = int((self.end_date - self.start_date).total_seconds())
        else:
            # Either timestamp may be unset (e.g. task never started cleanly).
            self.duration = None

    def __repr__(self):
        prefix = f"<{self.__class__.__name__}: {self.dag_id}.{self.task_id} {self.run_id}"
        # map_index == -1 means the task is not mapped, so it is omitted.
        if self.map_index != -1:
            prefix += f" map_index={self.map_index}"
        return prefix + '>'
| 1,172 |
3,227 | <gh_stars>1000+
#include <iostream>
#include <vector>
#include <CGAL/Simple_cartesian.h>
#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Real_timer.h>
#include <CGAL/algorithm.h>
#include <CGAL/point_generators_3.h>
#include "Orientation_3.h"
typedef CGAL::Simple_cartesian<double> K;
typedef K::Point_3 Point_3;
#ifndef ONLY_TEST_COMPARISONS
// Benchmark target: CGAL's statically filtered 3D orientation predicate.
typedef CGAL::internal::Static_filters_predicates::Orientation_3_benchmark<Point_3> Predicate;
#else
// Stand-in that only performs point comparisons; used to measure the baseline
// cost of iterating the data without the orientation test itself.
struct Predicate {
  int operator()(const Point_3& p,const Point_3& q,const Point_3& r,const Point_3& s) const
  {
    return (int)(p < q) + (int)(r<s);
  }
};
#endif
int main()
{
  const int N = 10000000; // 10M

  CGAL::Real_timer timer;
  timer.start();
  // Generate N random points inside a sphere of radius 100.
  std::vector<Point_3> points;
  points.reserve(N);
  CGAL::Random_points_in_sphere_3<Point_3> g( 100.0);
  std::copy_n( g, N, std::back_inserter(points));
  timer.stop();
  std::cout << "Fill vector: " << timer.time() << " sec" << std::endl;

  timer.reset();
  timer.start();
  // Accumulate predicate results over sliding windows of 4 consecutive points,
  // 10 passes, so the calls cannot be optimized away and timing is measurable.
  int res=0;
  Predicate predicate;
  for(int j = 0; j < 10; ++j) {
    // NOTE(review): highest index touched is (N-5)+3 == N-2, so the bound
    // could be i < N-3; as written one valid window is skipped — harmless.
    for(int i = 0; i < N-4; i++){
      res += predicate(points[i], points[i+1], points[i+2], points[i+3]);
    }
  }
  timer.stop();
  std::cout << "result = " << res << std::endl;
#ifndef ONLY_TEST_COMPARISONS
  std::cout << "Orientation: ";
#else
  std::cout << "Comparisons: ";
#endif
  std::cout << timer.time() << " sec" << std::endl;
}
| 607 |
460 | // Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef _BT_LOGGER_
#define _BT_LOGGER_
#include "BT_Singleton.h"
// Marks whether a log entry opens or closes a logical operation.
enum StartOrEnd
{
	// Plain entry. Fixed from `= NULL`: NULL is a null-pointer constant, not
	// an integral enumerator initializer; the value (0) is unchanged.
	UNDEFINED = 0,
	START,
	FINISH,
};
// File-backed logger writing through a QTextStream. Logging can be toggled
// globally or suppressed per source file; assertion failures are always
// logged regardless of those switches.
class Logger
{
protected:
	QTextStream * _logStream;   // stream over _logFile
	QFile _logFile;
	bool _isLogging;            // global logging, except for assertion fails which are always logged
	bool _verbose;
	QString _filepath;

	// Source files to skip; file name must be Visual Studio __FILE__ styled such as ".\BT_WindowsOS.cpp"
	QHash<QString, bool> _ignoreSourceFiles;
	bool _ignoreStartOrEnd;

	void openLog();

public:
	Logger();
	Logger(QString filePath);
	~Logger();

	void setIsLogging(bool value);
	void setVerboseLogging(bool value);
	bool isLogging(const QString & sourceFile, StartOrEnd isStart);
	bool isVerbose();

	// Disable logging for a source file; the name must be Visual Studio
	// __FILE__ styled such as ".\BT_WindowsOS.cpp".
	// (Declaration parameter renamed from the original typo "sourceFil".)
	void ignoreSourceFile(const QString & sourceFile);
	void ignoreStartOrEnd(bool ignoreStartOrEnd);
	void closeLog();
	QString getFilepath();

	virtual void log(QString message);
	virtual void log(QString message, StartOrEnd isStart);
	void logException(const char * file, int line, unsigned long code);
	void logAssert(const char * file, int line);
};
#define logger Singleton<Logger>::getInstance()
#define LOG(message) if (logger->isLogging(__FILE__, UNDEFINED)) logger->log(message)
#define LOG_START(message) if (logger->isLogging(__FILE__, START))logger->log(message, START)
#define LOG_FINISH(message) if (logger->isLogging(__FILE__, FINISH)) logger->log(message, FINISH)
#define LOG_STATEMENT(statement) LOG_START(#statement);statement;LOG_FINISH(#statement)
#define LOG_FUNCTION_REACHED() if (logger->isLogging(__FILE__, UNDEFINED)) logger->log(QString(__FUNCTION__));
#define LOG_LINE_REACHED_IMP(file, line) if (logger->isLogging(file, UNDEFINED)) logger->log(file"("MAKE_STRING_A(line)"):reached")
#define LOG_LINE_REACHED() LOG_LINE_REACHED_IMP(__FILE__, __LINE__)
#define LOG_ASSERT(expr) ((void)(!(expr) ? logger->logAssert(__FILE__, __LINE__) : 0))
#define VASSERT(expr, message) assert(expr)
#if !defined BTDEBUG
#define ASSERT(expr) LOG_ASSERT(expr)
#define ASSERTE(expr) ASSERT(expr)
#define assert(expr) ASSERT(expr)
#define _ASSERT(expr) ASSERT(expr)
#define _ASSERTE(expr) ASSERT(expr)
#define VASSERT(expr, message) LOG_ASSERT(expr); if (!(expr) && logger->isLogging(__FILE__, UNDEFINED))logger->log(QString_NT("\t") + message)
#endif
// -----------------------------------------------------------------------------
// Logger specialization used for GUID-related diagnostics (see GUID_LOG macro).
class GUIDLogger : public Logger
{
public:
	GUIDLogger();
};
#define guidLogger Singleton<GUIDLogger>::getInstance()
#define GUID_LOG(message) if (guidLogger->isLogging(__FILE__, UNDEFINED)) guidLogger->log(message)
// -----------------------------------------------------------------------------
// Logger specialization used for test output (see TEST_LOG macro).
class TestLogger : public Logger
{
public:
	TestLogger();
};
#define testLogger Singleton<TestLogger>::getInstance()
#define TEST_LOG(message) if (testLogger->isLogging(__FILE__, UNDEFINED)) testLogger->log(message)
// -----------------------------------------------------------------------------
#else
class Logger;
#endif // _BT_LOGGER_
| 1,363 |
418 | <filename>training/pytorch/structured/custom_containers/gpu/trainer/metadata.py
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Task type can be either 'classification', 'regression', or 'custom'.
# This is based on the target feature in the dataset.
TASK_TYPE = 'classification'

# List of all the columns (header) present in the input data file(s).
# Used for parsing the input data.
COLUMN_NAMES = [
    'tip',
    'trip_miles',
    'trip_seconds',
    'fare',
    'trip_start_month',
    'trip_start_hour',
    'trip_start_day',
    'pickup_community_area',
    'dropoff_community_area',
    'pickup_census_tract',
    'dropoff_census_tract',
    'pickup_latitude',
    'pickup_longitude',
    'dropoff_latitude',
    'dropoff_longitude',
    'payment_type',
    'company'
]

# List of the columns expected during serving (which is probably different to
# the header of the training data). Identical to COLUMN_NAMES minus the
# target column 'tip'.
SERVING_COLUMN_NAMES = [
    'trip_miles',
    'trip_seconds',
    'fare',
    'trip_start_month',
    'trip_start_hour',
    'trip_start_day',
    'pickup_community_area',
    'dropoff_community_area',
    'pickup_census_tract',
    'dropoff_census_tract',
    'pickup_latitude',
    'pickup_longitude',
    'dropoff_latitude',
    'dropoff_longitude',
    'payment_type',
    'company'
]

# List of the default values of all the columns present in the input data.
# This helps decoding the data types of the columns.
# NOTE(review): entries are positional and must stay aligned with COLUMN_NAMES.
# 'fare' (index 3) defaults to an int here although it is listed as a numeric
# feature below — confirm the raw data column really is integral.
DEFAULTS = [[0], [0.0], [0], [0], [0], [0], [0], [''], [''], [''], [''], [0.0],
            [0.0], [0.0], [0.0], [''], ['']]

# Dictionary of the feature names of type int or float. In the dictionary,
# the key is the feature name, and the value is another dictionary includes
# the mean and the variance of the numeric features.
# E.g. {feature_1: {mean: 0, variance:1}, feature_2: {mean: 10, variance:3}}
# The value can be set to None if you don't want to not normalize.
NUMERIC_FEATURE_NAMES_WITH_STATS = {
    'fare': None,
    'trip_miles': None,
    'trip_seconds': None
}

# Numeric features defining time of the trip.
NUMERIC_FEATURE_NAMES = {
    'trip_start_month': None,
    'trip_start_hour': None,
    'trip_start_day': None,
}

# Numeric features holding pickup/dropoff coordinates.
NUMERIC_FEATURE_NAMES_GEOPOINTS = {
    'pickup_latitude': None,
    'pickup_longitude': None,
    'dropoff_latitude': None,
    'dropoff_longitude': None,
}

# List of categorical columns present in the input data.
CATEGORICAL_COLUMNS = [
    'payment_type',
    'company'
]

# Dictionary of feature names with int values, but to be treated as
# categorical features. In the dictionary, the key is the feature name,
# and the value is the num_buckets (count of distinct values).
CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY = {}

# Dictionary of categorical features with few nominal values. In the
# dictionary, the key is the feature name, and the value is the list of
# feature vocabulary.
CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {
    'payment_type': [
        'Cash', 'Credit Card', 'Pcard', 'Unknown', 'No Charge', 'Prcard',
        'Dispute', 'Mobile'
    ],
}

# Dictionary of categorical features with many values. In the dictionary,
# the key is the feature name, and the value is the number of buckets.
CATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET = {
    'company': 100,
}

# Target feature name (response or class variable).
TARGET_NAME = 'tip'

# List of the class values (labels) in a classification dataset.
TARGET_LABELS = [1, 0]
| 1,410 |
9,425 | def __virtual__():
return True
def execute(*args, **kwargs):
    """Call ``test.arg`` through the loader-injected ``__salt__`` dunder.

    Resolving ``__salt__`` successfully proves the loader supplied minion
    modules to this custom module.
    """
    return __salt__["test.arg"]("test.arg fired")
3,058 | /*
* Copyright (c) 2018-2021 The Forge Interactive Inc.
*
* This file is part of The-Forge
* (see https://github.com/ConfettiFX/The-Forge).
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#pragma once
#include "../Core/Config.h"
#include "IOperatingSystem.h"
#include "../Math/MathTypes.h"
#if defined(QUEST_VR)
#include "../Quest/VrApi.h"
#endif
// Tuning knobs for camera movement, consumed by ICameraController implementations.
struct CameraMotionParameters
{
	float maxSpeed;
	float acceleration; // only used with binary inputs such as keypresses
	float braking;      // also acceleration but orthogonal to the acceleration vector
};
// Abstract camera controller: owns position/orientation state, consumes
// move/rotate/zoom input events, and produces a view matrix per frame.
class ICameraController
{
public:
	virtual ~ICameraController() {}
	virtual void setMotionParameters(const CameraMotionParameters&) = 0;
	// Advance the camera simulation by deltaTime seconds.
	virtual void update(float deltaTime) = 0;

	// there are also implicit dependencies on the keyboard state.

	virtual mat4 getViewMatrix() const = 0;
	virtual vec3 getViewPosition() const = 0;
	virtual vec2 getRotationXY() const = 0;
	virtual void moveTo(const vec3& location) = 0;
	virtual void lookAt(const vec3& lookAt) = 0;
	virtual void setViewRotationXY(const vec2& v) = 0;
	virtual void resetView() = 0;

	// Input event handlers; vec components are per-frame input deltas.
	virtual void onMove(const float2& vec) = 0;
	virtual void onRotate(const float2& vec) = 0;
	virtual void onZoom(const float2& vec) = 0;
};
/// \c initGuiCameraController assumes that the camera is not rotated around the look direction;
/// in its matrix, \c Z points at \c startLookAt and \c X is horizontal.
ICameraController* initGuiCameraController(const vec3& startPosition, const vec3& startLookAt);
/// \c initFpsCameraController does basic FPS-style god mode navigation; tf_free-look is constrained
/// to about +/- 88 degrees and WASD translates in the camera's local XZ plane.
ICameraController* initFpsCameraController(const vec3& startPosition, const vec3& startLookAt);
void exitCameraController(ICameraController* pCamera);
// Camera matrix wrapper that hides mono vs. stereo rendering: on QUEST_VR
// builds it carries one matrix per eye, otherwise a single camera matrix.
class CameraMatrix
{
public:
	CameraMatrix();
	CameraMatrix(const CameraMatrix& mat);

	inline const CameraMatrix& operator= (const CameraMatrix& mat);
	inline const CameraMatrix operator* (const Matrix4& mat) const;

	// Returns the camera matrix or the left eye matrix on VR platforms.
	mat4 getPrimaryMatrix() const;

	static inline const CameraMatrix inverse(const CameraMatrix& mat);
	static inline const CameraMatrix transpose(const CameraMatrix& mat);
	static inline const CameraMatrix perspective(float fovxRadians, float aspectInverse, float zNear, float zFar);
	static inline const CameraMatrix perspectiveReverseZ(float fovxRadians, float aspectInverse, float zNear, float zFar);
	static inline const CameraMatrix orthographic(float left, float right, float bottom, float top, float zNear, float zFar);
	static inline const CameraMatrix identity();
	static inline void extractFrustumClipPlanes(const CameraMatrix& vp, Vector4& rcp, Vector4& lcp, Vector4& tcp, Vector4& bcp, Vector4& fcp, Vector4& ncp, bool const normalizePlanes);

private:
	// On non-VR builds mCamera and mLeftEye alias the same storage.
	union
	{
		mat4 mCamera;
		mat4 mLeftEye;
	};
#if defined(QUEST_VR)
	mat4 mRightEye;
#endif
};
// Copies the per-eye matrices; non-VR builds hold only the single matrix.
inline const CameraMatrix& CameraMatrix::operator= (const CameraMatrix& mat)
{
	mLeftEye = mat.mLeftEye;
#if defined(QUEST_VR)
	mRightEye = mat.mRightEye;
#endif
	return *this;
}
// Right-multiplies each eye matrix by mat, returning a new CameraMatrix.
inline const CameraMatrix CameraMatrix::operator* (const Matrix4& mat) const
{
	CameraMatrix result;
	result.mLeftEye = mLeftEye * mat;
#if defined(QUEST_VR)
	result.mRightEye = mRightEye * mat;
#endif
	return result;
}
// Per-eye matrix inverse (delegates to the global ::inverse on mat4).
inline const CameraMatrix CameraMatrix::inverse(const CameraMatrix & mat)
{
	CameraMatrix result;
	result.mLeftEye = ::inverse(mat.mLeftEye);
#if defined(QUEST_VR)
	result.mRightEye = ::inverse(mat.mRightEye);
#endif
	return result;
}
// Per-eye matrix transpose (delegates to the global ::transpose on mat4).
inline const CameraMatrix CameraMatrix::transpose(const CameraMatrix & mat)
{
	CameraMatrix result;
	result.mLeftEye = ::transpose(mat.mLeftEye);
#if defined(QUEST_VR)
	result.mRightEye = ::transpose(mat.mRightEye);
#endif
	return result;
}
// Standard perspective projection. On VR builds the fov/aspect arguments are
// ignored and the headset-provided per-eye projections are used instead.
inline const CameraMatrix CameraMatrix::perspective(float fovxRadians, float aspectInverse, float zNear, float zFar)
{
	CameraMatrix result;
#if defined(QUEST_VR)
	result.mLeftEye = getHeadsetLeftEyeProjectionMatrix(zNear, zFar);
	result.mRightEye = getHeadsetRightEyeProjectionMatrix(zNear, zFar);
#else
	result.mCamera = mat4::perspective(fovxRadians, aspectInverse, zNear, zFar);
#endif
	return result;
}
// Builds a reverse-Z perspective projection (depth range inverted for better
// z-buffer precision). On QUEST_VR the headset projections are patched in
// place: the z entries of the 3rd and 4th columns are rewritten
// (z2' = w2 - z2, z3' = -z3) to flip the produced depth range.
inline const CameraMatrix CameraMatrix::perspectiveReverseZ(float fovxRadians, float aspectInverse, float zNear, float zFar)
{
    CameraMatrix result;
#if defined(QUEST_VR)
    result.mLeftEye = getHeadsetLeftEyeProjectionMatrix(zNear, zFar);
    result.mRightEye = getHeadsetRightEyeProjectionMatrix(zNear, zFar);
    // Patch the left eye's depth mapping.
    Vector4 col2 = result.mLeftEye.getCol2();
    Vector4 col3 = result.mLeftEye.getCol3();
    col2.setZ(col2.getW() - col2.getZ());
    col3.setZ(-col3.getZ());
    result.mLeftEye.setCol2(col2);
    result.mLeftEye.setCol3(col3);
    // Same patch for the right eye.
    col2 = result.mRightEye.getCol2();
    col3 = result.mRightEye.getCol3();
    col2.setZ(col2.getW() - col2.getZ());
    col3.setZ(-col3.getZ());
    result.mRightEye.setCol2(col2);
    result.mRightEye.setCol3(col3);
#else
    result.mCamera = mat4::perspectiveReverseZ(fovxRadians, aspectInverse, zNear, zFar);
#endif
    return result;
}
// Builds an orthographic projection. On QUEST_VR the horizontal offset of the
// headset's projection ([2][0]) is applied per eye, shifting the ortho volume
// left/right so content lines up between the two eyes.
inline const CameraMatrix CameraMatrix::orthographic(float left, float right, float bottom, float top, float zNear, float zFar)
{
    CameraMatrix result;
#if defined(QUEST_VR)
    const mat4 headsetProj = getHeadsetLeftEyeProjectionMatrix(zNear, zFar);
    const float eyeSeparation = headsetProj[2][0];
    result.mLeftEye = mat4::orthographic(left + eyeSeparation, right + eyeSeparation, bottom, top, zNear, zFar);
    result.mRightEye = mat4::orthographic(left - eyeSeparation, right - eyeSeparation, bottom, top, zNear, zFar);
#else
    result.mCamera = mat4::orthographic(left, right, bottom, top, zNear, zFar);
#endif
    return result;
}
// Returns a CameraMatrix with every eye matrix set to the identity.
inline const CameraMatrix CameraMatrix::identity()
{
    CameraMatrix ident;
    ident.mLeftEye = mat4::identity();
#if defined(QUEST_VR)
    ident.mRightEye = mat4::identity();
#endif
    return ident;
}
// Extracts the six clip planes of the view-projection 'vp' using the
// Gribb/Hartmann method (plane = row3 +/- rowN). Each plane is (a, b, c, d)
// with ax + by + cz + d = 0; if normalizePlanes is set, each plane is divided
// by the length of its (a, b, c) normal so distances are in world units.
inline void CameraMatrix::extractFrustumClipPlanes(const CameraMatrix& vp, Vector4& rcp, Vector4& lcp, Vector4& tcp, Vector4& bcp, Vector4& fcp, Vector4& ncp, bool const normalizePlanes)
{
#if defined(QUEST_VR)
    // Builds one combined frustum for both eyes: the left plane comes from the
    // left-eye matrix and the right plane from the right-eye matrix; all other
    // planes use the left eye. NOTE(review): this assumes the two eye frustums
    // only diverge horizontally -- verify against the headset projection setup.
    // Left plane
    lcp = vp.mLeftEye.getRow(3) + vp.mLeftEye.getRow(0);
    // Right plane
    rcp = vp.mRightEye.getRow(3) - vp.mRightEye.getRow(0);
    // Bottom plane
    bcp = vp.mLeftEye.getRow(3) + vp.mLeftEye.getRow(1);
    // Top plane
    tcp = vp.mLeftEye.getRow(3) - vp.mLeftEye.getRow(1);
    // Near plane
    ncp = vp.mLeftEye.getRow(3) + vp.mLeftEye.getRow(2);
    // Far plane
    fcp = vp.mLeftEye.getRow(3) - vp.mLeftEye.getRow(2);
    // Normalize if needed (the whole 4-vector is scaled so d stays consistent).
    if (normalizePlanes)
    {
        float lcp_norm = length(lcp.getXYZ());
        lcp /= lcp_norm;
        float rcp_norm = length(rcp.getXYZ());
        rcp /= rcp_norm;
        float bcp_norm = length(bcp.getXYZ());
        bcp /= bcp_norm;
        float tcp_norm = length(tcp.getXYZ());
        tcp /= tcp_norm;
        float ncp_norm = length(ncp.getXYZ());
        ncp /= ncp_norm;
        float fcp_norm = length(fcp.getXYZ());
        fcp /= fcp_norm;
    }
#else
    mat4::extractFrustumClipPlanes(vp.mCamera, rcp, lcp, tcp, bcp, fcp, ncp, normalizePlanes);
#endif
}
1,330 | <filename>core/src/test/java/org/apache/struts2/dispatcher/NullActionMapper.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.struts2.dispatcher;
import javax.servlet.http.HttpServletRequest;
import org.apache.struts2.dispatcher.mapper.ActionMapper;
import org.apache.struts2.dispatcher.mapper.ActionMapping;
import com.opensymphony.xwork2.config.ConfigurationManager;
/**
 * {@link ActionMapper} stub used when testing {@code FilterDispatcher}
 * (see FilterDispatcherTest): it ignores the request entirely and always
 * returns the statically configured {@link ActionMapping}.
 */
public class NullActionMapper implements ActionMapper {

    /** Mapping returned by every {@link #getMapping} call; set via {@link #setActionMapping}. */
    private static ActionMapping actionMapping;

    public NullActionMapper() {
    }

    /**
     * Configures the mapping that subsequent {@link #getMapping} calls return.
     *
     * @param actionMappingToBeReturned the mapping to hand out (may be null)
     */
    public static void setActionMapping(ActionMapping actionMappingToBeReturned) {
        actionMapping = actionMappingToBeReturned;
    }

    public ActionMapping getMapping(HttpServletRequest request, ConfigurationManager config) {
        return actionMapping;
    }

    public ActionMapping getMappingFromActionName(String actionName) {
        // Name-based lookup is not exercised by the tests.
        return null;
    }

    public String getUriFromActionMapping(ActionMapping mapping) {
        throw new UnsupportedOperationException("operation not supported");
    }
}
| 577 |
371 | <filename>src/main/java/de/siegmar/fastcsv/reader/package-info.java
/**
 * FastCSV reader.
 *
 * <p>Obtain a low-level (index based) reader via
 * {@link de.siegmar.fastcsv.reader.CsvReader#builder()} and a higher-level
 * (name/header based) reader via
 * {@link de.siegmar.fastcsv.reader.NamedCsvReader#builder()}.</p>
 */
package de.siegmar.fastcsv.reader;
| 124 |
545 | {
"Html5AppRendererIndex.enterFullscreen": "Vào chế độ toàn màn hình",
"Html5AppRendererIndex.exitFullscreen": "Thoát chế độ toàn màn hình"
} | 88 |
2,219 | // Copyright (c) 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/base/logging_network_change_observer.h"
#include <string>
#include "base/bind.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/values.h"
#include "net/log/net_log.h"
#include "net/log/net_log_event_type.h"
#if defined(OS_ANDROID)
#include "base/android/build_info.h"
#endif
namespace net {
namespace {
// Returns a human readable integer from a NetworkHandle.
// On Android M+ the platform NetID lives in the upper 32 bits of the handle
// (munged by java Network.getNetworkHandle(), see link below), so shift it
// back down. Elsewhere the handle is returned as-is (narrowed to int; the
// value is only used for logging).
int HumanReadableNetworkHandle(NetworkChangeNotifier::NetworkHandle network) {
#if defined(OS_ANDROID)
  // On Marshmallow, demunge the NetID to undo munging done in java
  // Network.getNetworkHandle() by shifting away 0xfacade from
  // http://androidxref.com/6.0.1_r10/xref/frameworks/base/core/java/android/net/Network.java#385
  if (base::android::BuildInfo::GetInstance()->sdk_int() >=
      base::android::SDK_VERSION_MARSHMALLOW) {
    return network >> 32;
  }
#endif
  return network;
}
// Return a dictionary of values that provide information about a
// network-specific change. This also includes relevant current state
// like the default network, and the types of active networks.
base::Value NetworkSpecificNetLogParams(
    NetworkChangeNotifier::NetworkHandle network) {
  base::Value dict(base::Value::Type::DICTIONARY);
  // The network that changed and its connection type.
  dict.SetIntKey("changed_network_handle", HumanReadableNetworkHandle(network));
  dict.SetStringKey(
      "changed_network_type",
      NetworkChangeNotifier::ConnectionTypeToString(
          NetworkChangeNotifier::GetNetworkConnectionType(network)));
  // Current default network, for comparison with the changed one.
  dict.SetIntKey(
      "default_active_network_handle",
      HumanReadableNetworkHandle(NetworkChangeNotifier::GetDefaultNetwork()));
  // One entry per currently connected network, keyed by its handle.
  NetworkChangeNotifier::NetworkList networks;
  NetworkChangeNotifier::GetConnectedNetworks(&networks);
  for (NetworkChangeNotifier::NetworkHandle active_network : networks) {
    dict.SetStringKey(
        "current_active_networks." +
            base::NumberToString(HumanReadableNetworkHandle(active_network)),
        NetworkChangeNotifier::ConnectionTypeToString(
            NetworkChangeNotifier::GetNetworkConnectionType(active_network)));
  }
  return dict;
}
// Emits a network-specific event to the global NetLog; a null |net_log| is
// tolerated and simply drops the event.
void NetLogNetworkSpecific(NetLog* net_log,
                           NetLogEventType type,
                           NetworkChangeNotifier::NetworkHandle network) {
  if (net_log) {
    net_log->AddGlobalEntry(
        type, [&] { return NetworkSpecificNetLogParams(network); });
  }
}
} // namespace
// Registers for every global change notification; per-network observation is
// only added where the platform supports network handles.
LoggingNetworkChangeObserver::LoggingNetworkChangeObserver(NetLog* net_log)
    : net_log_(net_log) {
  NetworkChangeNotifier::AddIPAddressObserver(this);
  NetworkChangeNotifier::AddConnectionTypeObserver(this);
  NetworkChangeNotifier::AddNetworkChangeObserver(this);
  if (NetworkChangeNotifier::AreNetworkHandlesSupported())
    NetworkChangeNotifier::AddNetworkObserver(this);
}
// Unregisters exactly the observers added in the constructor (mirrored order).
LoggingNetworkChangeObserver::~LoggingNetworkChangeObserver() {
  NetworkChangeNotifier::RemoveIPAddressObserver(this);
  NetworkChangeNotifier::RemoveConnectionTypeObserver(this);
  NetworkChangeNotifier::RemoveNetworkChangeObserver(this);
  if (NetworkChangeNotifier::AreNetworkHandlesSupported())
    NetworkChangeNotifier::RemoveNetworkObserver(this);
}
// Logs IP address configuration changes.
// NOTE(review): unlike NetLogNetworkSpecific(), this dereferences |net_log_|
// without a null check -- confirm the observer is never built with a null log.
void LoggingNetworkChangeObserver::OnIPAddressChanged() {
  VLOG(1) << "Observed a change to the network IP addresses";
  net_log_->AddGlobalEntry(NetLogEventType::NETWORK_IP_ADDRESSES_CHANGED);
}
// Logs a connectivity state change to both VLOG and the global NetLog.
void LoggingNetworkChangeObserver::OnConnectionTypeChanged(
    NetworkChangeNotifier::ConnectionType type) {
  const std::string connection_type =
      NetworkChangeNotifier::ConnectionTypeToString(type);
  VLOG(1) << "Observed a change to network connectivity state "
          << connection_type;
  net_log_->AddGlobalEntryWithStringParams(
      NetLogEventType::NETWORK_CONNECTIVITY_CHANGED, "new_connection_type",
      connection_type);
}
// Logs a full network change (type may be CONNECTION_NONE while offline).
void LoggingNetworkChangeObserver::OnNetworkChanged(
    NetworkChangeNotifier::ConnectionType type) {
  const std::string connection_type =
      NetworkChangeNotifier::ConnectionTypeToString(type);
  VLOG(1) << "Observed a network change to state " << connection_type;
  net_log_->AddGlobalEntryWithStringParams(
      NetLogEventType::NETWORK_CHANGED, "new_connection_type",
      connection_type);
}
// Logs that a specific network (identified by handle) came up.
void LoggingNetworkChangeObserver::OnNetworkConnected(
    NetworkChangeNotifier::NetworkHandle network) {
  VLOG(1) << "Observed network " << network << " connect";
  NetLogNetworkSpecific(net_log_, NetLogEventType::SPECIFIC_NETWORK_CONNECTED,
                        network);
}
// Logs that a specific network (identified by handle) went down.
void LoggingNetworkChangeObserver::OnNetworkDisconnected(
    NetworkChangeNotifier::NetworkHandle network) {
  VLOG(1) << "Observed network " << network << " disconnect";
  NetLogNetworkSpecific(
      net_log_, NetLogEventType::SPECIFIC_NETWORK_DISCONNECTED, network);
}
// Logs an advance warning that a specific network is about to go down.
void LoggingNetworkChangeObserver::OnNetworkSoonToDisconnect(
    NetworkChangeNotifier::NetworkHandle network) {
  VLOG(1) << "Observed network " << network << " soon to disconnect";
  NetLogNetworkSpecific(
      net_log_, NetLogEventType::SPECIFIC_NETWORK_SOON_TO_DISCONNECT, network);
}
// Logs that a specific network became the system default.
void LoggingNetworkChangeObserver::OnNetworkMadeDefault(
    NetworkChangeNotifier::NetworkHandle network) {
  VLOG(1) << "Observed network " << network << " made the default network";
  NetLogNetworkSpecific(
      net_log_, NetLogEventType::SPECIFIC_NETWORK_MADE_DEFAULT, network);
}
} // namespace net
| 1,842 |
331 | package it.gmariotti.android.apps.dashclock.extensions;
import android.media.Ringtone;
import android.media.RingtoneManager;
import android.net.Uri;
import android.os.Bundle;
import android.preference.CheckBoxPreference;
import android.preference.ListPreference;
import android.preference.Preference;
import android.preference.PreferenceActivity;
import android.preference.PreferenceManager;
import android.preference.RingtonePreference;
import android.text.TextUtils;
import android.view.MenuItem;
/**
* A base activity for extension configuration activities.
*/
public abstract class BaseSettingsActivity extends PreferenceActivity {
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
getActionBar().setDisplayHomeAsUpEnabled(true);
}
@Override
protected void onPostCreate(Bundle savedInstanceState) {
super.onPostCreate(savedInstanceState);
setupSimplePreferencesScreen();
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (item.getItemId() == android.R.id.home) {
// TODO: if the previous activity on the stack isn't a ConfigurationActivity,
// launch it.
finish();
return true;
}
return super.onOptionsItemSelected(item);
}
protected abstract void setupSimplePreferencesScreen();
/**
* A preference value change listener that updates the preference's summary to reflect its new
* value.
*/
private static Preference.OnPreferenceChangeListener sBindPreferenceSummaryToValueListener
= new Preference.OnPreferenceChangeListener() {
@Override
public boolean onPreferenceChange(Preference preference, Object value) {
String stringValue = value.toString();
if (preference instanceof ListPreference) {
// For list preferences, look up the correct display value in
// the preference's 'entries' list.
ListPreference listPreference = (ListPreference) preference;
int index = listPreference.findIndexOfValue(stringValue);
// Set the summary to reflect the new value.
preference.setSummary(
index >= 0
? (listPreference.getEntries()[index])
.toString().replaceAll("%", "%%")
: null);
} else if (preference instanceof RingtonePreference) {
// For ringtone preferences, look up the correct display value
// using RingtoneManager.
if (TextUtils.isEmpty(stringValue)) {
// Empty values correspond to 'silent' (no ringtone).
//preference.setSummary(R.string.pref_ringtone_silent);
} else {
Ringtone ringtone = RingtoneManager.getRingtone(
preference.getContext(), Uri.parse(stringValue));
if (ringtone == null) {
// Clear the summary if there was a lookup error.
preference.setSummary(null);
} else {
// Set the summary to reflect the new ringtone display
// name.
String name = ringtone.getTitle(preference.getContext());
preference.setSummary(name);
}
}
} else if (preference instanceof CheckBoxPreference) {
// Display default summary
}else {
// For all other preferences, set the summary to the value's
// simple string representation.
preference.setSummary(stringValue);
}
return true;
}
};
/**
* Binds a preference's summary to its value. More specifically, when the preference's value is
* changed, its summary (line of text below the preference title) is updated to reflect the
* value. The summary is also immediately updated upon calling this method. The exact display
* format is dependent on the type of preference.
*/
public static void bindPreferenceSummaryToValue(Preference preference) {
setAndCallPreferenceChangeListener(preference, sBindPreferenceSummaryToValueListener);
}
/**
* When the preference's value is changed, trigger the given listener. The listener is also
* immediately called with the preference's current value upon calling this method.
*/
public static void setAndCallPreferenceChangeListener(Preference preference,
Preference.OnPreferenceChangeListener listener) {
// Set the listener to watch for value changes.
preference.setOnPreferenceChangeListener(listener);
// Trigger the listener immediately with the preference's
// current value.
listener.onPreferenceChange(preference,
PreferenceManager
.getDefaultSharedPreferences(preference.getContext())
.getBoolean(preference.getKey(), true));
}
} | 2,257 |
1,248 | <filename>src/pretix/control/views/global_settings.py
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import pkg_resources
from django.contrib import messages
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, reverse
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django.views import View
from django.views.generic import FormView, TemplateView
from pretix.base.models import LogEntry, OrderPayment, OrderRefund
from pretix.base.services.update_check import check_result_table, update_check
from pretix.base.settings import GlobalSettingsObject
from pretix.control.forms.global_settings import (
GlobalSettingsForm, LicenseCheckForm, UpdateSettingsForm,
)
from pretix.control.permissions import (
AdministratorPermissionRequiredMixin, StaffMemberRequiredMixin,
)
class GlobalSettingsView(AdministratorPermissionRequiredMixin, FormView):
    """Form view to edit instance-wide settings (administrators only)."""
    template_name = 'pretixcontrol/global_settings.html'
    form_class = GlobalSettingsForm

    def form_valid(self, form):
        # Persist the changed settings, then redirect back to the page.
        form.save()
        messages.success(self.request, _('Your changes have been saved.'))
        return super().form_valid(form)

    def form_invalid(self, form):
        messages.error(self.request, _('Your changes have not been saved, see below for errors.'))
        return super().form_invalid(form)

    def get_success_url(self):
        return reverse('control:global.settings')
class UpdateCheckView(StaffMemberRequiredMixin, FormView):
    """Settings page for pretix's automatic update check (staff only)."""
    template_name = 'pretixcontrol/global_update.html'
    form_class = UpdateSettingsForm

    def post(self, request, *args, **kwargs):
        # The "trigger" button runs the update check synchronously instead of
        # submitting the settings form.
        if 'trigger' in request.POST:
            update_check.apply()
            return redirect(self.get_success_url())
        return super().post(request, *args, **kwargs)

    def form_valid(self, form):
        form.save()
        messages.success(self.request, _('Your changes have been saved.'))
        return super().form_valid(form)

    def form_invalid(self, form):
        messages.error(self.request, _('Your changes have not been saved, see below for errors.'))
        return super().form_invalid(form)

    def get_context_data(self, **kwargs):
        # NOTE: rendering this page intentionally marks the update notice as
        # acknowledged (settings write below) and shows the last check result.
        ctx = super().get_context_data()
        ctx['gs'] = GlobalSettingsObject()
        ctx['gs'].settings.set('update_check_ack', True)
        ctx['tbl'] = check_result_table()
        return ctx

    def get_success_url(self):
        return reverse('control:global.update')
class MessageView(TemplateView):
    """Renders the static global-message template."""
    template_name = 'pretixcontrol/global_message.html'
class LogDetailView(AdministratorPermissionRequiredMixin, View):
    """Returns the parsed payload of one LogEntry as JSON (admins only)."""
    def get(self, request, *args, **kwargs):
        # ?pk=<id> selects the entry; 404 if missing.
        le = get_object_or_404(LogEntry, pk=request.GET.get('pk'))
        return JsonResponse({'data': le.parsed_data})
class PaymentDetailView(AdministratorPermissionRequiredMixin, View):
    """Returns the provider info blob of one OrderPayment as JSON (admins only)."""
    def get(self, request, *args, **kwargs):
        # ?pk=<id> selects the payment; 404 if missing.
        p = get_object_or_404(OrderPayment, pk=request.GET.get('pk'))
        return JsonResponse({'data': p.info_data})
class RefundDetailView(AdministratorPermissionRequiredMixin, View):
    """Returns the provider info blob of one OrderRefund as JSON (admins only)."""
    def get(self, request, *args, **kwargs):
        # ?pk=<id> selects the refund; 404 if missing.
        p = get_object_or_404(OrderRefund, pk=request.GET.get('pk'))
        return JsonResponse({'data': p.info_data})
class LicenseCheckView(StaffMemberRequiredMixin, FormView):
    """License-compliance self-check questionnaire for staff members.

    Stores the answers in the global settings and renders warnings about
    probable license violations based on the installed plugins.
    """
    template_name = 'pretixcontrol/global_license.html'
    form_class = LicenseCheckForm

    def get_initial(self):
        """Prefill the form with previously stored answers.

        On first use, build a default source-code notice listing pretix itself
        plus one line per installed plugin with its license and homepage.
        """
        d = {}
        gs = GlobalSettingsObject()
        d.update(gs.settings.license_check_input)
        if not d:
            d['source_notice'] = 'pretix (AGPLv3 with additional terms): https://github.com/pretix/pretix'
            seen = set()
            for entry_point in pkg_resources.iter_entry_points(group='pretix.plugin', name=None):
                if entry_point.dist.key not in seen:
                    try:
                        pkg_license, url = self._get_license_for_pkg(entry_point.dist.key)
                    except FileNotFoundError:
                        pkg_license, url = '?', '?'
                    d['source_notice'] += f'\n{entry_point.dist.key} ({pkg_license}): {url}'
                    seen.add(entry_point.dist.key)
        return d

    def form_valid(self, form):
        # Store the questionnaire answers and remember when they were given.
        gs = GlobalSettingsObject()
        gs.settings.license_check_input = form.cleaned_data
        gs.settings.license_check_completed = now()
        return super().form_valid(form)

    def form_invalid(self, form):
        messages.error(self.request, _('Your changes have not been saved, see below for errors.'))
        return super().form_invalid(form)

    def get_success_url(self):
        return reverse('control:global.license')

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        answers = self.get_initial()
        if 'base_license' in answers:
            ctx['results'] = self._check_results(answers)
        else:
            # Questionnaire has never been filled in; nothing to evaluate yet.
            ctx['results'] = False
        return ctx

    def _get_license_for_pkg(self, pkg):
        """Return ``(license, homepage)`` metadata for an installed package.

        Returns ``(None, None)`` if the distribution cannot be resolved and
        ``('?', '?')`` if its metadata file is missing.
        """
        license_name, url = None, None
        try:
            dist = pkg_resources.get_distribution(pkg)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            return None, None
        try:
            for line in dist.get_metadata_lines(dist.PKG_INFO):
                if ': ' in line:
                    (k, v) = line.split(': ', 1)
                    if k == "License":
                        license_name = v
                    if k == "Home-page":
                        url = v
        except FileNotFoundError:
            license_name = '?'
            url = '?'
        return license_name, url

    def _check_results(self, answers):
        """Evaluate the questionnaire and return a list of warning tuples.

        Each tuple is ``(bootstrap_class, icon_name, message)``.
        """
        res = []
        if answers.get('base_license') == 'agpl_addperm' and answers.get('usage') != 'internally':
            res.append((
                'danger', 'exclamation-circle',
                _('You are in violation of the license. If you\'re not sure whether you qualify for the additional '
                  'permission or if you offer the functionality of pretix to others, you must either use pretix under '
                  'AGPLv3 terms or obtain a pretix Enterprise license.')
            ))
        if (answers.get('base_license') != 'agpl' or answers.get('plugins_enterprise')) and answers.get('plugins_copyleft'):
            res.append((
                'danger', 'exclamation-circle',
                _('You may not make use of the additional permission or of a pretix Enterprise license if you install '
                  'any plugins licensed with strong copyleft, otherwise you are likely in violation of the license '
                  'of these plugins.')
            ))
        if answers.get('base_license') == 'agpl' and not answers.get('source_notice'):
            res.append((
                'danger', 'exclamation-circle',
                _('If you\'re using pretix under AGPL license, you need to provide instructions on how to access the '
                  'source code.')
            ))
        if answers.get('base_license') == 'agpl' and answers.get('plugins_enterprise'):
            res.append((
                'danger', 'exclamation-circle',
                _('You must not use pretix under AGPL terms if you use pretix Enterprise plugins.')
            ))
        if answers.get('base_license') not in ('enterprise', 'agpl_addperm'):
            if answers.get('base_changes') == 'yes':
                res.append((
                    'warning', 'warning',
                    _('You need to make all changes you made to pretix\' source code freely available to every visitor '
                      'of your site in source code form under the same license terms as pretix (AGPLv3 + additional '
                      'restrictions). Make sure to keep it up to date!')
                ))
            if answers.get('plugins_own'):
                res.append((
                    'warning', 'warning',
                    _('You need to make all your installed plugins freely available to every visitor '
                      'of your site in source code form under the same license terms as pretix (AGPLv3 + additional '
                      'restrictions). Make sure to keep it up to date!')
                ))
        for entry_point in pkg_resources.iter_entry_points(group='pretix.plugin', name=None):
            pkg_license, _url = self._get_license_for_pkg(entry_point.dist.key)
            if not pkg_license or not any(marker in pkg_license for marker in ('Apache', 'MIT', 'BSD', 'pretix Enterprise', 'GPL')):
                res.append((
                    'muted', 'warning',
                    _('We found the plugin "{plugin}" with license "{license}" which this tool does not know about and '
                      'therefore cannot give any recommendations.').format(plugin=entry_point.dist.key, license=pkg_license)
                ))
                continue
            if not answers.get('plugins_enterprise') and 'pretix Enterprise' in pkg_license:
                res.append((
                    'danger', 'exclamation-circle',
                    _('You selected that you have no active pretix Enterprise licenses, but we found the following '
                      'Enterprise plugin: {plugin}').format(plugin=entry_point.dist.key)
                ))
            if not answers.get('plugins_copyleft') and any(marker in pkg_license for marker in ('GPL',)):
                res.append((
                    'danger', 'exclamation-circle',
                    _('You selected that you have no copyleft-licensed plugins installed, but we found the '
                      'plugin "{plugin}" with license "{license}".').format(plugin=entry_point.dist.key, license=pkg_license)
                ))
            if not answers.get('plugins_free') and any(marker in pkg_license for marker in ('Apache', 'MIT', 'BSD')):
                res.append((
                    'danger', 'exclamation-circle',
                    _('You selected that you have no free plugins installed, but we found the '
                      'plugin "{plugin}" with license "{license}".').format(plugin=entry_point.dist.key, license=pkg_license)
                ))
        return res
| 4,826 |
361 | <reponame>rjeschke/txtmark
/*
* Copyright (C) 2011-2015 <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.rjeschke.txtmark;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
/**
* Markdown processor class.
*
* <p>
* Example usage:
* </p>
*
* <pre>
* <code>String result = Processor.process("This is ***TXTMARK***");
* </code>
* </pre>
*
* @author <NAME> <<EMAIL>>
*/
public class Processor
{
    /** The input reader the markdown source is read from. */
    private final Reader reader;
    /** The emitter that produces the HTML output. */
    private final Emitter emitter;
    /** The Configuration controlling parsing and emitting. */
    final Configuration config;
    /** Extension flag; seeded from {@code config.forceExtendedProfile} in the constructor. */
    private boolean useExtensions = false;
    /**
     * Constructor.
     *
     * @param reader
     *            The input reader.
     * @param config
     *            The configuration controlling parsing and emitting.
     */
    private Processor(final Reader reader, final Configuration config)
    {
        this.reader = reader;
        this.config = config;
        this.useExtensions = config.forceExtendedProfile;
        this.emitter = new Emitter(this.config);
    }
/**
* Transforms an input stream into HTML using the given Configuration.
*
* @param reader
* The Reader to process.
* @param configuration
* The Configuration.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @since 0.7
* @see Configuration
*/
public final static String process(final Reader reader, final Configuration configuration) throws IOException
{
final Processor p = new Processor(!(reader instanceof BufferedReader) ? new BufferedReader(reader) : reader,
configuration);
return p.process();
}
    /**
     * Transforms an input String into HTML using the given Configuration.
     *
     * @param input
     *            The String to process.
     * @param configuration
     *            The Configuration.
     * @return The processed String.
     * @since 0.7
     * @see Configuration
     */
    public final static String process(final String input, final Configuration configuration)
    {
        try
        {
            return process(new StringReader(input), configuration);
        }
        catch (final IOException e)
        {
            // This _can never_ happen: StringReader operates entirely
            // in-memory and never throws IOException.
            return null;
        }
    }
/**
* Transforms an input file into HTML using the given Configuration.
*
* @param file
* The File to process.
* @param configuration
* the Configuration
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @since 0.7
* @see Configuration
*/
public final static String process(final File file, final Configuration configuration) throws IOException
{
final FileInputStream input = new FileInputStream(file);
final String ret = process(input, configuration);
input.close();
return ret;
}
/**
* Transforms an input stream into HTML using the given Configuration.
*
* @param input
* The InputStream to process.
* @param configuration
* The Configuration.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @since 0.7
* @see Configuration
*/
public final static String process(final InputStream input, final Configuration configuration) throws IOException
{
final Processor p = new Processor(new BufferedReader(new InputStreamReader(input, configuration.encoding)),
configuration);
return p.process();
}
    /**
     * Transforms an input String into HTML using the default Configuration.
     *
     * <p>Convenience overload for {@link #process(String, Configuration)}.</p>
     *
     * @param input
     *            The String to process.
     * @return The processed String.
     * @see Configuration#DEFAULT
     */
    public final static String process(final String input)
    {
        return process(input, Configuration.DEFAULT);
    }
    /**
     * Transforms an input String into HTML.
     *
     * <p>Convenience overload building a Configuration with the given safe mode.</p>
     *
     * @param input
     *            The String to process.
     * @param safeMode
     *            Set to <code>true</code> to escape unsafe HTML tags.
     * @return The processed String.
     * @see Configuration#DEFAULT
     */
    public final static String process(final String input, final boolean safeMode)
    {
        return process(input, Configuration.builder().setSafeMode(safeMode).build());
    }
    /**
     * Transforms an input String into HTML.
     *
     * <p>Convenience overload building a Configuration with the given decorator.</p>
     *
     * @param input
     *            The String to process.
     * @param decorator
     *            The decorator to use.
     * @return The processed String.
     * @see Configuration#DEFAULT
     */
    public final static String process(final String input, final Decorator decorator)
    {
        return process(input, Configuration.builder().setDecorator(decorator).build());
    }
    /**
     * Transforms an input String into HTML.
     *
     * <p>Convenience overload building a Configuration with the given decorator
     * and safe mode.</p>
     *
     * @param input
     *            The String to process.
     * @param decorator
     *            The decorator to use.
     * @param safeMode
     *            Set to <code>true</code> to escape unsafe HTML tags.
     * @return The processed String.
     * @see Configuration#DEFAULT
     */
    public final static String process(final String input, final Decorator decorator, final boolean safeMode)
    {
        return process(input, Configuration.builder().setDecorator(decorator).setSafeMode(safeMode).build());
    }
    /**
     * Transforms an input file into HTML using the default Configuration.
     *
     * <p>Convenience overload for {@link #process(File, Configuration)}.</p>
     *
     * @param file
     *            The File to process.
     * @return The processed String.
     * @throws IOException
     *             if an IO error occurs
     * @see Configuration#DEFAULT
     */
    public final static String process(final File file) throws IOException
    {
        return process(file, Configuration.DEFAULT);
    }
    /**
     * Transforms an input file into HTML.
     *
     * <p>Convenience overload building a Configuration with the given safe mode.</p>
     *
     * @param file
     *            The File to process.
     * @param safeMode
     *            Set to <code>true</code> to escape unsafe HTML tags.
     * @return The processed String.
     * @throws IOException
     *             if an IO error occurs
     * @see Configuration#DEFAULT
     */
    public final static String process(final File file, final boolean safeMode) throws IOException
    {
        return process(file, Configuration.builder().setSafeMode(safeMode).build());
    }
    /**
     * Transforms an input file into HTML.
     *
     * <p>Convenience overload building a Configuration with the given decorator.</p>
     *
     * @param file
     *            The File to process.
     * @param decorator
     *            The decorator to use.
     * @return The processed String.
     * @throws IOException
     *             if an IO error occurs
     * @see Configuration#DEFAULT
     */
    public final static String process(final File file, final Decorator decorator) throws IOException
    {
        return process(file, Configuration.builder().setDecorator(decorator).build());
    }
    /**
     * Transforms an input file into HTML.
     *
     * <p>Convenience overload building a Configuration with the given decorator
     * and safe mode.</p>
     *
     * @param file
     *            The File to process.
     * @param decorator
     *            The decorator to use.
     * @param safeMode
     *            Set to <code>true</code> to escape unsafe HTML tags.
     * @return The processed String.
     * @throws IOException
     *             if an IO error occurs
     * @see Configuration#DEFAULT
     */
    public final static String process(final File file, final Decorator decorator, final boolean safeMode)
            throws IOException
    {
        return process(file, Configuration.builder().setDecorator(decorator).setSafeMode(safeMode).build());
    }
    /**
     * Transforms an input file into HTML.
     *
     * <p>Convenience overload building a Configuration with the given encoding.</p>
     *
     * @param file
     *            The File to process.
     * @param encoding
     *            The encoding to use.
     * @return The processed String.
     * @throws IOException
     *             if an IO error occurs
     * @see Configuration#DEFAULT
     */
    public final static String process(final File file, final String encoding) throws IOException
    {
        return process(file, Configuration.builder().setEncoding(encoding).build());
    }
/**
* Transforms an input file into HTML.
*
* @param file
* The File to process.
* @param encoding
* The encoding to use.
* @param safeMode
* Set to <code>true</code> to escape unsafe HTML tags.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final File file, final String encoding, final boolean safeMode)
throws IOException
{
return process(file, Configuration.builder().setEncoding(encoding).setSafeMode(safeMode).build());
}
/**
* Transforms an input file into HTML.
*
* @param file
* The File to process.
* @param encoding
* The encoding to use.
* @param decorator
* The decorator to use.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final File file, final String encoding, final Decorator decorator)
throws IOException
{
return process(file, Configuration.builder().setEncoding(encoding).setDecorator(decorator).build());
}
/**
* Transforms an input file into HTML.
*
* @param file
* The File to process.
* @param encoding
* The encoding to use.
* @param decorator
* The decorator to use.
* @param safeMode
* Set to <code>true</code> to escape unsafe HTML tags.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final File file, final String encoding, final Decorator decorator,
final boolean safeMode) throws IOException
{
return process(file, Configuration.builder().setEncoding(encoding).setSafeMode(safeMode)
.setDecorator(decorator).build());
}
/**
* Transforms an input stream into HTML.
*
* @param input
* The InputStream to process.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final InputStream input) throws IOException
{
return process(input, Configuration.DEFAULT);
}
/**
* Transforms an input stream into HTML.
*
* @param input
* The InputStream to process.
* @param safeMode
* Set to <code>true</code> to escape unsafe HTML tags.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final InputStream input, final boolean safeMode) throws IOException
{
return process(input, Configuration.builder().setSafeMode(safeMode).build());
}
/**
* Transforms an input stream into HTML.
*
* @param input
* The InputStream to process.
* @param decorator
* The decorator to use.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final InputStream input, final Decorator decorator) throws IOException
{
return process(input, Configuration.builder().setDecorator(decorator).build());
}
/**
* Transforms an input stream into HTML.
*
* @param input
* The InputStream to process.
* @param decorator
* The decorator to use.
* @param safeMode
* Set to <code>true</code> to escape unsafe HTML tags.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final InputStream input, final Decorator decorator, final boolean safeMode)
throws IOException
{
return process(input, Configuration.builder().setDecorator(decorator).setSafeMode(safeMode).build());
}
/**
* Transforms an input stream into HTML.
*
* @param input
* The InputStream to process.
* @param encoding
* The encoding to use.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final InputStream input, final String encoding) throws IOException
{
return process(input, Configuration.builder().setEncoding(encoding).build());
}
/**
* Transforms an input stream into HTML.
*
* @param input
* The InputStream to process.
* @param encoding
* The encoding to use.
* @param safeMode
* Set to <code>true</code> to escape unsafe HTML tags.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final InputStream input, final String encoding, final boolean safeMode)
throws IOException
{
return process(input, Configuration.builder().setEncoding(encoding).setSafeMode(safeMode).build());
}
/**
* Transforms an input stream into HTML.
*
* @param input
* The InputStream to process.
* @param encoding
* The encoding to use.
* @param decorator
* The decorator to use.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final InputStream input, final String encoding, final Decorator decorator)
throws IOException
{
return process(input, Configuration.builder().setEncoding(encoding).setDecorator(decorator).build());
}
/**
* Transforms an input stream into HTML.
*
* @param input
* The InputStream to process.
* @param encoding
* The encoding to use.
* @param decorator
* The decorator to use.
* @param safeMode
* Set to <code>true</code> to escape unsafe HTML tags.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final InputStream input, final String encoding, final Decorator decorator,
final boolean safeMode) throws IOException
{
return process(input,
Configuration.builder().setEncoding(encoding).setDecorator(decorator).setSafeMode(safeMode).build());
}
/**
* Transforms an input stream into HTML using the default Configuration.
*
* @param reader
* The Reader to process.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final Reader reader) throws IOException
{
return process(reader, Configuration.DEFAULT);
}
/**
* Transforms an input stream into HTML.
*
* @param reader
* The Reader to process.
* @param safeMode
* Set to <code>true</code> to escape unsafe HTML tags.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final Reader reader, final boolean safeMode) throws IOException
{
return process(reader, Configuration.builder().setSafeMode(safeMode).build());
}
/**
* Transforms an input stream into HTML.
*
* @param reader
* The Reader to process.
* @param decorator
* The decorator to use.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final Reader reader, final Decorator decorator) throws IOException
{
return process(reader, Configuration.builder().setDecorator(decorator).build());
}
/**
* Transforms an input stream into HTML.
*
* @param reader
* The Reader to process.
* @param decorator
* The decorator to use.
* @param safeMode
* Set to <code>true</code> to escape unsafe HTML tags.
* @return The processed String.
* @throws IOException
* if an IO error occurs
* @see Configuration#DEFAULT
*/
public final static String process(final Reader reader, final Decorator decorator, final boolean safeMode)
throws IOException
{
return process(reader, Configuration.builder().setDecorator(decorator).setSafeMode(safeMode).build());
}
    /**
     * Reads all lines from our reader.
     * <p>
     * Takes care of markdown link references.
     * </p>
     * <p>
     * Normalizes line endings (LF, CR, CRLF, LFCR all count as one line
     * break), expands tabs to 4-column stops, and removes link reference
     * definition lines from the returned block while registering them with
     * the emitter.
     * </p>
     *
     * @return A Block containing all lines.
     * @throws IOException
     *             If an IO error occurred.
     */
    private Block readLines() throws IOException
    {
        final Block block = new Block();
        final StringBuilder sb = new StringBuilder(80);
        int c = this.reader.read();
        // Pending link reference whose title may continue on the next line
        LinkRef lastLinkRef = null;
        while (c != -1)
        {
            sb.setLength(0);
            // Column position, tracked separately from sb for tab expansion
            int pos = 0;
            boolean eol = false;
            while (!eol)
            {
                switch (c)
                {
                case -1:
                    eol = true;
                    break;
                case '\n':
                    // LF; also swallow a following CR so LF and LFCR count as one break
                    c = this.reader.read();
                    if (c == '\r')
                    {
                        c = this.reader.read();
                    }
                    eol = true;
                    break;
                case '\r':
                    // CR; also swallow a following LF (CRLF)
                    c = this.reader.read();
                    if (c == '\n')
                    {
                        c = this.reader.read();
                    }
                    eol = true;
                    break;
                case '\t':
                {
                    // Expand the tab with spaces up to the next 4-column stop
                    final int np = pos + (4 - (pos & 3));
                    while (pos < np)
                    {
                        sb.append(' ');
                        pos++;
                    }
                    c = this.reader.read();
                    break;
                }
                default:
                    if (c != '<' || !this.config.panicMode)
                    {
                        pos++;
                        sb.append((char)c);
                    }
                    else
                    {
                        // NOTE(review): pos advances by 4 but only one character is
                        // appended -- this looks like it was meant to append the
                        // 4-char entity for '<' in panic mode; verify against upstream.
                        pos += 4;
                        sb.append("<");
                    }
                    c = this.reader.read();
                    break;
                }
            }
            final Line line = new Line();
            line.value = sb.toString();
            line.init();
            // Check for link definitions
            boolean isLinkRef = false;
            String id = null, link = null, comment = null;
            if (!line.isEmpty && line.leading < 4 && line.value.charAt(line.leading) == '[')
            {
                line.pos = line.leading + 1;
                // Read ID up to ']'
                id = line.readUntil(']');
                // Is ID valid and are there any more characters?
                if (id != null && line.pos + 2 < line.value.length())
                {
                    // Check for ':' ([...]:...)
                    if (line.value.charAt(line.pos + 1) == ':')
                    {
                        line.pos += 2;
                        line.skipSpaces();
                        // Check for link syntax
                        if (line.value.charAt(line.pos) == '<')
                        {
                            line.pos++;
                            link = line.readUntil('>');
                            line.pos++;
                        }
                        else
                        {
                            link = line.readUntil(' ', '\n');
                        }
                        // Is link valid?
                        if (link != null)
                        {
                            // Any non-whitespace characters following?
                            if (line.skipSpaces())
                            {
                                final char ch = line.value.charAt(line.pos);
                                // Read comment (the optional link title)
                                if (ch == '\"' || ch == '\'' || ch == '(')
                                {
                                    line.pos++;
                                    comment = line.readUntil(ch == '(' ? ')' : ch);
                                    // Valid linkRef only if comment is valid
                                    if (comment != null)
                                    {
                                        isLinkRef = true;
                                    }
                                }
                            }
                            else
                            {
                                isLinkRef = true;
                            }
                        }
                    }
                }
            }
            // To make compiler happy: add != null checks
            if (isLinkRef && id != null && link != null)
            {
                // Magic "[$PROFILE$]: extended" reference toggles the extended profile
                if (id.toLowerCase().equals("$profile$"))
                {
                    this.emitter.useExtensions = this.useExtensions = link.toLowerCase().equals("extended");
                    lastLinkRef = null;
                }
                else
                {
                    // Store linkRef and skip line
                    final LinkRef lr = new LinkRef(link, comment, comment != null
                            && (link.length() == 1 && link.charAt(0) == '*'));
                    this.emitter.addLinkRef(id, lr);
                    if (comment == null)
                    {
                        // Title may still follow on the next line
                        lastLinkRef = lr;
                    }
                }
            }
            else
            {
                comment = null;
                // Check for multi-line linkRef (title on its own line)
                if (!line.isEmpty && lastLinkRef != null)
                {
                    line.pos = line.leading;
                    final char ch = line.value.charAt(line.pos);
                    if (ch == '\"' || ch == '\'' || ch == '(')
                    {
                        line.pos++;
                        comment = line.readUntil(ch == '(' ? ')' : ch);
                    }
                    if (comment != null)
                    {
                        lastLinkRef.title = comment;
                    }
                    lastLinkRef = null;
                }
                // No multi-line linkRef, store line
                if (comment == null)
                {
                    line.pos = 0;
                    block.appendLine(line);
                }
            }
        }
        return block;
    }
    /**
     * Initializes a list block by separating it into list item blocks.
     * <p>
     * A new item starts at every list marker line, and a non-indented line
     * that follows an empty line also terminates the current item.
     * </p>
     *
     * @param root
     *            The Block to process.
     */
    private void initListBlock(final Block root)
    {
        Line line = root.lines;
        // The first line begins the first item by definition; start checking at the second
        line = line.next;
        while (line != null)
        {
            final LineType t = line.getLineType(this.config);
            if ((t == LineType.OLIST || t == LineType.ULIST)
                || (!line.isEmpty && (line.prevEmpty && line.leading == 0 && !(t == LineType.OLIST || t == LineType.ULIST))))
            {
                // Close the current item just before this line
                root.split(line.previous).type = BlockType.LIST_ITEM;
            }
            line = line.next;
        }
        // Whatever remains forms the last list item
        root.split(root.lineTail).type = BlockType.LIST_ITEM;
    }
    /**
     * Recursively process the given Block.
     * <p>
     * Splits the root block into typed child blocks (paragraphs, code,
     * block quotes, headlines, lists, ...) and recurses into block quotes
     * and list items.
     * </p>
     *
     * @param root
     *            The Block to process.
     * @param listMode
     *            Flag indicating that we're in a list item block.
     */
    private void recurse(final Block root, final boolean listMode)
    {
        Block block, list;
        Line line = root.lines;
        if (listMode)
        {
            root.removeListIndent(this.config);
            // Extended profile: a list item may carry an ID attribute on its first line
            if (this.useExtensions && root.lines != null && root.lines.getLineType(this.config) != LineType.CODE)
            {
                root.id = root.lines.stripID();
            }
        }
        // Skip leading empty lines
        while (line != null && line.isEmpty)
        {
            line = line.next;
        }
        if (line == null)
        {
            return;
        }
        while (line != null)
        {
            final LineType type = line.getLineType(this.config);
            switch (type)
            {
            case OTHER:
            {
                // Collect a paragraph: consume lines until a block-level construct starts
                final boolean wasEmpty = line.prevEmpty;
                while (line != null && !line.isEmpty)
                {
                    final LineType t = line.getLineType(this.config);
                    if ((listMode || this.useExtensions) && (t == LineType.OLIST || t == LineType.ULIST))
                    {
                        break;
                    }
                    if (this.useExtensions && (t == LineType.CODE || t == LineType.FENCED_CODE))
                    {
                        break;
                    }
                    if (t == LineType.HEADLINE || t == LineType.HEADLINE1 || t == LineType.HEADLINE2
                            || t == LineType.HR
                            || t == LineType.BQUOTE || t == LineType.XML)
                    {
                        break;
                    }
                    line = line.next;
                }
                final BlockType bt;
                if (line != null && !line.isEmpty)
                {
                    // Tight list items stay bare (NONE); otherwise it's a paragraph
                    bt = (listMode && !wasEmpty) ? BlockType.NONE : BlockType.PARAGRAPH;
                    root.split(line.previous).type = bt;
                    root.removeLeadingEmptyLines();
                }
                else
                {
                    bt = (listMode && (line == null || !line.isEmpty) && !wasEmpty) ? BlockType.NONE
                            : BlockType.PARAGRAPH;
                    root.split(line == null ? root.lineTail : line).type = bt;
                    root.removeLeadingEmptyLines();
                }
                line = root.lines;
                break;
            }
            case CODE:
                // Indented code: lines with more than 3 leading spaces (empties allowed inside)
                while (line != null && (line.isEmpty || line.leading > 3))
                {
                    line = line.next;
                }
                block = root.split(line != null ? line.previous : root.lineTail);
                block.type = BlockType.CODE;
                block.removeSurroundingEmptyLines();
                break;
            case XML:
                if (line.previous != null)
                {
                    // FIXME ... this looks wrong
                    root.split(line.previous);
                }
                root.split(line.xmlEndLine).type = BlockType.XML;
                root.removeLeadingEmptyLines();
                line = root.lines;
                break;
            case BQUOTE:
                // Quote ends at a non-indented, non-quote line following an empty one
                while (line != null)
                {
                    if (!line.isEmpty
                            && (line.prevEmpty && line.leading == 0 && line.getLineType(this.config) != LineType.BQUOTE))
                    {
                        break;
                    }
                    line = line.next;
                }
                block = root.split(line != null ? line.previous : root.lineTail);
                block.type = BlockType.BLOCKQUOTE;
                block.removeSurroundingEmptyLines();
                block.removeBlockQuotePrefix();
                // Recurse into the quote body for nested blocks
                this.recurse(block, false);
                line = root.lines;
                break;
            case HR:
                if (line.previous != null)
                {
                    // FIXME ... this looks wrong
                    root.split(line.previous);
                }
                root.split(line).type = BlockType.RULER;
                root.removeLeadingEmptyLines();
                line = root.lines;
                break;
            case FENCED_CODE:
                // Scan forward for the closing fence
                line = line.next;
                while (line != null)
                {
                    if (line.getLineType(this.config) == LineType.FENCED_CODE)
                    {
                        break;
                    }
                    // TODO ... is this really necessary? Maybe add a special
                    // flag?
                    line = line.next;
                }
                if (line != null)
                {
                    line = line.next;
                }
                block = root.split(line != null ? line.previous : root.lineTail);
                block.type = BlockType.FENCED_CODE;
                // The language/meta string comes from the opening fence line
                block.meta = Utils.getMetaFromFence(block.lines.value);
                block.lines.setEmpty();
                if (block.lineTail.getLineType(this.config) == LineType.FENCED_CODE)
                {
                    block.lineTail.setEmpty();
                }
                block.removeSurroundingEmptyLines();
                break;
            case HEADLINE:
            case HEADLINE1:
            case HEADLINE2:
                if (line.previous != null)
                {
                    root.split(line.previous);
                }
                if (type != LineType.HEADLINE)
                {
                    // Setext headline: blank out the underline ("===" / "---") line
                    line.next.setEmpty();
                }
                block = root.split(line);
                block.type = BlockType.HEADLINE;
                if (type != LineType.HEADLINE)
                {
                    block.hlDepth = type == LineType.HEADLINE1 ? 1 : 2;
                }
                if (this.useExtensions)
                {
                    block.id = block.lines.stripID();
                }
                block.transfromHeadline();
                root.removeLeadingEmptyLines();
                line = root.lines;
                break;
            case OLIST:
            case ULIST:
                // List ends at a non-indented, non-marker line following an empty one
                while (line != null)
                {
                    final LineType t = line.getLineType(this.config);
                    if (!line.isEmpty
                            && (line.prevEmpty && line.leading == 0 && !(t == LineType.OLIST || t == LineType.ULIST)))
                    {
                        break;
                    }
                    line = line.next;
                }
                list = root.split(line != null ? line.previous : root.lineTail);
                list.type = type == LineType.OLIST ? BlockType.ORDERED_LIST : BlockType.UNORDERED_LIST;
                list.lines.prevEmpty = false;
                list.lineTail.nextEmpty = false;
                list.removeSurroundingEmptyLines();
                list.lines.prevEmpty = list.lineTail.nextEmpty = false;
                this.initListBlock(list);
                block = list.blocks;
                // Recurse into every list item
                while (block != null)
                {
                    this.recurse(block, true);
                    block = block.next;
                }
                list.expandListParagraphs();
                break;
            default:
                line = line.next;
                break;
            }
        }
    }
/**
* Does all the processing.
*
* @return The processed String.
* @throws IOException
* If an IO error occurred.
*/
private String process() throws IOException
{
final StringBuilder out = new StringBuilder();
final Block parent = this.readLines();
parent.removeSurroundingEmptyLines();
this.recurse(parent, false);
Block block = parent.blocks;
while (block != null)
{
this.emitter.emit(out, block);
block = block.next;
}
return out.toString();
}
}
| 16,780 |
403 | package org.camunda.bpm.demo.loan_request_handling.nonarquillian;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import org.apache.commons.io.FileUtils;
import org.apache.ibatis.logging.LogFactory;
import org.camunda.bpm.engine.runtime.ProcessInstance;
import org.camunda.bpm.engine.test.ProcessEngineRule;
import org.camunda.bpm.engine.test.Deployment;
import org.camunda.bpm.model.bpmn.Bpmn;
import org.camunda.bpm.model.bpmn.BpmnModelInstance;
import org.camunda.bpm.model.bpmn.instance.Message;
import org.camunda.bpm.model.bpmn.instance.MessageEventDefinition;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import static org.camunda.bpm.engine.test.assertions.ProcessEngineTests.*;
import static org.junit.Assert.*;
import org.camunda.bpm.consulting.process_test_coverage.ProcessTestCoverage;
/**
 * Test case starting an in-memory database-backed Process Engine.
 */
public class InMemoryH2Test {
  @Rule
  public ProcessEngineRule rule = new ProcessEngineRule();
  // NOTE(review): currently unused in this class -- the tests below start
  // "BOLoanRequestHandling" instead; verify which definition key is intended.
  private static final String PROCESS_DEFINITION_KEY = "loan-request-handling";
  static {
    // Route MyBatis (engine persistence layer) logging through SLF4J.
    LogFactory.useSlf4jLogging(); // MyBatis
  }
  @Before
  public void setup() {
    // Bind the assertion helpers (assertThat/task/complete/...) to this rule's engine.
    init(rule.getProcessEngine());
  }
  // Utility run on demand (hence @Ignore): rewrites the hand-made model into
  // an executable BPMN file under src/test/resources.
  @Ignore
  @Test
  public void makeModelDeployable() throws IOException {
    InputStream stream = this.getClass().getResourceAsStream("/Final Model.bpmn");
    BpmnModelInstance modelInstance = Bpmn.readModelFromStream(stream);
    new ExecutableModelGenerator().makeExecutable(modelInstance);
    FileUtils.writeStringToFile(new File("src/test/resources/FinalModel.executable.bpmn"), Bpmn.convertToString(modelInstance));
  }
  /**
   * Just tests if the process definition is deployable.
   */
  @Test
  @Deployment(resources = "FinalModel.executable.bpmn")
  public void testParsingAndDeployment() {
    // nothing is done here, as we just want to check for exceptions during deployment
  }
  @Ignore
  @Test
  @Deployment(resources = "branchofficeonlyloan-ref-en.bpmn")
  public void testHappyPath() {
    ProcessInstance processInstance = processEngine().getRuntimeService().startProcessInstanceByKey("BOLoanRequestHandling");
    assertThat(processInstance).isActive();
    // Complete the single waiting user task.
    complete(task());
  }
  @After
  public void calculateCoverageForAllTests() throws Exception {
    // Generates process test coverage output for the engine after each test.
    ProcessTestCoverage.calculate(rule.getProcessEngine());
  }
}
| 831 |
321 | <gh_stars>100-1000
#pragma once
#include "Camera.h"
#include "Light.h"
#include "MeshFilter.h"
#include "MeshRenderer.h"
#include "Skybox.h"
| 61 |
1,650 | /*
* Copyright (c) 2015 <NAME>
*
* This file is part of libsoundio, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/
#include "jack.h"
#include "soundio_private.h"
#include "list.h"
#include <stdio.h>
/* NOTE(review): used outside this excerpt -- presumably guards a
 * once-per-process action tied to JACK message callbacks; confirm at its
 * use sites. */
static struct SoundIoAtomicFlag global_msg_callback_flag = SOUNDIO_ATOMIC_FLAG_INIT;
/* One JACK port as discovered during a device scan. The name pointers
 * reference strings owned by the jack_get_ports() result and are only
 * valid for the duration of the scan (they are duplicated before being
 * stored on a device). */
struct SoundIoJackPort {
    const char *full_name;              // "client:port"
    int full_name_len;
    const char *name;                   // port part only (text after the colon)
    int name_len;
    enum SoundIoChannelId channel_id;   // parsed from the port name
    jack_latency_range_t latency_range;
};
/* A JACK client grouped from its ports during a scan; each becomes one
 * SoundIoDevice. */
struct SoundIoJackClient {
    const char *name;
    int name_len;
    bool is_physical;                   // JackPortIsPhysical set on its ports
    enum SoundIoDeviceAim aim;          // input or output device
    int port_count;                     // capped at SOUNDIO_MAX_CHANNELS
    struct SoundIoJackPort ports[SOUNDIO_MAX_CHANNELS];
};
// Generate the static list type and functions used to collect clients while scanning.
SOUNDIO_MAKE_LIST_STRUCT(struct SoundIoJackClient, SoundIoListJackClient, SOUNDIO_LIST_STATIC)
SOUNDIO_MAKE_LIST_DEF(struct SoundIoJackClient, SoundIoListJackClient, SOUNDIO_LIST_STATIC)
/* Splits input_str (of length input_str_len) at the first occurrence of the
 * separator c.
 * On success, *out_1/*out_len_1 describe the text before the separator and
 * *out_2/*out_len_2 the text after it.
 * If the separator does not occur, *out_2 is set to NULL, *out_len_2 to 0
 * and the first part spans the whole input. (Previously those outputs were
 * left untouched in that case; callers had to pre-initialize them.) */
static void split_str(const char *input_str, int input_str_len, char c,
        const char **out_1, int *out_len_1, const char **out_2, int *out_len_2)
{
    *out_1 = input_str;
    for (const char *p = input_str; *p; p += 1) {
        if (*p == c) {
            *out_len_1 = p - input_str;
            *out_2 = p + 1;
            // Total length minus the separator minus the first part.
            *out_len_2 = input_str_len - 1 - *out_len_1;
            return;
        }
    }
    // No separator found: make all outputs well-defined.
    *out_len_1 = input_str_len;
    *out_2 = NULL;
    *out_len_2 = 0;
}
/* Returns the scan-time client entry matching (aim, is_physical, name),
 * appending and initializing a new one when no match exists.
 * Returns NULL on out-of-memory. The name pointer is stored as-is (not
 * copied). */
static struct SoundIoJackClient *find_or_create_client(struct SoundIoListJackClient *clients,
        enum SoundIoDeviceAim aim, bool is_physical, const char *client_name, int client_name_len)
{
    // First, look for an existing entry with the same kind and name.
    for (int i = 0; i < clients->length; i += 1) {
        struct SoundIoJackClient *existing = SoundIoListJackClient_ptr_at(clients, i);
        bool same_kind = (existing->is_physical == is_physical) && (existing->aim == aim);
        if (same_kind && soundio_streql(existing->name, existing->name_len, client_name, client_name_len))
            return existing;
    }
    // Not found: append a fresh, empty client record.
    if (SoundIoListJackClient_add_one(clients))
        return NULL;
    struct SoundIoJackClient *created = SoundIoListJackClient_last_ptr(clients);
    created->is_physical = is_physical;
    created->aim = aim;
    created->name = client_name;
    created->name_len = client_name_len;
    created->port_count = 0;
    return created;
}
/* Device destructor hook: releases the duplicated JACK port names and the
 * port array owned by the device's backend data. */
static void destruct_device(struct SoundIoDevicePrivate *dp) {
    struct SoundIoDeviceJack *dj = &dp->backend_data.jack;
    for (int port_index = 0; port_index < dj->port_count; port_index += 1)
        free(dj->ports[port_index].full_name);
    free(dj->ports);
}
/* Performs one scan of the JACK graph and rebuilds the device list.
 * Ports are grouped by client name into SoundIoDevice objects. Returns
 * SoundIoErrorInterrupted when the port list changes while we iterate it
 * (the caller retries); on success the new snapshot is published into
 * si->safe_devices_info. */
static int refresh_devices_bare(struct SoundIoPrivate *si) {
    struct SoundIo *soundio = &si->pub;
    struct SoundIoJack *sij = &si->backend_data.jack;
    if (sij->is_shutdown)
        return SoundIoErrorBackendDisconnected;
    struct SoundIoDevicesInfo *devices_info = ALLOCATE(struct SoundIoDevicesInfo, 1);
    if (!devices_info)
        return SoundIoErrorNoMem;
    devices_info->default_output_index = -1;
    devices_info->default_input_index = -1;
    const char **port_names = jack_get_ports(sij->client, NULL, NULL, 0);
    if (!port_names) {
        soundio_destroy_devices_info(devices_info);
        return SoundIoErrorNoMem;
    }
    struct SoundIoListJackClient clients = {0};
    // First pass: group all audio ports by client into `clients`.
    const char **port_name_ptr = port_names;
    for (; *port_name_ptr; port_name_ptr += 1) {
        const char *client_and_port_name = *port_name_ptr;
        int client_and_port_name_len = strlen(client_and_port_name);
        jack_port_t *jport = jack_port_by_name(sij->client, client_and_port_name);
        if (!jport) {
            // This refresh devices scan is already outdated. Just give up and
            // let refresh_devices be called again.
            jack_free(port_names);
            soundio_destroy_devices_info(devices_info);
            return SoundIoErrorInterrupted;
        }
        int flags = jack_port_flags(jport);
        const char *port_type = jack_port_type(jport);
        if (strcmp(port_type, JACK_DEFAULT_AUDIO_TYPE) != 0) {
            // we don't know how to support such a port
            continue;
        }
        // A port JACK reads from (JackPortIsInput) is an *output* device for us.
        enum SoundIoDeviceAim aim = (flags & JackPortIsInput) ?
            SoundIoDeviceAimOutput : SoundIoDeviceAimInput;
        bool is_physical = flags & JackPortIsPhysical;
        const char *client_name = NULL;
        const char *port_name = NULL;
        int client_name_len;
        int port_name_len;
        split_str(client_and_port_name, client_and_port_name_len, ':',
                &client_name, &client_name_len, &port_name, &port_name_len);
        if (!client_name || !port_name) {
            // device does not have colon, skip it
            continue;
        }
        struct SoundIoJackClient *client = find_or_create_client(&clients, aim, is_physical,
                client_name, client_name_len);
        if (!client) {
            jack_free(port_names);
            soundio_destroy_devices_info(devices_info);
            return SoundIoErrorNoMem;
        }
        if (client->port_count >= SOUNDIO_MAX_CHANNELS) {
            // we hit the channel limit, skip the leftovers
            continue;
        }
        struct SoundIoJackPort *port = &client->ports[client->port_count++];
        port->full_name = client_and_port_name;
        port->full_name_len = client_and_port_name_len;
        port->name = port_name;
        port->name_len = port_name_len;
        port->channel_id = soundio_parse_channel_id(port_name, port_name_len);
        jack_latency_callback_mode_t latency_mode = (aim == SoundIoDeviceAimOutput) ?
            JackPlaybackLatency : JackCaptureLatency;
        jack_port_get_latency_range(jport, latency_mode, &port->latency_range);
    }
    // Second pass: turn each collected client into a SoundIoDevice.
    for (int i = 0; i < clients.length; i += 1) {
        struct SoundIoJackClient *client = SoundIoListJackClient_ptr_at(&clients, i);
        if (client->port_count <= 0)
            continue;
        struct SoundIoDevicePrivate *dev = ALLOCATE(struct SoundIoDevicePrivate, 1);
        if (!dev) {
            jack_free(port_names);
            soundio_destroy_devices_info(devices_info);
            return SoundIoErrorNoMem;
        }
        struct SoundIoDevice *device = &dev->pub;
        struct SoundIoDeviceJack *dj = &dev->backend_data.jack;
        // Description is "name: port1, port2, ...": name + ": " + ", " separators + NUL.
        int description_len = client->name_len + 3 + 2 * client->port_count;
        for (int port_index = 0; port_index < client->port_count; port_index += 1) {
            struct SoundIoJackPort *port = &client->ports[port_index];
            description_len += port->name_len;
        }
        dev->destruct = destruct_device;
        device->ref_count = 1;
        device->soundio = soundio;
        device->is_raw = false;
        device->aim = client->aim;
        device->id = soundio_str_dupe(client->name, client->name_len);
        device->name = ALLOCATE(char, description_len);
        device->current_format = SoundIoFormatFloat32NE;
        // JACK fixes the sample rate and period size for the whole graph,
        // so each device advertises exactly one possible value.
        device->sample_rate_count = 1;
        device->sample_rates = &dev->prealloc_sample_rate_range;
        device->sample_rates[0].min = sij->sample_rate;
        device->sample_rates[0].max = sij->sample_rate;
        device->sample_rate_current = sij->sample_rate;
        device->software_latency_current = sij->period_size / (double) sij->sample_rate;
        device->software_latency_min = sij->period_size / (double) sij->sample_rate;
        device->software_latency_max = sij->period_size / (double) sij->sample_rate;
        dj->port_count = client->port_count;
        dj->ports = ALLOCATE(struct SoundIoDeviceJackPort, dj->port_count);
        if (!device->id || !device->name || !dj->ports) {
            jack_free(port_names);
            soundio_device_unref(device);
            soundio_destroy_devices_info(devices_info);
            return SoundIoErrorNoMem;
        }
        // Copy the scan-time port data into device-owned storage.
        for (int port_index = 0; port_index < client->port_count; port_index += 1) {
            struct SoundIoJackPort *port = &client->ports[port_index];
            struct SoundIoDeviceJackPort *djp = &dj->ports[port_index];
            djp->full_name = soundio_str_dupe(port->full_name, port->full_name_len);
            djp->full_name_len = port->full_name_len;
            djp->channel_id = port->channel_id;
            djp->latency_range = port->latency_range;
            if (!djp->full_name) {
                jack_free(port_names);
                soundio_device_unref(device);
                soundio_destroy_devices_info(devices_info);
                return SoundIoErrorNoMem;
            }
        }
        // Build the human-readable description string.
        memcpy(device->name, client->name, client->name_len);
        memcpy(&device->name[client->name_len], ": ", 2);
        int index = client->name_len + 2;
        for (int port_index = 0; port_index < client->port_count; port_index += 1) {
            struct SoundIoJackPort *port = &client->ports[port_index];
            memcpy(&device->name[index], port->name, port->name_len);
            index += port->name_len;
            if (port_index + 1 < client->port_count) {
                memcpy(&device->name[index], ", ", 2);
                index += 2;
            }
        }
        device->current_layout.channel_count = client->port_count;
        bool any_invalid = false;
        for (int port_index = 0; port_index < client->port_count; port_index += 1) {
            struct SoundIoJackPort *port = &client->ports[port_index];
            device->current_layout.channels[port_index] = port->channel_id;
            any_invalid = any_invalid || (port->channel_id == SoundIoChannelIdInvalid);
        }
        if (any_invalid) {
            // Unrecognized port names: fall back to the default layout for this channel count.
            const struct SoundIoChannelLayout *layout = soundio_channel_layout_get_default(client->port_count);
            if (layout)
                device->current_layout = *layout;
        } else {
            soundio_channel_layout_detect_builtin(&device->current_layout);
        }
        device->layout_count = 1;
        device->layouts = &device->current_layout;
        device->format_count = 1;
        device->formats = &dev->prealloc_format;
        device->formats[0] = device->current_format;
        struct SoundIoListDevicePtr *device_list;
        if (device->aim == SoundIoDeviceAimOutput) {
            device_list = &devices_info->output_devices;
            // The first physical client of each aim becomes the default device.
            if (devices_info->default_output_index < 0 && client->is_physical)
                devices_info->default_output_index = device_list->length;
        } else {
            assert(device->aim == SoundIoDeviceAimInput);
            device_list = &devices_info->input_devices;
            if (devices_info->default_input_index < 0 && client->is_physical)
                devices_info->default_input_index = device_list->length;
        }
        if (SoundIoListDevicePtr_append(device_list, device)) {
            soundio_device_unref(device);
            soundio_destroy_devices_info(devices_info);
            return SoundIoErrorNoMem;
        }
    }
    jack_free(port_names);
    // Publish the new snapshot, destroying the previous one.
    soundio_destroy_devices_info(si->safe_devices_info);
    si->safe_devices_info = devices_info;
    return 0;
}
/* Wrapper around refresh_devices_bare() that retries while the JACK port
 * list changes out from under the scan (SoundIoErrorInterrupted). */
static int refresh_devices(struct SoundIoPrivate *si) {
    int err;
    do {
        err = refresh_devices_bare(si);
    } while (err == SoundIoErrorInterrupted);
    return err;
}
/* Common implementation of the flush_events/wait_events backend entries.
 * Under sij->mutex it optionally waits on the condition variable, detects
 * a JACK server shutdown (delivering on_backend_disconnect exactly once),
 * and otherwise re-scans the device list when a refresh was requested. */
static void my_flush_events(struct SoundIoPrivate *si, bool wait) {
    struct SoundIo *soundio = &si->pub;
    struct SoundIoJack *sij = &si->backend_data.jack;
    int err;
    bool cb_shutdown = false;
    soundio_os_mutex_lock(sij->mutex);
    if (wait)
        soundio_os_cond_wait(sij->cond, sij->mutex);
    if (sij->is_shutdown && !sij->emitted_shutdown_cb) {
        // Latch so the disconnect callback fires only once.
        sij->emitted_shutdown_cb = true;
        cb_shutdown = true;
    }
    soundio_os_mutex_unlock(sij->mutex);
    if (cb_shutdown) {
        // Deliver the callback outside the lock.
        soundio->on_backend_disconnect(soundio, SoundIoErrorBackendDisconnected);
    } else {
        // Test-and-set: only one flusher performs the refresh per request.
        if (!SOUNDIO_ATOMIC_FLAG_TEST_AND_SET(sij->refresh_devices_flag)) {
            if ((err = refresh_devices(si))) {
                // Refresh failed: clear the flag so a later flush retries.
                SOUNDIO_ATOMIC_FLAG_CLEAR(sij->refresh_devices_flag);
            } else {
                soundio->on_devices_change(soundio);
            }
        }
    }
}
/* Backend vtable entry: non-blocking flush of pending backend events. */
static void flush_events_jack(struct SoundIoPrivate *si) {
    my_flush_events(si, false);
}
/* Backend vtable entry: flush pending events, block until the next
 * wakeup/shutdown/scan request, then flush again. */
static void wait_events_jack(struct SoundIoPrivate *si) {
    my_flush_events(si, false);
    my_flush_events(si, true);
}
/* Backend vtable entry: wake a thread blocked in wait_events_jack().
 * Signaling under the mutex pairs with the cond_wait in my_flush_events. */
static void wakeup_jack(struct SoundIoPrivate *si) {
    struct SoundIoJack *sij = &si->backend_data.jack;
    soundio_os_mutex_lock(sij->mutex);
    soundio_os_cond_signal(sij->cond, sij->mutex);
    soundio_os_mutex_unlock(sij->mutex);
}
/* Backend vtable entry: request a device rescan. Clearing the flag makes
 * the next flush re-run refresh_devices(); the signal wakes any waiter and
 * on_events_signal notifies the user's event loop. */
static void force_device_scan_jack(struct SoundIoPrivate *si) {
    struct SoundIo *soundio = &si->pub;
    struct SoundIoJack *sij = &si->backend_data.jack;
    SOUNDIO_ATOMIC_FLAG_CLEAR(sij->refresh_devices_flag);
    soundio_os_mutex_lock(sij->mutex);
    soundio_os_cond_signal(sij->cond, sij->mutex);
    soundio->on_events_signal(soundio);
    soundio_os_mutex_unlock(sij->mutex);
}
/* JACK realtime process callback for output streams. It only fetches the
 * per-channel JACK buffers, fills the area table, and forwards to the
 * user's write_callback with exactly `nframes` frames (min == max). */
static int outstream_process_callback(jack_nframes_t nframes, void *arg) {
    struct SoundIoOutStreamPrivate *os = (struct SoundIoOutStreamPrivate *)arg;
    struct SoundIoOutStreamJack *osj = &os->backend_data.jack;
    struct SoundIoOutStream *outstream = &os->pub;
    osj->frames_left = nframes;
    for (int ch = 0; ch < outstream->layout.channel_count; ch += 1) {
        struct SoundIoOutStreamJackPort *osjp = &osj->ports[ch];
        // Non-interleaved: one JACK buffer per channel, one sample per step.
        osj->areas[ch].ptr = (char*)jack_port_get_buffer(osjp->source_port, nframes);
        osj->areas[ch].step = outstream->bytes_per_sample;
    }
    outstream->write_callback(outstream, osj->frames_left, osj->frames_left);
    return 0;
}
/* Tears down the per-stream JACK client.
 * Also invoked on the error paths of outstream_open_jack(), where the
 * client may never have been created; guard against a NULL handle so we
 * don't pass NULL to jack_client_close(). */
static void outstream_destroy_jack(struct SoundIoPrivate *is, struct SoundIoOutStreamPrivate *os) {
    struct SoundIoOutStreamJack *osj = &os->backend_data.jack;
    if (osj->client) {
        jack_client_close(osj->client);
        osj->client = NULL;
    }
}
/* Looks up the backend port that carries the given channel id within the
 * device's current layout; returns NULL when no channel matches. */
static struct SoundIoDeviceJackPort *find_port_matching_channel(struct SoundIoDevice *device, enum SoundIoChannelId id) {
    struct SoundIoDevicePrivate *dev = (struct SoundIoDevicePrivate *)device;
    struct SoundIoDeviceJack *dj = &dev->backend_data.jack;
    // Port order matches layout channel order, so the indexes line up.
    for (int ch = 0; ch < device->current_layout.channel_count; ch += 1) {
        if (device->current_layout.channels[ch] == id)
            return &dj->ports[ch];
    }
    return NULL;
}
/* JACK xrun callback: report the xrun to the user as an underflow. */
static int outstream_xrun_callback(void *arg) {
    struct SoundIoOutStreamPrivate *os = (struct SoundIoOutStreamPrivate *)arg;
    struct SoundIoOutStream *outstream = &os->pub;
    outstream->underflow_callback(outstream);
    return 0;
}
/* JACK buffer-size callback. A running stream cannot adapt to a new period
 * size, so any value other than the one negotiated at open time is surfaced
 * as a streaming error (and -1 is returned to JACK). */
static int outstream_buffer_size_callback(jack_nframes_t nframes, void *arg) {
    struct SoundIoOutStreamPrivate *os = (struct SoundIoOutStreamPrivate *)arg;
    struct SoundIoOutStreamJack *osj = &os->backend_data.jack;
    struct SoundIoOutStream *outstream = &os->pub;
    if ((jack_nframes_t)osj->period_size != nframes) {
        outstream->error_callback(outstream, SoundIoErrorStreaming);
        return -1;
    }
    return 0;
}
/* JACK sample-rate callback. The stream was opened for a fixed rate; any
 * other value is surfaced as a streaming error (and -1 returned to JACK). */
static int outstream_sample_rate_callback(jack_nframes_t nframes, void *arg) {
    struct SoundIoOutStreamPrivate *os = (struct SoundIoOutStreamPrivate *)arg;
    struct SoundIoOutStream *outstream = &os->pub;
    if (nframes != (jack_nframes_t)outstream->sample_rate) {
        outstream->error_callback(outstream, SoundIoErrorStreaming);
        return -1;
    }
    return 0;
}
/* Called by JACK when the server shuts down this client; surfaced to the
 * user as a streaming error. */
static void outstream_shutdown_callback(void *arg) {
    struct SoundIoOutStreamPrivate *os = (struct SoundIoOutStreamPrivate *)arg;
    struct SoundIoOutStream *outstream = &os->pub;
    outstream->error_callback(outstream, SoundIoErrorStreaming);
}
/* Returns the larger of two frame counts. */
static inline jack_nframes_t nframes_max(jack_nframes_t a, jack_nframes_t b) {
    return (a < b) ? b : a;
}
// Open a playback stream: create a dedicated JACK client, install the stream
// callbacks, register one output port per channel and resolve which physical
// destination port each channel should connect to. The actual connections are
// made later, in outstream_start_jack. Returns a SoundIoError code, 0 on
// success.
static int outstream_open_jack(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os) {
    struct SoundIoJack *sij = &si->backend_data.jack;
    struct SoundIoOutStreamJack *osj = &os->backend_data.jack;
    struct SoundIoOutStream *outstream = &os->pub;
    struct SoundIoDevice *device = outstream->device;
    struct SoundIoDevicePrivate *dev = (struct SoundIoDevicePrivate *)device;
    struct SoundIoDeviceJack *dj = &dev->backend_data.jack;
    if (sij->is_shutdown)
        return SoundIoErrorBackendDisconnected;
    if (!outstream->name)
        outstream->name = "SoundIoOutStream";
    // JACK dictates latency and period size; the stream simply adopts them.
    outstream->software_latency = device->software_latency_current;
    osj->period_size = sij->period_size;
    jack_status_t status;
    osj->client = jack_client_open(outstream->name, JackNoStartServer, &status);
    if (!osj->client) {
        outstream_destroy_jack(si, os);
        assert(!(status & JackInvalidOption));
        // Map the JACK status bits to the closest SoundIoError.
        if (status & JackShmFailure)
            return SoundIoErrorSystemResources;
        if (status & JackNoSuchClient)
            return SoundIoErrorNoSuchClient;
        return SoundIoErrorOpeningDevice;
    }
    int err;
    if ((err = jack_set_process_callback(osj->client, outstream_process_callback, os))) {
        outstream_destroy_jack(si, os);
        return SoundIoErrorOpeningDevice;
    }
    // Buffer-size and sample-rate changes after open are treated as fatal
    // streaming errors by the registered callbacks.
    if ((err = jack_set_buffer_size_callback(osj->client, outstream_buffer_size_callback, os))) {
        outstream_destroy_jack(si, os);
        return SoundIoErrorOpeningDevice;
    }
    if ((err = jack_set_sample_rate_callback(osj->client, outstream_sample_rate_callback, os))) {
        outstream_destroy_jack(si, os);
        return SoundIoErrorOpeningDevice;
    }
    if ((err = jack_set_xrun_callback(osj->client, outstream_xrun_callback, os))) {
        outstream_destroy_jack(si, os);
        return SoundIoErrorOpeningDevice;
    }
    jack_on_shutdown(osj->client, outstream_shutdown_callback, os);
    jack_nframes_t max_port_latency = 0;
    // register ports and map channels
    int connected_count = 0;
    for (int ch = 0; ch < outstream->layout.channel_count; ch += 1) {
        enum SoundIoChannelId my_channel_id = outstream->layout.channels[ch];
        const char *channel_name = soundio_get_channel_name(my_channel_id);
        unsigned long flags = JackPortIsOutput;
        if (!outstream->non_terminal_hint)
            flags |= JackPortIsTerminal;
        jack_port_t *jport = jack_port_register(osj->client, channel_name, JACK_DEFAULT_AUDIO_TYPE, flags, 0);
        if (!jport) {
            outstream_destroy_jack(si, os);
            return SoundIoErrorOpeningDevice;
        }
        struct SoundIoOutStreamJackPort *osjp = &osj->ports[ch];
        osjp->source_port = jport;
        // figure out which dest port this connects to
        struct SoundIoDeviceJackPort *djp = find_port_matching_channel(device, my_channel_id);
        if (djp) {
            osjp->dest_port_name = djp->full_name;
            osjp->dest_port_name_len = djp->full_name_len;
            connected_count += 1;
            max_port_latency = nframes_max(max_port_latency, djp->latency_range.max);
        }
    }
    // If nothing got connected, channel layouts aren't working. Just send the
    // data in the order of the ports.
    if (connected_count == 0) {
        max_port_latency = 0;
        outstream->layout_error = SoundIoErrorIncompatibleDevice;
        int ch_count = soundio_int_min(outstream->layout.channel_count, dj->port_count);
        for (int ch = 0; ch < ch_count; ch += 1) {
            struct SoundIoOutStreamJackPort *osjp = &osj->ports[ch];
            struct SoundIoDeviceJackPort *djp = &dj->ports[ch];
            osjp->dest_port_name = djp->full_name;
            osjp->dest_port_name_len = djp->full_name_len;
            max_port_latency = nframes_max(max_port_latency, djp->latency_range.max);
        }
    }
    // Worst-case destination-port latency in frames, converted to seconds.
    osj->hardware_latency = max_port_latency / (double)outstream->sample_rate;
    return 0;
}
// JACK has no pause concept: report backend disconnect if the server is gone,
// otherwise signal that pausing is unsupported on this backend.
static int outstream_pause_jack(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os, bool pause) {
    struct SoundIoJack *sij = &si->backend_data.jack;
    return sij->is_shutdown ? SoundIoErrorBackendDisconnected : SoundIoErrorIncompatibleBackend;
}
// Start playback: activate the JACK client (which starts the process callback
// firing) and then wire each registered source port to the destination port
// resolved during open. Ports with no resolved destination are deliberately
// left unconnected.
static int outstream_start_jack(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os) {
    struct SoundIoOutStreamJack *osj = &os->backend_data.jack;
    struct SoundIoOutStream *outstream = &os->pub;
    struct SoundIoJack *sij = &si->backend_data.jack;
    int err;
    if (sij->is_shutdown)
        return SoundIoErrorBackendDisconnected;
    // Activate before connecting; connections reference this client's ports.
    if ((err = jack_activate(osj->client)))
        return SoundIoErrorStreaming;
    for (int ch = 0; ch < outstream->layout.channel_count; ch += 1) {
        struct SoundIoOutStreamJackPort *osjp = &osj->ports[ch];
        const char *dest_port_name = osjp->dest_port_name;
        // allow unconnected ports
        if (!dest_port_name)
            continue;
        const char *source_port_name = jack_port_name(osjp->source_port);
        if ((err = jack_connect(osj->client, source_port_name, dest_port_name)))
            return SoundIoErrorStreaming;
    }
    return 0;
}
// Hand the caller the per-channel buffers prepared by the process callback.
// A JACK period is all-or-nothing: the caller must request exactly the frame
// count that the process callback reported.
static int outstream_begin_write_jack(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os,
        struct SoundIoChannelArea **out_areas, int *frame_count)
{
    struct SoundIoOutStreamJack *osj = &os->backend_data.jack;
    if (*frame_count == osj->frames_left) {
        *out_areas = osj->areas;
        return 0;
    }
    return SoundIoErrorInvalid;
}
// Mark the current period as fully written.
static int outstream_end_write_jack(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os) {
    os->backend_data.jack.frames_left = 0;
    return 0;
}
// Clearing a buffer is not supported on the JACK backend; callers get an
// explicit incompatible-backend error rather than a silent no-op.
static int outstream_clear_buffer_jack(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os) {
    return SoundIoErrorIncompatibleBackend;
}
// Report the hardware latency that was computed at open time from the
// destination ports' latency ranges.
static int outstream_get_latency_jack(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os,
        double *out_latency)
{
    *out_latency = os->backend_data.jack.hardware_latency;
    return 0;
}
// Tear down the capture stream's JACK client.
//
// Guards against a NULL client: instream_open_jack calls this function on the
// error path where jack_client_open itself failed (isj->client == NULL), and
// the original code passed that NULL straight to jack_client_close. The guard
// also makes repeated destroys safe and matches the defensive style used by
// destroy_jack() for the backend-wide client.
static void instream_destroy_jack(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
    struct SoundIoInStreamJack *isj = &is->backend_data.jack;
    if (isj->client) {
        jack_client_close(isj->client);
        isj->client = NULL;
    }
}
// JACK xrun hook for capture: forward the overflow notification to the user's
// callback. Returns 0 so JACK keeps the stream running.
static int instream_xrun_callback(void *arg) {
    struct SoundIoInStreamPrivate *is = (struct SoundIoInStreamPrivate *)arg;
    is->pub.overflow_callback(&is->pub);
    return 0;
}
// JACK buffer-size hook for capture: the stream was opened for a fixed period
// size, so a change is reported as a streaming error and rejected (-1).
static int instream_buffer_size_callback(jack_nframes_t nframes, void *arg) {
    struct SoundIoInStreamPrivate *is = (struct SoundIoInStreamPrivate *)arg;
    struct SoundIoInStreamJack *isj = &is->backend_data.jack;
    struct SoundIoInStream *instream = &is->pub;
    if (nframes != (jack_nframes_t)isj->period_size) {
        instream->error_callback(instream, SoundIoErrorStreaming);
        return -1;
    }
    return 0;
}
// JACK sample-rate hook for capture: any rate different from the one the
// stream was opened with is surfaced as a streaming error and rejected (-1).
static int instream_sample_rate_callback(jack_nframes_t nframes, void *arg) {
    struct SoundIoInStreamPrivate *is = (struct SoundIoInStreamPrivate *)arg;
    struct SoundIoInStream *instream = &is->pub;
    if (nframes != (jack_nframes_t)instream->sample_rate) {
        instream->error_callback(instream, SoundIoErrorStreaming);
        return -1;
    }
    return 0;
}
// JACK shutdown hook for the capture client: the server went away, so report
// a streaming error to the user.
static void instream_shutdown_callback(void *arg) {
    struct SoundIoInStreamPrivate *is = (struct SoundIoInStreamPrivate *)arg;
    is->pub.error_callback(&is->pub, SoundIoErrorStreaming);
}
// JACK process callback for capture. Points each channel area at the
// JACK-owned port buffer for this period, then invokes the user's read
// callback with min == max == nframes, i.e. the whole period must be consumed
// in a single call. Runs on JACK's process thread.
static int instream_process_callback(jack_nframes_t nframes, void *arg) {
    struct SoundIoInStreamPrivate *is = (struct SoundIoInStreamPrivate *)arg;
    struct SoundIoInStream *instream = &is->pub;
    struct SoundIoInStreamJack *isj = &is->backend_data.jack;
    isj->frames_left = nframes;
    for (int ch = 0; ch < instream->layout.channel_count; ch += 1) {
        struct SoundIoInStreamJackPort *isjp = &isj->ports[ch];
        // Port buffers are only valid for the duration of this callback.
        isj->areas[ch].ptr = (char*)jack_port_get_buffer(isjp->dest_port, nframes);
        isj->areas[ch].step = instream->bytes_per_sample;
    }
    instream->read_callback(instream, isj->frames_left, isj->frames_left);
    return 0;
}
// Open a capture stream: create a dedicated JACK client, install the stream
// callbacks, register one input port per channel and resolve which source
// port each channel should be fed from. The actual connections are made
// later, in instream_start_jack. Returns a SoundIoError code, 0 on success.
static int instream_open_jack(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
    struct SoundIoInStream *instream = &is->pub;
    struct SoundIoInStreamJack *isj = &is->backend_data.jack;
    struct SoundIoJack *sij = &si->backend_data.jack;
    struct SoundIoDevice *device = instream->device;
    struct SoundIoDevicePrivate *dev = (struct SoundIoDevicePrivate *)device;
    struct SoundIoDeviceJack *dj = &dev->backend_data.jack;
    if (sij->is_shutdown)
        return SoundIoErrorBackendDisconnected;
    if (!instream->name)
        instream->name = "SoundIoInStream";
    // JACK dictates latency and period size; the stream simply adopts them.
    instream->software_latency = device->software_latency_current;
    isj->period_size = sij->period_size;
    jack_status_t status;
    isj->client = jack_client_open(instream->name, JackNoStartServer, &status);
    if (!isj->client) {
        instream_destroy_jack(si, is);
        assert(!(status & JackInvalidOption));
        // Map the JACK status bits to the closest SoundIoError.
        if (status & JackShmFailure)
            return SoundIoErrorSystemResources;
        if (status & JackNoSuchClient)
            return SoundIoErrorNoSuchClient;
        return SoundIoErrorOpeningDevice;
    }
    int err;
    if ((err = jack_set_process_callback(isj->client, instream_process_callback, is))) {
        instream_destroy_jack(si, is);
        return SoundIoErrorOpeningDevice;
    }
    // Buffer-size and sample-rate changes after open are treated as fatal
    // streaming errors by the registered callbacks.
    if ((err = jack_set_buffer_size_callback(isj->client, instream_buffer_size_callback, is))) {
        instream_destroy_jack(si, is);
        return SoundIoErrorOpeningDevice;
    }
    if ((err = jack_set_sample_rate_callback(isj->client, instream_sample_rate_callback, is))) {
        instream_destroy_jack(si, is);
        return SoundIoErrorOpeningDevice;
    }
    if ((err = jack_set_xrun_callback(isj->client, instream_xrun_callback, is))) {
        instream_destroy_jack(si, is);
        return SoundIoErrorOpeningDevice;
    }
    jack_on_shutdown(isj->client, instream_shutdown_callback, is);
    jack_nframes_t max_port_latency = 0;
    // register ports and map channels
    int connected_count = 0;
    for (int ch = 0; ch < instream->layout.channel_count; ch += 1) {
        enum SoundIoChannelId my_channel_id = instream->layout.channels[ch];
        const char *channel_name = soundio_get_channel_name(my_channel_id);
        unsigned long flags = JackPortIsInput;
        if (!instream->non_terminal_hint)
            flags |= JackPortIsTerminal;
        jack_port_t *jport = jack_port_register(isj->client, channel_name, JACK_DEFAULT_AUDIO_TYPE, flags, 0);
        if (!jport) {
            instream_destroy_jack(si, is);
            return SoundIoErrorOpeningDevice;
        }
        struct SoundIoInStreamJackPort *isjp = &isj->ports[ch];
        isjp->dest_port = jport;
        // figure out which source port this connects to
        struct SoundIoDeviceJackPort *djp = find_port_matching_channel(device, my_channel_id);
        if (djp) {
            isjp->source_port_name = djp->full_name;
            isjp->source_port_name_len = djp->full_name_len;
            connected_count += 1;
            max_port_latency = nframes_max(max_port_latency, djp->latency_range.max);
        }
    }
    // If nothing got connected, channel layouts aren't working. Just send the
    // data in the order of the ports.
    if (connected_count == 0) {
        max_port_latency = 0;
        instream->layout_error = SoundIoErrorIncompatibleDevice;
        int ch_count = soundio_int_min(instream->layout.channel_count, dj->port_count);
        for (int ch = 0; ch < ch_count; ch += 1) {
            struct SoundIoInStreamJackPort *isjp = &isj->ports[ch];
            struct SoundIoDeviceJackPort *djp = &dj->ports[ch];
            isjp->source_port_name = djp->full_name;
            isjp->source_port_name_len = djp->full_name_len;
            max_port_latency = nframes_max(max_port_latency, djp->latency_range.max);
        }
    }
    // Worst-case source-port latency in frames, converted to seconds.
    isj->hardware_latency = max_port_latency / (double)instream->sample_rate;
    return 0;
}
// JACK has no pause concept: report backend disconnect if the server is gone,
// otherwise signal that pausing is unsupported on this backend.
static int instream_pause_jack(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is, bool pause) {
    struct SoundIoJack *sij = &si->backend_data.jack;
    return sij->is_shutdown ? SoundIoErrorBackendDisconnected : SoundIoErrorIncompatibleBackend;
}
// Start capture: activate the JACK client (which starts the process callback
// firing) and then wire each resolved source port to our registered input
// port. Channels with no resolved source are deliberately left unconnected.
static int instream_start_jack(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
    struct SoundIoInStreamJack *isj = &is->backend_data.jack;
    struct SoundIoInStream *instream = &is->pub;
    struct SoundIoJack *sij = &si->backend_data.jack;
    int err;
    if (sij->is_shutdown)
        return SoundIoErrorBackendDisconnected;
    // Activate before connecting; connections reference this client's ports.
    if ((err = jack_activate(isj->client)))
        return SoundIoErrorStreaming;
    for (int ch = 0; ch < instream->layout.channel_count; ch += 1) {
        struct SoundIoInStreamJackPort *isjp = &isj->ports[ch];
        const char *source_port_name = isjp->source_port_name;
        // allow unconnected ports
        if (!source_port_name)
            continue;
        const char *dest_port_name = jack_port_name(isjp->dest_port);
        if ((err = jack_connect(isj->client, source_port_name, dest_port_name)))
            return SoundIoErrorStreaming;
    }
    return 0;
}
// Hand the caller the per-channel capture buffers prepared by the process
// callback. A JACK period is all-or-nothing: the caller must request exactly
// the frame count that the process callback reported.
static int instream_begin_read_jack(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is,
        struct SoundIoChannelArea **out_areas, int *frame_count)
{
    struct SoundIoInStreamJack *isj = &is->backend_data.jack;
    if (*frame_count == isj->frames_left) {
        *out_areas = isj->areas;
        return 0;
    }
    return SoundIoErrorInvalid;
}
// Mark the current period as fully consumed.
static int instream_end_read_jack(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
    is->backend_data.jack.frames_left = 0;
    return 0;
}
// Report the hardware latency that was computed at open time from the source
// ports' latency ranges.
static int instream_get_latency_jack(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is,
        double *out_latency)
{
    *out_latency = is->backend_data.jack.hardware_latency;
    return 0;
}
// Mark the cached device list as stale and wake both the backend's event
// thread and the client: clearing refresh_devices_flag requests a rescan,
// the cond signal wakes wait_events, and on_events_signal notifies the user.
// The signal is issued under the mutex so a concurrent waiter cannot miss it.
static void notify_devices_change(struct SoundIoPrivate *si) {
    struct SoundIo *soundio = &si->pub;
    struct SoundIoJack *sij = &si->backend_data.jack;
    SOUNDIO_ATOMIC_FLAG_CLEAR(sij->refresh_devices_flag);
    soundio_os_mutex_lock(sij->mutex);
    soundio_os_cond_signal(sij->cond, sij->mutex);
    soundio->on_events_signal(soundio);
    soundio_os_mutex_unlock(sij->mutex);
}
// Backend-wide JACK buffer-size hook: record the new period size and trigger
// a device rescan so cached device info stays current.
static int buffer_size_callback(jack_nframes_t nframes, void *arg) {
    struct SoundIoPrivate *si = (struct SoundIoPrivate *)arg;
    si->backend_data.jack.period_size = nframes;
    notify_devices_change(si);
    return 0;
}
// Backend-wide JACK sample-rate hook: record the new rate and trigger a
// device rescan so cached device info stays current.
static int sample_rate_callback(jack_nframes_t nframes, void *arg) {
    struct SoundIoPrivate *si = (struct SoundIoPrivate *)arg;
    si->backend_data.jack.sample_rate = nframes;
    notify_devices_change(si);
    return 0;
}
// A JACK port appeared or disappeared somewhere; trigger a device rescan.
static void port_registration_callback(jack_port_id_t port_id, int reg, void *arg) {
    notify_devices_change((struct SoundIoPrivate *)arg);
}
// A JACK port was renamed; trigger a device rescan.
// NOTE(review): "calllback" is a typo in the identifier; it is kept unchanged
// here because the name is referenced where the callback is registered in
// soundio_jack_init — rename both sites together if cleaning it up.
static void port_rename_calllback(jack_port_id_t port_id,
        const char *old_name, const char *new_name, void *arg)
{
    struct SoundIoPrivate *si = (struct SoundIoPrivate *)arg;
    notify_devices_change(si);
}
// JACK server shutdown hook for the backend-wide client: record the shutdown
// under the mutex, then wake the event thread and the client so they can
// observe is_shutdown and report SoundIoErrorBackendDisconnected.
static void shutdown_callback(void *arg) {
    struct SoundIoPrivate *si = (struct SoundIoPrivate *)arg;
    struct SoundIo *soundio = &si->pub;
    struct SoundIoJack *sij = &si->backend_data.jack;
    soundio_os_mutex_lock(sij->mutex);
    sij->is_shutdown = true;
    soundio_os_cond_signal(sij->cond, sij->mutex);
    soundio->on_events_signal(soundio);
    soundio_os_mutex_unlock(sij->mutex);
}
static void destroy_jack(struct SoundIoPrivate *si) {
struct SoundIoJack *sij = &si->backend_data.jack;
if (sij->client)
jack_client_close(sij->client);
if (sij->cond)
soundio_os_cond_destroy(sij->cond);
if (sij->mutex)
soundio_os_mutex_destroy(sij->mutex);
}
// Initialize the JACK backend: create synchronization primitives, open the
// backend-wide JACK client, register the global callbacks, do an initial
// device scan, and install the backend's function table on si. On any failure
// everything allocated so far is torn down via destroy_jack and a
// SoundIoError code is returned; returns 0 on success.
int soundio_jack_init(struct SoundIoPrivate *si) {
    struct SoundIoJack *sij = &si->backend_data.jack;
    struct SoundIo *soundio = &si->pub;
    // Install the user's JACK error/info handlers (process-global state).
    // NOTE(review): the flag is cleared again right after, so this branch runs
    // on every init rather than once — confirm whether that is intentional.
    if (!SOUNDIO_ATOMIC_FLAG_TEST_AND_SET(global_msg_callback_flag)) {
        if (soundio->jack_error_callback)
            jack_set_error_function(soundio->jack_error_callback);
        if (soundio->jack_info_callback)
            jack_set_info_function(soundio->jack_info_callback);
        SOUNDIO_ATOMIC_FLAG_CLEAR(global_msg_callback_flag);
    }
    sij->mutex = soundio_os_mutex_create();
    if (!sij->mutex) {
        destroy_jack(si);
        return SoundIoErrorNoMem;
    }
    sij->cond = soundio_os_cond_create();
    if (!sij->cond) {
        destroy_jack(si);
        return SoundIoErrorNoMem;
    }
    // We pass JackNoStartServer due to
    // https://github.com/jackaudio/jack2/issues/138
    jack_status_t status;
    sij->client = jack_client_open(soundio->app_name, JackNoStartServer, &status);
    if (!sij->client) {
        destroy_jack(si);
        assert(!(status & JackInvalidOption));
        // Map the JACK status bits to the closest SoundIoError.
        if (status & JackShmFailure)
            return SoundIoErrorSystemResources;
        if (status & JackNoSuchClient)
            return SoundIoErrorNoSuchClient;
        return SoundIoErrorInitAudioBackend;
    }
    // Global callbacks keep the cached device list and stream parameters in
    // sync with the server.
    int err;
    if ((err = jack_set_buffer_size_callback(sij->client, buffer_size_callback, si))) {
        destroy_jack(si);
        return SoundIoErrorInitAudioBackend;
    }
    if ((err = jack_set_sample_rate_callback(sij->client, sample_rate_callback, si))) {
        destroy_jack(si);
        return SoundIoErrorInitAudioBackend;
    }
    if ((err = jack_set_port_registration_callback(sij->client, port_registration_callback, si))) {
        destroy_jack(si);
        return SoundIoErrorInitAudioBackend;
    }
    if ((err = jack_set_port_rename_callback(sij->client, port_rename_calllback, si))) {
        destroy_jack(si);
        return SoundIoErrorInitAudioBackend;
    }
    jack_on_shutdown(sij->client, shutdown_callback, si);
    // Cleared flag means "device list needs refreshing".
    SOUNDIO_ATOMIC_FLAG_CLEAR(sij->refresh_devices_flag);
    sij->period_size = jack_get_buffer_size(sij->client);
    sij->sample_rate = jack_get_sample_rate(sij->client);
    if ((err = jack_activate(sij->client))) {
        destroy_jack(si);
        return SoundIoErrorInitAudioBackend;
    }
    if ((err = refresh_devices(si))) {
        destroy_jack(si);
        return err;
    }
    // Install the backend's implementation of the SoundIo function table.
    si->destroy = destroy_jack;
    si->flush_events = flush_events_jack;
    si->wait_events = wait_events_jack;
    si->wakeup = wakeup_jack;
    si->force_device_scan = force_device_scan_jack;
    si->outstream_open = outstream_open_jack;
    si->outstream_destroy = outstream_destroy_jack;
    si->outstream_start = outstream_start_jack;
    si->outstream_begin_write = outstream_begin_write_jack;
    si->outstream_end_write = outstream_end_write_jack;
    si->outstream_clear_buffer = outstream_clear_buffer_jack;
    si->outstream_pause = outstream_pause_jack;
    si->outstream_get_latency = outstream_get_latency_jack;
    si->instream_open = instream_open_jack;
    si->instream_destroy = instream_destroy_jack;
    si->instream_start = instream_start_jack;
    si->instream_begin_read = instream_begin_read_jack;
    si->instream_end_read = instream_end_read_jack;
    si->instream_pause = instream_pause_jack;
    si->instream_get_latency = instream_get_latency_jack;
    return 0;
}
| 15,091 |
338 |
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <exception>
#include <vector>
#include <string>
#include "itkImage.h"
#include "itkConstantPadImageFilter.h"
#include "LOCAL_antsImage.h"
namespace py = pybind11;
// Pad an ITK image with a constant value.
//
// antsImage     capsule wrapping the input itk::Image
// lowerPadDims  per-axis pad amounts to prepend before each axis
// upperPadDims  per-axis pad amounts to append after each axis
// padValue      constant pixel value used for the padded region
//
// Returns a capsule wrapping the padded image.
//
// The pad extents are now applied generically for every image dimension.
// Previously only components 0-2 were ever assigned, so for the registered 4D
// instantiation (padImageF4) the 4th component of the SizeType was left
// uninitialized. Components beyond the supplied vectors default to zero.
template < typename ImageType >
py::capsule padImage( py::capsule & antsImage,
                      std::vector<int> lowerPadDims,
                      std::vector<int> upperPadDims,
                      float padValue )
{
    typedef typename ImageType::Pointer ImagePointerType;
    ImagePointerType itkImage = as< ImageType >( antsImage );

    typename ImageType::SizeType lowerExtendRegion;
    lowerExtendRegion.Fill( 0 );
    for ( unsigned int d = 0; d < ImageType::ImageDimension && d < lowerPadDims.size(); d++ )
    {
        lowerExtendRegion[d] = lowerPadDims[d];
    }

    typename ImageType::SizeType upperExtendRegion;
    upperExtendRegion.Fill( 0 );
    for ( unsigned int d = 0; d < ImageType::ImageDimension && d < upperPadDims.size(); d++ )
    {
        upperExtendRegion[d] = upperPadDims[d];
    }

    typedef itk::ConstantPadImageFilter<ImageType, ImageType> PadImageFilterType;
    typename PadImageFilterType::Pointer padFilter = PadImageFilterType::New();
    padFilter->SetInput( itkImage );
    padFilter->SetPadLowerBound( lowerExtendRegion );
    padFilter->SetPadUpperBound( upperExtendRegion );
    padFilter->SetConstant( padValue );
    padFilter->Update();

    return wrap< ImageType >( padFilter->GetOutput() );
}
// Python bindings: expose the padImage template for 2D, 3D and 4D float
// images under dimension-suffixed names.
PYBIND11_MODULE(padImage, m)
{
    m.def("padImageF2", &padImage<itk::Image<float, 2>>);
    m.def("padImageF3", &padImage<itk::Image<float, 3>>);
    m.def("padImageF4", &padImage<itk::Image<float, 4>>);
}
| 660 |
348 | {"nom":"Vieux-Lixheim","circ":"4ème circonscription","dpt":"Moselle","inscrits":169,"abs":92,"votants":77,"blancs":7,"nuls":1,"exp":69,"res":[{"nuance":"LR","nom":"<NAME>","voix":36},{"nuance":"REM","nom":"<NAME>","voix":33}]} | 94 |
540 | // File generated from our OpenAPI spec
package com.stripe.model;
/** A paginated collection of {@link Coupon} objects. */
public class CouponCollection extends StripeCollection<Coupon> {}
| 35 |
384 | package org.javaee8.cdi.interception.factory;
/**
 * Simple bean implementation of {@code MyGreeting} that holds a mutable
 * greeting string.
 */
public class MyGreetingImpl implements MyGreeting {
    // Greeting text; null until assigned via setGreet.
    private String greet;
    /** Returns the current greeting text (may be null). */
    public String getGreet() {
        return greet;
    }
    /** Sets the greeting text. */
    public void setGreet(String greet) {
        this.greet = greet;
    }
}
| 108 |
327 | <reponame>jtravee/neuvector
/*
* lfstack.c
*
* Userspace RCU library - Lock-Free Stack
*
* Copyright 2010-2012 - <NAME> <<EMAIL>>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu/lfstack.h"
#define _LGPL_SOURCE
#include "urcu/static/lfstack.h"
/*
* library wrappers to be used by non-LGPL compatible source code.
*/
/* Initialize a node before its first push (non-LGPL wrapper over the
 * _LGPL_SOURCE static inline). */
void cds_lfs_node_init(struct cds_lfs_node *node)
{
	_cds_lfs_node_init(node);
}
/* Initialize an empty lock-free stack (non-LGPL wrapper). */
void cds_lfs_init(struct cds_lfs_stack *s)
{
	_cds_lfs_init(s);
}
/* Return true if the stack currently has no nodes (non-LGPL wrapper). */
bool cds_lfs_empty(struct cds_lfs_stack *s)
{
	return _cds_lfs_empty(s);
}
/* Push a node onto the stack; forwards to _cds_lfs_push and returns its
 * boolean result (non-LGPL wrapper). */
bool cds_lfs_push(struct cds_lfs_stack *s, struct cds_lfs_node *node)
{
	return _cds_lfs_push(s, node);
}
/* Pop one node, using the blocking variant (non-LGPL wrapper). */
struct cds_lfs_node *cds_lfs_pop_blocking(struct cds_lfs_stack *s)
{
	return _cds_lfs_pop_blocking(s);
}
/* Pop the entire stack contents at once, blocking variant (non-LGPL wrapper). */
struct cds_lfs_head *cds_lfs_pop_all_blocking(struct cds_lfs_stack *s)
{
	return _cds_lfs_pop_all_blocking(s);
}
/* Acquire the pop lock for use with the __cds_lfs_pop* variants
 * (non-LGPL wrapper). */
void cds_lfs_pop_lock(struct cds_lfs_stack *s)
{
	_cds_lfs_pop_lock(s);
}
/* Release the pop lock taken by cds_lfs_pop_lock (non-LGPL wrapper). */
void cds_lfs_pop_unlock(struct cds_lfs_stack *s)
{
	_cds_lfs_pop_unlock(s);
}
/* Pop one node without internal synchronization; caller manages locking via
 * cds_lfs_pop_lock/unlock (non-LGPL wrapper). */
struct cds_lfs_node *__cds_lfs_pop(struct cds_lfs_stack *s)
{
	return ___cds_lfs_pop(s);
}
/* Pop the entire stack without internal synchronization; caller manages
 * locking via cds_lfs_pop_lock/unlock (non-LGPL wrapper). */
struct cds_lfs_head *__cds_lfs_pop_all(struct cds_lfs_stack *s)
{
	return ___cds_lfs_pop_all(s);
}
| 798 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.