max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
1,546 | <reponame>npv12/lawnchair
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.android.systemui.plugins.annotations;
import java.lang.annotation.Repeatable;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
* Used to annotate which interfaces a given plugin depends on.
*
* At minimum all plugins should have at least one @Requires annotation
* for the plugin interface that they are implementing. They will also
* need an @Requires for each class that the plugin interface @DependsOn.
*/
@Retention(RetentionPolicy.RUNTIME)
@Repeatable(value = Requirements.class)
public @interface Requires {
    /** The plugin interface (or @DependsOn class) that this plugin requires. */
    Class<?> target();

    /** The version of {@link #target()} that the plugin was compiled against. */
    int version();
}
| 336 |
575 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_SKIA_OUTPUT_DEVICE_VULKAN_SECONDARY_CB_OFFSCREEN_H_
#define COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_SKIA_OUTPUT_DEVICE_VULKAN_SECONDARY_CB_OFFSCREEN_H_
#include <vector>
#include "components/viz/service/display_embedder/skia_output_device_offscreen.h"
#include "gpu/command_buffer/service/shared_context_state.h"
namespace viz {
// Draw into an offscreen buffer which is then drawn to into the secondary
// command buffer. This is meant to for debugging direct compositing with
// secondary command buffers.
class SkiaOutputDeviceVulkanSecondaryCBOffscreen final
    : public SkiaOutputDeviceOffscreen {
 public:
  SkiaOutputDeviceVulkanSecondaryCBOffscreen(
      scoped_refptr<gpu::SharedContextState> context_state,
      gpu::MemoryTracker* memory_tracker,
      DidSwapBufferCompleteCallback did_swap_buffer_complete_callback);
  ~SkiaOutputDeviceVulkanSecondaryCBOffscreen() override;

  // SkiaOutputDeviceOffscreen overrides: paint targets the offscreen
  // surface, and SwapBuffers presents it via the secondary command buffer.
  SkSurface* BeginPaint(
      std::vector<GrBackendSemaphore>* end_semaphores) override;
  void SwapBuffers(BufferPresentedCallback feedback,
                   OutputSurfaceFrame frame) override;
};
} // namespace viz
#endif // COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_SKIA_OUTPUT_DEVICE_VULKAN_SECONDARY_CB_OFFSCREEN_H_
| 507 |
400 | <gh_stars>100-1000
package org.ofdrw.core.text.text;
import org.junit.jupiter.api.Test;
import org.ofdrw.TestTool;
import org.ofdrw.core.basicType.ST_Array;
import org.ofdrw.core.basicType.ST_Box;
import org.ofdrw.core.basicType.ST_ID;
import org.ofdrw.core.basicType.ST_RefID;
import org.ofdrw.core.text.TextCode;
import static org.junit.jupiter.api.Assertions.*;
public class CT_TextTest {
public static CT_Text textCase(){
CT_Text res = new CT_Text();
TextCode fontTxt = new TextCode()
.setX(0d)
.setY(25d)
.setDeltaX(new ST_Array(14, 14, 14))
.setContent("Font");
TextCode fontCN = new TextCode()
.setX(60d)
.setY(25d)
.setDeltaX(new ST_Array(25))
.setContent("字形");
return res.setFont(ST_RefID.getInstance("2"))
.setSize(25.4d)
.setBoundary(new ST_Box(50, 20, 112, 26))
.addTextCode(fontTxt)
.addTextCode(fontCN);
}
@Test
public void gen() throws Exception {
TestTool.genXml("CT_Text", textCase());
}
} | 607 |
321 | {
"extends": ["tslint:recommended", "tslint-react", "tslint-config-prettier"],
"rulesDirectory": ["tslint-plugin-prettier"],
"rules": {
"prettier": true,
"interface-over-type-literal": false,
"interface-name": [true, "never-prefix"],
"object-literal-sort-keys": false,
"ordered-imports": false,
"max-line-length": [true, 300],
"member-access": false,
"only-arrow-functions": false,
"max-classes-per-file": false,
"no-empty-interface": false,
"jsx-no-multiline-js": false,
"arrow-parens": [true, "ban-single-arg-parens"],
"no-var-requires": false,
"jsx-boolean-value": false,
"no-console": false,
"no-namespace": false,
"member-ordering": false,
"ban-types": false,
"no-string-literal": false,
"semicolon": [true, "always", "ignore-bound-class-methods"]
}
}
| 356 |
676 | <reponame>gitskarios/Gitskarios
package com.alorma.github.presenter;
import com.alorma.github.injector.named.ComputationScheduler;
import com.alorma.github.injector.named.MainScheduler;
import com.alorma.gitskarios.core.Pair;
import core.datasource.SdkItem;
import core.repositories.Branch;
import core.repositories.Repo;
import java.util.ArrayList;
import java.util.List;
import rx.Observable;
import rx.Scheduler;
public class RepositorySourcePresenter
    extends BaseRxPresenter<Pair<Repo, Branch>, Pair<Branch, List<Branch>>, View<Pair<Branch, List<Branch>>>> {

  public RepositorySourcePresenter(@MainScheduler Scheduler mainScheduler, @ComputationScheduler Scheduler ioScheduler) {
    super(mainScheduler, ioScheduler, null);
  }

  /**
   * Emits the currently selected branch first, followed by every other branch
   * of the repository (matched by name), then subscribes downstream.
   */
  @Override
  public void execute(Pair<Repo, Branch> data) {
    Observable<Pair<Branch, List<Branch>>> observable = Observable.fromCallable(() -> {
      Branch selected = data.second;
      List<Branch> ordered = new ArrayList<>();
      ordered.add(selected);
      for (Branch candidate : data.first.branches) {
        if (!selected.name.equals(candidate.name)) {
          ordered.add(candidate);
        }
      }
      return ordered;
    }).map(branches -> new Pair<>(branches.get(0), branches));
    subscribe(observable.map(SdkItem::new), false);
  }
}
| 530 |
348 | <filename>docs/data/leg-t2/021/02105558.json
{"nom":"Sainte-Marie-la-Blanche","circ":"5ème circonscription","dpt":"Côte-d'Or","inscrits":719,"abs":399,"votants":320,"blancs":15,"nuls":2,"exp":303,"res":[{"nuance":"REM","nom":"<NAME>","voix":176},{"nuance":"LR","nom":"<NAME>","voix":127}]} | 122 |
2,338 | // RUN: %clang_builtins %s %librt -o %t && %run %t
// REQUIRES: librt_has_divmodti4
// REQUIRES: int128
//===-- divmodti4_test.c - Test __divmodti4 -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file tests __divmodti4 for the compiler_rt library.
//
//===----------------------------------------------------------------------===//
#include "int_lib.h"
#include <stdio.h>
#ifdef CRT_HAS_128BIT
// Effects: if rem != 0, *rem = a % b
// Returns: a / b
COMPILER_RT_ABI ti_int __divmodti4(ti_int a, ti_int b, ti_int* rem);
// Runs one __divmodti4 case: checks both the returned quotient and the
// remainder written through the out-parameter.
// Returns 0 on success, nonzero on mismatch (after printing diagnostics).
int test__divmodti4(ti_int a, ti_int b, ti_int expected_q, ti_int expected_r) {
    ti_int r;
    ti_int q = __divmodti4(a, b, &r);
    if (q != expected_q || r != expected_r)
    {
        // printf has no native 128-bit conversion, so split each value into
        // two 64-bit halves via the utwords union for hex printing.
        utwords at;
        at.all = a;
        utwords bt;
        bt.all = b;
        utwords expected_qt;
        expected_qt.all = expected_q;
        utwords expected_rt;
        expected_rt.all = expected_r;
        utwords qt;
        qt.all = q;
        utwords rt;
        rt.all = r;
        printf("error in __divmodti4: 0x%.16llX%.16llX / 0x%.16llX%.16llX = "
               "0x%.16llX%.16llX, R = 0x%.16llX%.16llX, expected 0x%.16llX%.16llX, "
               "0x%.16llX%.16llX\n",
               at.s.high, at.s.low, bt.s.high, bt.s.low, qt.s.high, qt.s.low,
               rt.s.high, rt.s.low, expected_qt.s.high, expected_qt.s.low,
               expected_rt.s.high, expected_rt.s.low);
    }
    return !(q == expected_q && r == expected_r);
}
// Compile-time assertion: ti_int must be exactly twice the width of di_int.
// If the condition is false the array has size 0, which compilers reject.
char assumption_1[sizeof(ti_int) == 2*sizeof(di_int)] = {0};

// Test table: each row is { a, b, expected quotient, expected remainder }.
// The final rows exercise INT128_MIN (0x8000...0000 built by shifting),
// including the overflowing INT128_MIN / -1 case, which this implementation
// defines to wrap back to INT128_MIN.
tu_int tests[][4] =
{
{ (ti_int) 0, (ti_int) 1, (ti_int) 0, (ti_int) 0 },
{ (ti_int) 0, (ti_int)-1, (ti_int) 0, (ti_int) 0 },
{ (ti_int) 2, (ti_int) 1, (ti_int) 2, (ti_int) 0 },
{ (ti_int) 2, (ti_int)-1, (ti_int)-2, (ti_int) 0 },
{ (ti_int)-2, (ti_int) 1, (ti_int)-2, (ti_int) 0 },
{ (ti_int)-2, (ti_int)-1, (ti_int) 2, (ti_int) 0 },
{ (ti_int) 5, (ti_int) 3, (ti_int) 1, (ti_int) 2 },
{ (ti_int) 5, (ti_int)-3, (ti_int)-1, (ti_int) 2 },
{ (ti_int)-5, (ti_int) 3, (ti_int)-1, (ti_int)-2 },
{ (ti_int)-5, (ti_int)-3, (ti_int) 1, (ti_int)-2 },
{ (ti_int)0x8000000000000000LL << 64 | 0, (ti_int) 1, (ti_int)0x8000000000000000LL << 64 | 0, (ti_int)0x0LL },
{ (ti_int)0x8000000000000000LL << 64 | 0, (ti_int)-1, (ti_int)0x8000000000000000LL << 64 | 0, (ti_int)0x0LL },
{ (ti_int)0x8000000000000000LL << 64 | 0, (ti_int)-2, (ti_int)0x4000000000000000LL << 64 | 0, (ti_int)0x0LL },
{ (ti_int)0x8000000000000000LL << 64 | 0, (ti_int) 2, (ti_int)0xC000000000000000LL << 64 | 0, (ti_int)0x0LL },
{ (ti_int)0x8000000000000000LL << 64 | 0, (ti_int)-3, (ti_int)0x2AAAAAAAAAAAAAAALL << 64 | 0xAAAAAAAAAAAAAAAALL, (ti_int)-2 },
{ (ti_int)0x8000000000000000LL << 64 | 0, (ti_int) 3, (ti_int)0xD555555555555555LL << 64 | 0x5555555555555556LL, (ti_int)-2 },
};
#endif
// Drives every row of the table through test__divmodti4; exits nonzero on
// the first failing case. Skipped when the target lacks 128-bit support.
int main()
{
#ifdef CRT_HAS_128BIT
    const unsigned N = sizeof(tests) / sizeof(tests[0]);
    unsigned i;
    for (i = 0; i < N; ++i)
        if (test__divmodti4(tests[i][0], tests[i][1], tests[i][2], tests[i][3]))
            return 1;
#else
    printf("skipped\n");
#endif
    return 0;
}
| 2,451 |
3,337 | <reponame>timgates42/SoundJS
{
"name": "SoundJS-UnitTests",
"version": "0.0.1",
"description": "SoundJS unit testing.",
"url": "http://www.createjs.com/#!/SoundJS",
"logo": "assets/docs-icon-SoundJS.png",
"devDependencies": {
"body-parser": "^1.9.2",
"grunt": "~0.4.5",
"grunt-contrib-connect": "^0.9.0",
"grunt-contrib-jasmine": "^0.8.2"
},
"engine": "node >= 0.10.22"
}
| 192 |
1,587 | package com.example.demo.connectionchecking.junit5;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import org.junit.jupiter.api.extension.ExtendWith;
@Retention(RetentionPolicy.RUNTIME)
@ExtendWith(AssumeConnectionCondition.class)
public @interface AssumeConnection {
    /**
     * URI handed to {@code AssumeConnectionCondition} — presumably the test is
     * skipped when this endpoint is unreachable; confirm in the extension.
     */
    String uri();
}
| 106 |
9,225 | import os.path
import shutil
failure_demo = os.path.join(os.path.dirname(__file__), "failure_demo.py")
pytest_plugins = ("pytester",)
def test_failure_demo_fails_properly(pytester):
    """Copy failure_demo.py into the pytester sandbox, run it, and verify
    that exactly 44 tests fail and the run exits with a nonzero status."""
    destination = pytester.path.joinpath(os.path.basename(failure_demo))
    shutil.copy(failure_demo, destination)
    outcome = pytester.runpytest(destination, syspathinsert=True)
    outcome.stdout.fnmatch_lines(["*44 failed*"])
    assert outcome.ret != 0
| 176 |
1,475 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.cache.query.internal;
import org.apache.geode.InternalGemFireError;
/**
 * Internal assertion helpers for the query engine. Failures surface as
 * IllegalArgumentException (argument checks), IllegalStateException (state
 * checks) or InternalGemFireError (generic checks).
 */
public class Support {
  public static final boolean ASSERTIONS_ENABLED = true;

  // Failure categories, mapped to exception types in fail().
  private static final int OTHER = 0;
  private static final int STATE = 1;
  private static final int ARG = 2;

  /** Asserts a method-argument precondition; throws IllegalArgumentException on failure. */
  public static void assertArg(boolean b, String message) {
    check(b, message, ARG);
  }

  /** Asserts an object-state precondition; throws IllegalStateException on failure. */
  public static void assertState(boolean b, String message) {
    check(b, message, STATE);
  }

  /** Generic assertion with no message; throws InternalGemFireError on failure. */
  public static void Assert(boolean b) {
    check(b, "", OTHER);
  }

  /** Generic assertion; throws InternalGemFireError on failure. */
  public static void Assert(boolean b, String message) {
    check(b, message, OTHER);
  }

  /** Unconditionally reports a generic assertion failure with the given message. */
  public static void assertionFailed(String message) {
    assertionFailed(message, OTHER);
  }

  /** Unconditionally reports a generic assertion failure with no message. */
  public static void assertionFailed() {
    assertionFailed("", OTHER);
  }

  // Shared guard: a no-op when assertions are disabled or the condition holds.
  private static void check(boolean condition, String message, int type) {
    if (!ASSERTIONS_ENABLED || condition) {
      return;
    }
    assertionFailed(message, type);
  }

  // Translates a failure category into the matching exception type.
  private static void assertionFailed(String message, int type) {
    if (type == ARG) {
      throw new IllegalArgumentException(message);
    }
    if (type == STATE) {
      throw new IllegalStateException(message);
    }
    throw new InternalGemFireError(
        String.format("ERROR: Assertion failed: ' %s '", message));
  }
}
| 779 |
1,875 | /*
* Copyright 2018 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
package io.flutter.perf;
/**
 * Base class for all view models displaying performance stats
 */
public interface PerfModel {
  /** Notifies the model that the inspected app has become idle. */
  void markAppIdle();

  /** Clears the model's accumulated state. */
  void clear();

  /** Callback invoked once per rendered frame. */
  void onFrame();

  /** Returns whether an animation is currently active in this model. */
  boolean isAnimationActive();
}
| 106 |
1,062 | <reponame>icarazob/mr4c
/**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.mr4c.algorithm;
import com.google.mr4c.dataset.Dataset;
import com.google.mr4c.dataset.DatasetTestUtils;
import com.google.mr4c.keys.DataKeyDimension;
import com.google.mr4c.keys.DataKeyFilter;
import com.google.mr4c.keys.IdentityDataKeyFilter;
import java.net.URISyntaxException;
/** Shared fixtures for AlgorithmData / AlgorithmSchema tests. */
public abstract class AlgorithmDataTestUtils {

  /** Builds algorithm data #1 with no key filtering applied. */
  public static AlgorithmData buildAlgorithmData1() {
    return buildAlgorithmData1Slice(IdentityDataKeyFilter.INSTANCE);
  }

  /**
   * Builds algorithm data #1, slicing each dataset through the given filter:
   * two inputs, one output, a generated keyspace and two config properties.
   */
  public static AlgorithmData buildAlgorithmData1Slice(DataKeyFilter filter) {
    AlgorithmData algoData = new AlgorithmData();
    algoData.addInputDataset("input1", DatasetTestUtils.buildDataset1().slice(filter));
    algoData.addInputDataset("input2", DatasetTestUtils.buildDataset2().slice(filter));
    algoData.addOutputDataset("output", DatasetTestUtils.buildDataset2().slice(filter));
    algoData.generateKeyspaceFromInputDatasets();
    algoData.getConfig().setProperty("param1", "val1");
    algoData.getConfig().setProperty("param2", "val2");
    return algoData;
  }

  /** Builds algorithm data #2: one input, two outputs, one config property. */
  public static AlgorithmData buildAlgorithmData2() {
    AlgorithmData algoData = new AlgorithmData();
    algoData.addInputDataset("input", DatasetTestUtils.buildDataset2());
    algoData.addOutputDataset("output1", DatasetTestUtils.buildDataset2());
    algoData.addOutputDataset("output2", DatasetTestUtils.buildDataset1());
    algoData.generateKeyspaceFromInputDatasets();
    algoData.getConfig().setProperty("param3", "val3");
    return algoData;
  }

  /**
   * Builds a schema with five inputs (covering the optional/excluded flag
   * combinations), two outputs and two expected dimensions.
   */
  public static AlgorithmSchema buildAlgorithmSchema() {
    AlgorithmSchema schema = new AlgorithmSchema();
    schema.addInputDataset("input1");
    schema.addInputDataset("input2");
    schema.addInputDataset("input3", true);
    schema.addInputDataset("input4", false, true);
    schema.addInputDataset("input5", true, true);
    schema.addOutputDataset("output1");
    schema.addOutputDataset("output2");
    schema.addExpectedDimension(new DataKeyDimension("dim1"));
    schema.addExpectedDimension(new DataKeyDimension("dim2"));
    return schema;
  }
}
| 990 |
670 | package com.uddernetworks.mspaint.code.languages.java.buildsystem;
public interface BuildSystem {
    /** Display name of this build system. */
    String getName();

    /** Settings used to configure this build system. */
    BuildSystemSettings getSettings();
}
| 50 |
868 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.tests.integration.amqp;
import java.net.URI;
import java.util.LinkedList;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.activemq.artemis.protocol.amqp.broker.AMQPStandardMessage;
import org.apache.activemq.artemis.protocol.amqp.util.NettyWritable;
import org.apache.activemq.artemis.tests.util.ActiveMQTestBase;
import org.apache.activemq.transport.amqp.client.AmqpClient;
import org.apache.activemq.transport.amqp.client.AmqpConnection;
import org.apache.qpid.proton.message.impl.MessageImpl;
import org.junit.After;
/**
* Base test support class providing client support methods to aid in
* creating and configuration the AMQP test client.
*/
public class AmqpTestSupport extends ActiveMQTestBase {

   protected static final int AMQP_PORT = 5672;

   // Connections registered via addConnection(); closed best-effort in tearDown().
   protected LinkedList<AmqpConnection> connections = new LinkedList<>();

   protected boolean useSSL;
   protected boolean useWebSockets;

   /**
    * Registers a connection for automatic cleanup in {@link #tearDown()}.
    *
    * @return the same connection, for call chaining.
    */
   protected AmqpConnection addConnection(AmqpConnection connection) {
      connections.add(connection);
      return connection;
   }

   @After
   @Override
   public void tearDown() throws Exception {
      for (AmqpConnection conn : connections) {
         try {
            conn.close();
         } catch (Throwable ignored) {
            // Best-effort cleanup: report, but keep closing the remaining connections.
            ignored.printStackTrace();
         }
      }
      super.tearDown();
   }

   public boolean isUseSSL() {
      return useSSL;
   }

   public boolean isUseWebSockets() {
      return useWebSockets;
   }

   /** Extra query options appended to the broker URI; empty by default, override to customize. */
   public String getAmqpConnectionURIOptions() {
      return "";
   }

   /**
    * Builds the broker connection URI according to the SSL and WebSocket flags,
    * appending any options from {@link #getAmqpConnectionURIOptions()}.
    *
    * @return the URI clients should connect to.
    * @throws RuntimeException wrapping the underlying failure if the assembled
    *         string is not a valid URI.
    */
   public URI getBrokerAmqpConnectionURI() {
      boolean webSocket = isUseWebSockets();

      try {
         int port = AMQP_PORT;

         String scheme;
         if (isUseSSL()) {
            scheme = webSocket ? "wss" : "ssl";
         } else {
            scheme = webSocket ? "ws" : "tcp";
         }
         String uri = scheme + "://127.0.0.1:" + port;

         if (!getAmqpConnectionURIOptions().isEmpty()) {
            uri = uri + "?" + getAmqpConnectionURIOptions();
         }

         return new URI(uri);
      } catch (Exception e) {
         // FIX: preserve the cause. The original threw a bare RuntimeException,
         // discarding the exception and making URI failures undiagnosable.
         throw new RuntimeException(e);
      }
   }

   public AmqpConnection createAmqpConnection() throws Exception {
      return createAmqpConnection(getBrokerAmqpConnectionURI());
   }

   public AmqpConnection createAmqpConnection(String username, String password) throws Exception {
      return createAmqpConnection(getBrokerAmqpConnectionURI(), username, password);
   }

   public AmqpConnection createAmqpConnection(URI brokerURI) throws Exception {
      return createAmqpConnection(brokerURI, null, null);
   }

   /** Connects a new client to {@code brokerURI} with the given (nullable) credentials. */
   public AmqpConnection createAmqpConnection(URI brokerURI, String username, String password) throws Exception {
      return createAmqpClient(brokerURI, username, password).connect();
   }

   public AmqpClient createAmqpClient() throws Exception {
      return createAmqpClient(getBrokerAmqpConnectionURI(), null, null);
   }

   public AmqpClient createAmqpClient(URI brokerURI) throws Exception {
      return createAmqpClient(brokerURI, null, null);
   }

   public AmqpClient createAmqpClient(String username, String password) throws Exception {
      return createAmqpClient(getBrokerAmqpConnectionURI(), username, password);
   }

   /** Creates (but does not connect) a client for {@code brokerURI} with the given credentials. */
   public AmqpClient createAmqpClient(URI brokerURI, String username, String password) throws Exception {
      return new AmqpClient(brokerURI, username, password);
   }

   /**
    * Encodes {@code message} into a Netty buffer sized {@code expectedSize} and
    * re-wraps the written bytes as an AMQPStandardMessage, round-tripping the
    * wire encoding.
    */
   public static AMQPStandardMessage encodeAndDecodeMessage(int messageFormat, MessageImpl message, int expectedSize) {
      ByteBuf nettyBuffer = Unpooled.buffer(expectedSize);
      message.encode(new NettyWritable(nettyBuffer));
      byte[] bytes = new byte[nettyBuffer.writerIndex()];
      nettyBuffer.readBytes(bytes);
      return new AMQPStandardMessage(messageFormat, bytes, null);
   }
}
| 1,744 |
14,668 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ppapi/cpp/vpn_provider.h"
#include "ppapi/c/ppb_vpn_provider.h"
#include "ppapi/cpp/instance.h"
#include "ppapi/cpp/instance_handle.h"
#include "ppapi/cpp/module.h"
#include "ppapi/cpp/module_impl.h"
#include "ppapi/cpp/var_array.h"
namespace pp {
namespace {

// Binds the C++ wrapper to the 0.1 PPB_VpnProvider C interface name, used by
// the has_interface<>/get_interface<> lookup machinery.
template <>
const char* interface_name<PPB_VpnProvider_0_1>() {
  return PPB_VPNPROVIDER_INTERFACE_0_1;
}

}  // namespace
// Creates the underlying PPB_VpnProvider resource for |instance| when the
// browser exposes the 0.1 interface; otherwise the resource stays null.
VpnProvider::VpnProvider(const InstanceHandle& instance)
    : associated_instance_(instance) {
  if (has_interface<PPB_VpnProvider_0_1>()) {
    PassRefFromConstructor(get_interface<PPB_VpnProvider_0_1>()->Create(
        associated_instance_.pp_instance()));
  }
}
VpnProvider::~VpnProvider() {}
// static
// True when the browser exposes the PPB_VpnProvider 0.1 interface.
bool VpnProvider::IsAvailable() {
  return has_interface<PPB_VpnProvider_0_1>();
}
// Forwards Bind() to the C interface; returns PP_ERROR_NOINTERFACE when the
// browser does not implement PPB_VpnProvider 0.1.
int32_t VpnProvider::Bind(const Var& configuration_id,
                          const Var& configuration_name,
                          const CompletionCallback& callback) {
  if (has_interface<PPB_VpnProvider_0_1>()) {
    return get_interface<PPB_VpnProvider_0_1>()->Bind(
        pp_resource(), configuration_id.pp_var(), configuration_name.pp_var(),
        callback.pp_completion_callback());
  }
  return PP_ERROR_NOINTERFACE;
}
// Forwards SendPacket() to the C interface; returns PP_ERROR_NOINTERFACE
// when the browser does not implement PPB_VpnProvider 0.1.
int32_t VpnProvider::SendPacket(const Var& packet,
                                const CompletionCallback& callback) {
  if (has_interface<PPB_VpnProvider_0_1>()) {
    return get_interface<PPB_VpnProvider_0_1>()->SendPacket(
        pp_resource(), packet.pp_var(), callback.pp_completion_callback());
  }
  return PP_ERROR_NOINTERFACE;
}
// Forwards ReceivePacket() to the C interface, delivering the packet through
// |callback|'s output; returns PP_ERROR_NOINTERFACE when the browser does
// not implement PPB_VpnProvider 0.1.
int32_t VpnProvider::ReceivePacket(
    const CompletionCallbackWithOutput<Var>& callback) {
  if (has_interface<PPB_VpnProvider_0_1>()) {
    return get_interface<PPB_VpnProvider_0_1>()->ReceivePacket(
        pp_resource(), callback.output(), callback.pp_completion_callback());
  }
  return PP_ERROR_NOINTERFACE;
}
} // namespace pp
| 829 |
525 | # -*- coding: utf-8 -*-
import itertools
import io
from .compat import fix_pep_479
from .errors import NgxParserSyntaxError
EXTERNAL_LEXERS = {}
@fix_pep_479
def _iterescape(iterable):
    # Yields characters from the iterable, fusing a backslash with the
    # character that follows it into a single two-character token so the
    # lexer never interprets an escaped character on its own.
    # @fix_pep_479: if next() raises StopIteration on a trailing backslash,
    # the decorator converts it into a clean generator return (PEP 479).
    chars = iter(iterable)
    for char in chars:
        if char == '\\':
            char = char + next(chars)
        yield char
def _iterlinecount(iterable):
line = 1
chars = iter(iterable)
for char in chars:
if char.endswith('\n'):
line += 1
yield (char, line)
@fix_pep_479
def _lex_file_object(file_obj):
    """
    Generates token tuples from an nginx config file object

    Yields 3-tuples like (token, lineno, quoted)
    """
    token = ''  # the token buffer
    token_line = 0  # the line the token starts on
    # True when the next token starts a directive (used to dispatch
    # EXTERNAL_LEXERS registered for specific directive names).
    next_token_is_directive = True

    # Build a char stream: flatten lines -> fuse escapes -> attach line numbers.
    it = itertools.chain.from_iterable(file_obj)
    it = _iterescape(it)  # treat escaped characters differently
    it = _iterlinecount(it)  # count the number of newline characters

    for char, line in it:
        # handle whitespace
        if char.isspace():
            # if token complete yield it and reset token buffer
            if token:
                yield (token, token_line, False)
                # hand control to a custom lexer for registered directives
                if next_token_is_directive and token in EXTERNAL_LEXERS:
                    for custom_lexer_token in EXTERNAL_LEXERS[token](it, token):
                        yield custom_lexer_token
                    next_token_is_directive = True
                else:
                    next_token_is_directive = False
                token = ''

            # disregard until char isn't a whitespace character
            while char.isspace():
                char, line = next(it)

        # if starting comment, consume through end of line as one token
        if not token and char == '#':
            while not char.endswith('\n'):
                token = token + char
                char, _ = next(it)
            yield (token, line, False)
            token = ''
            continue

        # remember where a fresh token started
        if not token:
            token_line = line

        # handle parameter expansion syntax (ex: "${var[@]}")
        if token and token[-1] == '$' and char == '{':
            next_token_is_directive = False
            while token[-1] != '}' and not char.isspace():
                token += char
                char, line = next(it)

        # if a quote is found, add the whole string to the token buffer
        if char in ('"', "'"):
            # if a quote is inside a token, treat it like any other char
            if token:
                token += char
                continue

            quote = char
            char, line = next(it)
            while char != quote:
                # un-fuse an escaped quote back to the bare quote character
                token += quote if char == '\\' + quote else char
                char, line = next(it)

            yield (token, token_line, True)  # True because this is in quotes

            # handle quoted external directives
            if next_token_is_directive and token in EXTERNAL_LEXERS:
                for custom_lexer_token in EXTERNAL_LEXERS[token](it, token):
                    yield custom_lexer_token
                next_token_is_directive = True
            else:
                next_token_is_directive = False

            token = ''
            continue

        # handle special characters that are treated like full tokens
        if char in ('{', '}', ';'):
            # if token complete yield it and reset token buffer
            if token:
                yield (token, token_line, False)
                token = ''

            # this character is a full token so yield it now
            yield (char, line, False)
            next_token_is_directive = True
            continue

        # append char to the token buffer
        token += char
def _balance_braces(tokens, filename=None):
"""Raises syntax errors if braces aren't balanced"""
depth = 0
for token, line, quoted in tokens:
if token == '}' and not quoted:
depth -= 1
elif token == '{' and not quoted:
depth += 1
# raise error if we ever have more right braces than left
if depth < 0:
reason = 'unexpected "}"'
raise NgxParserSyntaxError(reason, filename, line)
else:
yield (token, line, quoted)
# raise error if we have less right braces than left at EOF
if depth > 0:
reason = 'unexpected end of file, expecting "}"'
raise NgxParserSyntaxError(reason, filename, line)
def lex(filename):
    """Generates tokens from an nginx config file.

    Opens *filename* as UTF-8 and yields (token, line, quoted) 3-tuples,
    raising NgxParserSyntaxError when braces are unbalanced.
    """
    with io.open(filename, mode='r', encoding='utf-8') as f:
        it = _lex_file_object(f)
        it = _balance_braces(it, filename)
        for token, line, quoted in it:
            yield (token, line, quoted)
def register_external_lexer(directives, lexer):
    """Register *lexer* as the custom lexer for every directive name given."""
    EXTERNAL_LEXERS.update((directive, lexer) for directive in directives)
| 2,249 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.extbrowser;
import java.util.logging.Level;
import org.openide.awt.HtmlBrowser;
import org.openide.execution.NbProcessDescriptor;
import org.openide.util.NbBundle;
import org.openide.util.Utilities;
/**
* @author <NAME>
*/
public class MozillaBrowser extends ExtWebBrowser {

    private static final long serialVersionUID = -3982770681461437966L;

    /** Creates new ExtWebBrowser */
    public MozillaBrowser() {
        super(PrivateBrowserFamilyId.MOZILLA);
        ddeServer = ExtWebBrowser.MOZILLA;
    }

    /** Determines whether the browser should be hidden in the UI.
     * @return Boolean.FALSE (visible) when Mozilla is detected on Windows or
     * the OS is a non-Mac Unix; Boolean.TRUE (hidden) otherwise.
     */
    public static Boolean isHidden () {
        String detectedPath = null;
        if (Utilities.isWindows()) {
            try {
                // Look up the installed Mozilla path through the DDE bridge.
                detectedPath = NbDdeBrowserImpl.getBrowserPath("MOZILLA"); // NOI18N
            } catch (NbBrowserException e) {
                ExtWebBrowser.getEM().log(Level.FINEST, "Cannot detect Mozilla : {0}", e); // NOI18N
            }
            if ((detectedPath != null) && (detectedPath.trim().length() > 0)) {
                return Boolean.FALSE;
            }
            return Boolean.TRUE;
        }
        // Non-Windows: visible on Unix (except Mac), hidden everywhere else.
        return (Utilities.isUnix() && !Utilities.isMac()) ? Boolean.FALSE : Boolean.TRUE;
    }

    /** Getter for browser name, lazily resolved from the localized bundle.
     * @return name of browser
     */
    @Override
    public String getName () {
        if (name == null) {
            this.name = NbBundle.getMessage(MozillaBrowser.class, "CTL_MozillaBrowserName");
        }
        return name;
    }

    /**
     * Returns a new instance of BrowserImpl implementation.
     * @throws UnsupportedOperationException when method is called and OS is not Windows.
     * @return browserImpl implementation of browser.
     */
    @Override
    public HtmlBrowser.Impl createHtmlBrowserImpl() {
        ExtBrowserImpl impl = null;
        if (Utilities.isWindows()) {
            // Windows drives the browser over DDE.
            impl = new NbDdeBrowserImpl(this);
        } else if (Utilities.isUnix() && !Utilities.isMac()) {
            // Unix (non-Mac) launches the browser as an external process.
            impl = new UnixBrowserImpl(this);
        } else {
            throw new UnsupportedOperationException (NbBundle.getMessage (MozillaBrowser.class, "MSG_CannotUseBrowser"));
        }
        return impl;
    }

    /** Default command for browser execution.
     * Can be overriden to return browser that suits to platform and settings.
     *
     * @return process descriptor that allows to start browser.
     */
    @Override
    protected NbProcessDescriptor defaultBrowserExecutable () {
        String prg;
        String params = ""; // NOI18N
        NbProcessDescriptor retValue;
        //Windows
        if (Utilities.isWindows()) {
            params += "{" + ExtWebBrowser.UnixBrowserFormat.TAG_URL + "}";
            try {
                prg = NbDdeBrowserImpl.getBrowserPath(getDDEServer());
                return new NbProcessDescriptor (prg, params);
            } catch (NbBrowserException e) {
                // Registry lookup failed: fall back to the default install path.
                prg = "C:\\Program Files\\Mozilla.org\\Mozilla\\mozilla.exe"; // NOI18N
            } catch (UnsatisfiedLinkError e) {
                // NOTE(review): falls back to IE when the native DDE library is
                // missing — presumably "any browser beats none"; confirm intent.
                prg = "iexplore"; // NOI18N
            }
            retValue = new NbProcessDescriptor (prg, params);
            return retValue;
        //Unix
        } else {
            // Probe well-known install locations, keeping "mozilla" (PATH
            // lookup) as the fallback when nothing is found.
            prg = "mozilla"; // NOI18N
            if (Utilities.getOperatingSystem() == Utilities.OS_LINUX) {
                java.io.File f = new java.io.File ("/usr/bin/mozilla"); // NOI18N
                if (f.exists()) {
                    prg = f.getAbsolutePath();
                }
                f = new java.io.File ("/usr/local/mozilla/mozilla"); // NOI18N
                if (f.exists()) {
                    prg = f.getAbsolutePath();
                }
            } else if (Utilities.getOperatingSystem() == Utilities.OS_SOLARIS) {
                java.io.File f = new java.io.File ("/usr/sfw/lib/mozilla/mozilla"); // NOI18N
                if (f.exists()) {
                    prg = f.getAbsolutePath();
                } else {
                    f = new java.io.File ("/opt/csw/bin/mozilla"); // NOI18N
                    if (f.exists()) {
                        prg = f.getAbsolutePath();
                    }
                }
            }
            // Reuse a running instance via -remote openURL(...).
            retValue = new NbProcessDescriptor(
                prg,
                "-remote \"openURL({" + ExtWebBrowser.UnixBrowserFormat.TAG_URL + "})\"", // NOI18N
                NbBundle.getMessage(MozillaBrowser.class, "MSG_BrowserExecutorHint")
            );
        }
        return retValue;
    }
}
| 2,631 |
1,336 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.swagger.generator.springmvc;
import org.apache.servicecomb.swagger.generator.core.unittest.UnitTestSwaggerUtils;
import org.apache.servicecomb.swagger.generator.springmvc.model.DefaultParameterSchema;
import org.apache.servicecomb.swagger.generator.springmvc.model.SwaggerTestTarget;
import org.apache.servicecomb.swagger.generator.springmvc.model.SwaggerTestTarget_ValueOverWritePath;
import org.apache.servicecomb.swagger.generator.springmvc.model.TestProducer;
import org.junit.Test;
public class TestSpringmvc {
/** Declaring more than one default path on a producer must be rejected. */
@Test
public void testMultiDefaultPath() {
    UnitTestSwaggerUtils.testException(
        "Only allowed one default path. method=org.apache.servicecomb.swagger.generator.springmvc.MultiDefaultPath:p2.",
        MultiDefaultPath.class);
}
/** A ResponseEntity return type must generate the expected swagger schema. */
@Test
public void testResponseEntity() {
    UnitTestSwaggerUtils.testSwagger("schemas/responseEntity.yaml", MethodResponseEntity.class);
}
/** Empty paths (on the class or on the method) still produce valid swagger. */
@Test
public void testEmptyPath() {
    UnitTestSwaggerUtils.testSwagger("schemas/emptyPath.yaml", Echo.class, "emptyPath");
    UnitTestSwaggerUtils.testSwagger("schemas/MethodEmptyPath.yaml", MethodEmptyPath.class);
}
/** Mixed parameter annotations on one method generate the expected schema. */
@Test
public void testMixupAnnotations() {
    UnitTestSwaggerUtils.testSwagger("schemas/mixupAnnotations.yaml", MethodMixupAnnotations.class);
}
@Test
public void testDefaultParameter() {
UnitTestSwaggerUtils.testSwagger("schemas/defaultParameter.yaml", MethodDefaultParameter.class);
}
@Test
public void testInheritHttpMethod() {
UnitTestSwaggerUtils.testSwagger("schemas/inheritHttpMethod.yaml", Echo.class, "inheritHttpMethod");
}
@Test
public void testRawJsonStringMethod() {
UnitTestSwaggerUtils.testSwagger("schemas/rawJsonStringMethod.yaml", Echo.class, "rawJsonStringMethod");
}
@Test
public void testClassMethodNoPath() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.ClassMethodNoPath:noPath.",
"Path must not both be empty in class and method",
ClassMethodNoPath.class,
"noPath");
}
@Test
public void testClassMethodNoHttpMetod() {
UnitTestSwaggerUtils.testException(
"HttpMethod must not both be empty in class and method, method=org.apache.servicecomb.swagger.generator.springmvc.ClassMethodNoHttpMethod:noHttpMethod.",
ClassMethodNoHttpMethod.class);
}
@Test
public void testMethodMultiHttpMethod() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.Echo:multiHttpMethod.",
"not allowed multi http method.",
Echo.class,
"multiHttpMethod");
}
@Test
public void testClassMultiHttpMethod() {
UnitTestSwaggerUtils.testException(
"not support multi http method, class=org.apache.servicecomb.swagger.generator.springmvc.ClassMultiHttpMethod.",
ClassMultiHttpMethod.class);
}
@Test
public void testMethodMultiPathUsingRequestMapping() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.MethodMultiPath:usingRequestMapping.",
"not allowed multi path.",
MethodMultiPath.class,
"usingRequestMapping");
}
@Test
public void testMethodMultiPathUsingGetMapping() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.MethodMultiPath:usingGetMapping.",
"not allowed multi path.",
MethodMultiPath.class,
"usingGetMapping");
}
@Test
public void testMethodMultiPathUsingPutMapping() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.MethodMultiPath:usingPutMapping.",
"not allowed multi path.",
MethodMultiPath.class,
"usingPutMapping");
}
@Test
public void testMethodMultiPathUsingPostMapping() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.MethodMultiPath:usingPostMapping.",
"not allowed multi path.",
MethodMultiPath.class,
"usingPostMapping");
}
@Test
public void testMethodMultiPathUsingPatchMapping() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.MethodMultiPath:usingPatchMapping.",
"not allowed multi path.",
MethodMultiPath.class,
"usingPatchMapping");
}
@Test
public void testMethodMultiPathUsingDeleteMapping() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.MethodMultiPath:usingDeleteMapping.",
"not allowed multi path.",
MethodMultiPath.class,
"usingDeleteMapping");
}
@Test
public void testClassMultiPath() {
UnitTestSwaggerUtils.testException(
"not support multi path, class=org.apache.servicecomb.swagger.generator.springmvc.ClassMultiPath.",
ClassMultiPath.class);
}
@Test
public void testEnumBody() {
UnitTestSwaggerUtils.testSwagger("schemas/enumBody.yaml", Echo.class, "enumBody");
}
@Test
public void testAsyncResponseEntity() {
UnitTestSwaggerUtils.testSwagger("schemas/asyncResponseEntity.yaml", Echo.class, "asyncResponseEntity");
}
@Test
public void testSimpleParam() {
UnitTestSwaggerUtils.testSwagger("schemas/testSimpleParam.yaml", DefaultParameterSchema.class, "testSimpleParam");
}
@Test
public void testObjectParam() {
UnitTestSwaggerUtils.testSwagger("schemas/testObjectParam.yaml", DefaultParameterSchema.class, "testObjectParam");
}
@Test
public void testMultiObjParamsWithSameFiledName() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.model.DefaultParameterSchema:testMultiObjParamsWithSameFiledName.",
"not support duplicated parameter, name=name.",
DefaultParameterSchema.class,
"testMultiObjParamsWithSameFiledName");
}
@Test
public void testUnsupportedParamType() {
UnitTestSwaggerUtils.testException(
"generate swagger operation failed, method=org.apache.servicecomb.swagger.generator.springmvc.model.DefaultParameterSchema:testUnsupportedParamType.",
"not allow complex type for query parameter, type=java.util.List<org.apache.servicecomb.swagger.generator.springmvc.model.TestParam>.",
DefaultParameterSchema.class,
"testUnsupportedParamType");
}
@Test
public void testSingleMediaType() {
UnitTestSwaggerUtils.testSwagger("schemas/testSingleMediaType.yaml", TestProducer.class, "testSingleMediaType");
}
@Test
public void testMultipleMediaType() {
UnitTestSwaggerUtils.testSwagger("schemas/testMultipleMediaType.yaml", TestProducer.class, "testMultipleMediaType");
}
@Test
public void testBlankMediaType() {
UnitTestSwaggerUtils.testSwagger("schemas/testBlankMediaType.yaml", TestProducer.class, "testBlankMediaType");
}
@Test
public void cookie() {
UnitTestSwaggerUtils.testSwagger("schemas/cookie.yaml", Echo.class, "cookie");
}
@Test
public void part() {
UnitTestSwaggerUtils.testSwagger("schemas/part.yaml", Echo.class, "part");
}
@Test
public void partArray() {
UnitTestSwaggerUtils.testSwagger("schemas/partArray.yaml", Echo.class, "partArray");
}
@Test
public void partList() {
UnitTestSwaggerUtils.testSwagger("schemas/partList.yaml", Echo.class, "partList");
}
@Test
public void partAnnotation() {
UnitTestSwaggerUtils.testSwagger("schemas/partAnnotation.yaml", Echo.class, "partAnnotation");
}
@Test
public void partArrayAnnotation() {
UnitTestSwaggerUtils.testSwagger("schemas/partArrayAnnotation.yaml", Echo.class, "partArrayAnnotation");
}
@Test
public void partListAnnotation() {
UnitTestSwaggerUtils.testSwagger("schemas/partListAnnotation.yaml", Echo.class, "partListAnnotation");
}
@Test
public void swaggerTestTarget() {
UnitTestSwaggerUtils.testSwagger("schemas/swaggerTestTarget.yaml", SwaggerTestTarget.class);
}
@Test
public void swaggerTestTarget_ValueOverWritePath() {
UnitTestSwaggerUtils
.testSwagger("schemas/swaggerTestTarget_ValueOverWritePath.yaml", SwaggerTestTarget_ValueOverWritePath.class);
}
@Test
public void testResponseEntityOptional() {
UnitTestSwaggerUtils
.testSwagger("schemas/testResponseEntityOptional.yaml", Echo.class, "testResponseEntityOptional");
}
@Test
public void testCompletableFutureResponseEntityOptional() {
UnitTestSwaggerUtils
.testSwagger("schemas/testCompletableFutureResponseEntityOptional.yaml", Echo.class,
"testCompletableFutureResponseEntityOptional");
}
@Test
public void nestedListString() {
UnitTestSwaggerUtils.testSwagger("schemas/nestedListString.yaml", Echo.class, "nestedListString");
}
}
| 3,385 |
486 | <filename>apps/infer.py
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: <EMAIL>
import logging
from lib.common.render import query_color, image2vid
from lib.common.config import cfg
from lib.common.cloth_extraction import extract_cloth
from lib.dataset.mesh_util import (
load_checkpoint,
update_mesh_shape_prior_losses,
get_optim_grid_image,
blend_rgb_norm,
unwrap,
)
from lib.dataset.TestDataset import TestDataset
from apps.ICON import ICON
import os
from termcolor import colored
import argparse
import numpy as np
from PIL import Image
import torch
import trimesh
import pickle
import numpy as np
# Let cuDNN auto-tune convolution algorithms (fixed input sizes assumed).
torch.backends.cudnn.benchmark = True
# Silence trimesh's noisy logging; only errors are reported.
logging.getLogger("trimesh").setLevel(logging.ERROR)
def tensor2variable(tensor, device):
    """Return a leaf copy of ``tensor`` on ``device`` with gradients enabled.

    Accepts either an existing ``torch.Tensor`` or any data
    ``torch.tensor`` understands (e.g. a numpy array or nested list).
    For tensor inputs, ``clone().detach()`` is used instead of
    ``torch.tensor(tensor, ...)``: the latter is the documented
    anti-pattern that emits a UserWarning and routes the copy through
    autograd. Callers pass e.g. a [1,23,3,3] pose tensor.

    :param tensor: source data to copy.
    :param device: target torch device (e.g. ``"cpu"`` or ``"cuda:0"``).
    :return: a new leaf tensor with ``requires_grad=True``.
    """
    if isinstance(tensor, torch.Tensor):
        return tensor.clone().detach().to(device).requires_grad_(True)
    return torch.tensor(tensor, device=device, requires_grad=True)
if __name__ == "__main__":
    # loading cfg file
    parser = argparse.ArgumentParser()
    parser.add_argument("-gpu", "--gpu_device", type=int, default=0)
    parser.add_argument("-colab", action="store_true")
    # iteration counts / schedules for the two optimization stages below
    parser.add_argument("-loop_smpl", "--loop_smpl", type=int, default=100)
    parser.add_argument("-patience", "--patience", type=int, default=5)
    parser.add_argument("-vis_freq", "--vis_freq", type=int, default=10)
    parser.add_argument("-loop_cloth", "--loop_cloth", type=int, default=100)
    parser.add_argument("-hps_type", "--hps_type", type=str, default="pymaf")
    parser.add_argument("-export_video", action="store_true")
    parser.add_argument("-in_dir", "--in_dir", type=str, default="./examples")
    parser.add_argument("-out_dir", "--out_dir",
                        type=str, default="./results")
    parser.add_argument('-seg_dir', '--seg_dir', type=str, default=None)
    parser.add_argument(
        "-cfg", "--config", type=str, default="./configs/icon-filter.yaml"
    )
    args = parser.parse_args()
    # cfg read and merge: experiment config first, then PyMAF defaults,
    # then a few hard-coded overrides, after which the config is frozen.
    cfg.merge_from_file(args.config)
    cfg.merge_from_file("./lib/pymaf/configs/pymaf_config.yaml")
    cfg_show_list = [
        "test_gpus",
        [args.gpu_device],
        "mcube_res",
        256,
        "clean_mesh",
        True,
    ]
    cfg.merge_from_list(cfg_show_list)
    cfg.freeze()
    # NOTE(review): hard-codes visibility to GPUs "0,1" regardless of
    # --gpu_device; confirm this matches the intended deployment.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    device = torch.device(f"cuda:{args.gpu_device}")
    # pick the tqdm flavor that renders correctly in the environment
    if args.colab:
        print(colored("colab environment...", "red"))
        from tqdm.notebook import tqdm
    else:
        print(colored("normal environment...", "red"))
        from tqdm import tqdm
    # load model and dataloader
    model = ICON(cfg)
    model = load_checkpoint(model, cfg)
    dataset_param = {
        'image_dir': args.in_dir,
        'seg_dir': args.seg_dir,
        'has_det': True,  # w/ or w/o detection
        'hps_type': args.hps_type  # pymaf/pare/pixie
    }
    # PIXIE body estimates are incompatible with the PaMIR prior
    if args.hps_type == "pixie" and "pamir" in args.config:
        print(colored("PIXIE isn't compatible with PaMIR, thus switch to PyMAF", "red"))
        dataset_param["hps_type"] = "pymaf"
    dataset = TestDataset(dataset_param, device)
    print(colored(f"Dataset Size: {len(dataset)}", "green"))
    pbar = tqdm(dataset)
    # Stage 1 (per image): refine the SMPL body estimate so that its
    # rendered normals/silhouette match the network-predicted normals.
    for data in pbar:
        pbar.set_description(f"{data['name']}")
        in_tensor = {"smpl_faces": data["smpl_faces"], "image": data["image"]}
        # The optimizer and variables
        optimed_pose = torch.tensor(
            data["body_pose"], device=device, requires_grad=True
        )  # [1,23,3,3]
        optimed_trans = torch.tensor(
            data["trans"], device=device, requires_grad=True
        )  # [3]
        optimed_betas = torch.tensor(
            data["betas"], device=device, requires_grad=True
        )  # [1,10]
        optimed_orient = torch.tensor(
            data["global_orient"], device=device, requires_grad=True
        )  # [1,1,3,3]
        optimizer_smpl = torch.optim.SGD(
            [optimed_pose, optimed_trans, optimed_betas, optimed_orient],
            lr=1e-3,
            momentum=0.9,
        )
        scheduler_smpl = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer_smpl,
            mode="min",
            factor=0.5,
            verbose=0,
            min_lr=1e-5,
            patience=args.patience,
        )
        # loss terms shared by both stages; "smpl"/"silhouette" drive the
        # body fit, the rest drive cloth refinement later
        losses = {
            "cloth": {"weight": 5.0, "value": 0.0},
            "edge": {"weight": 100.0, "value": 0.0},
            "normal": {"weight": 0.2, "value": 0.0},
            "laplacian": {"weight": 100.0, "value": 0.0},
            "smpl": {"weight": 1.0, "value": 0.0},
            "deform": {"weight": 20.0, "value": 0.0},
            "silhouette": {"weight": 1.0, "value": 0.0},
        }
        # smpl optimization (a single pass when using the PIFu prior)
        loop_smpl = tqdm(
            range(args.loop_smpl if cfg.net.prior_type != "pifu" else 1))
        per_data_lst = []
        for i in loop_smpl:
            per_loop_lst = []
            optimizer_smpl.zero_grad()
            # forward the body model; PIXIE uses a different call signature
            if dataset_param["hps_type"] != "pixie":
                smpl_out = dataset.smpl_model(
                    betas=optimed_betas,
                    body_pose=optimed_pose,
                    global_orient=optimed_orient,
                    pose2rot=False,
                )
                smpl_verts = ((smpl_out.vertices) +
                              optimed_trans) * data["scale"]
            else:
                smpl_verts, _, _ = dataset.smpl_model(
                    shape_params=optimed_betas,
                    expression_params=tensor2variable(data["exp"], device),
                    body_pose=optimed_pose,
                    global_pose=optimed_orient,
                    jaw_pose=tensor2variable(data["jaw_pose"], device),
                    left_hand_pose=tensor2variable(
                        data["left_hand_pose"], device),
                    right_hand_pose=tensor2variable(
                        data["right_hand_pose"], device),
                )
                smpl_verts = (smpl_verts + optimed_trans) * data["scale"]
            # flip Y/Z into the renderer's coordinate convention
            smpl_verts *= torch.tensor([1.0, -1.0, -1.0]).to(device)
            # render optimized mesh (normal, T_normal, image [-1,1])
            in_tensor["T_normal_F"], in_tensor["T_normal_B"] = dataset.render_normal(
                smpl_verts, in_tensor["smpl_faces"]
            )
            T_mask_F, T_mask_B = dataset.render.get_silhouette_image()
            # predicted clothed-body normals are targets, so no grads here
            with torch.no_grad():
                in_tensor["normal_F"], in_tensor["normal_B"] = model.netG.normal_filter(
                    in_tensor
                )
            diff_F_smpl = torch.abs(
                in_tensor["T_normal_F"] - in_tensor["normal_F"])
            diff_B_smpl = torch.abs(
                in_tensor["T_normal_B"] - in_tensor["normal_B"])
            losses["smpl"]["value"] = (diff_F_smpl + diff_B_smpl).mean()
            # silhouette loss: foreground mask derived from predicted
            # normals vs. the rendered SMPL silhouette
            smpl_arr = torch.cat([T_mask_F, T_mask_B], dim=-1)[0]
            gt_arr = torch.cat(
                [in_tensor["normal_F"][0], in_tensor["normal_B"][0]], dim=2
            ).permute(1, 2, 0)
            gt_arr = ((gt_arr + 1.0) * 0.5).to(device)
            bg_color = (
                torch.Tensor([0.5, 0.5, 0.5]).unsqueeze(
                    0).unsqueeze(0).to(device)
            )
            gt_arr = ((gt_arr - bg_color).sum(dim=-1) != 0.0).float()
            diff_S = torch.abs(smpl_arr - gt_arr)
            losses["silhouette"]["value"] = diff_S.mean()
            # Weighted sum of the losses
            smpl_loss = 0.0
            for k in ["smpl", "silhouette"]:
                smpl_loss += losses[k]["value"] * losses[k]["weight"]
            loop_smpl.set_description(f"Body Fitting = {smpl_loss:.3f}")
            # periodically snapshot front/back comparison grids for the GIF
            # NOTE(review): the 512 split assumes 512x512 renders - confirm
            if i % args.vis_freq == 0:
                per_loop_lst.extend(
                    [
                        in_tensor["image"],
                        in_tensor["T_normal_F"],
                        in_tensor["normal_F"],
                        diff_F_smpl / 2.0,
                        diff_S[:, :512].unsqueeze(
                            0).unsqueeze(0).repeat(1, 3, 1, 1),
                    ]
                )
                per_loop_lst.extend(
                    [
                        in_tensor["image"],
                        in_tensor["T_normal_B"],
                        in_tensor["normal_B"],
                        diff_B_smpl / 2.0,
                        diff_S[:, 512:].unsqueeze(
                            0).unsqueeze(0).repeat(1, 3, 1, 1),
                    ]
                )
                per_data_lst.append(
                    get_optim_grid_image(
                        per_loop_lst, None, nrow=5, type="smpl")
                )
            smpl_loss.backward(retain_graph=True)
            optimizer_smpl.step()
            scheduler_smpl.step(smpl_loss)
        # keep the final fitted vertices for the cloth stage
        in_tensor["smpl_verts"] = smpl_verts
        # visualize the optimization process
        # 1. SMPL Fitting
        # 2. Clothes Refinement
        os.makedirs(os.path.join(args.out_dir, cfg.name,
                                 "refinement"), exist_ok=True)
        # visualize the final results in self-rotation mode
        os.makedirs(os.path.join(args.out_dir, cfg.name, "vid"), exist_ok=True)
        # final results rendered as image
        # 1. Render the final fitted SMPL (xxx_smpl.png)
        # 2. Render the final reconstructed clothed human (xxx_cloth.png)
        # 3. Blend the original image with predicted cloth normal (xxx_overlap.png)
        os.makedirs(os.path.join(args.out_dir, cfg.name, "png"), exist_ok=True)
        # final reconstruction meshes
        # 1. SMPL mesh (xxx_smpl.obj)
        # 2. clohted mesh (xxx_recon.obj)
        # 3. refined clothed mesh (xxx_refine.obj)
        os.makedirs(os.path.join(args.out_dir, cfg.name, "obj"), exist_ok=True)
        # save the body-fitting progress snapshots (gif/avi/png)
        if cfg.net.prior_type != "pifu":
            per_data_lst[0].save(
                os.path.join(
                    args.out_dir, cfg.name, f"refinement/{data['name']}_smpl.gif"
                ),
                save_all=True,
                append_images=per_data_lst[1:],
                duration=500,
                loop=0,
            )
            if args.vis_freq == 1:
                image2vid(
                    per_data_lst,
                    os.path.join(
                        args.out_dir, cfg.name, f"refinement/{data['name']}_smpl.avi"
                    ),
                )
            per_data_lst[-1].save(
                os.path.join(args.out_dir, cfg.name,
                             f"png/{data['name']}_smpl.png")
            )
        # convert the predicted front normal map from [-1,1] to uint8 RGB
        norm_pred = (
            ((in_tensor["normal_F"][0].permute(1, 2, 0) + 1.0) * 255.0 / 2.0)
            .detach()
            .cpu()
            .numpy()
            .astype(np.uint8)
        )
        # map crop-space normals/mask back into the original image frame
        norm_orig = unwrap(norm_pred, data)
        mask_orig = unwrap(
            np.repeat(
                data["mask"].permute(1, 2, 0).detach().cpu().numpy(), 3, axis=2
            ).astype(np.uint8),
            data,
        )
        rgb_norm = blend_rgb_norm(data["ori_image"], norm_orig, mask_orig)
        # side-by-side: original image | normal overlay
        Image.fromarray(
            np.concatenate(
                [data["ori_image"].astype(np.uint8), rgb_norm], axis=1)
        ).save(os.path.join(args.out_dir, cfg.name, f"png/{data['name']}_overlap.png"))
        # ------------------------------------------------------------------------------------------------------------------
        # Stage 2: reconstruct the clothed mesh, then refine it with a
        # per-vertex deformation field optimized against predicted normals.
        # cloth optimization
        loop_cloth = tqdm(range(args.loop_cloth))
        per_data_lst = []
        # cloth recon
        in_tensor.update(
            dataset.compute_vis_cmap(
                in_tensor["smpl_verts"][0], in_tensor["smpl_faces"][0]
            )
        )
        # PaMIR additionally needs voxelized SMPL features
        if cfg.net.prior_type == "pamir":
            in_tensor.update(
                dataset.compute_voxel_verts(
                    optimed_pose,
                    optimed_orient,
                    optimed_betas,
                    optimed_trans,
                    data["scale"],
                )
            )
        # run implicit reconstruction (marching cubes inside test_single)
        with torch.no_grad():
            verts_pr, faces_pr, _ = model.test_single(in_tensor)
        recon_obj = trimesh.Trimesh(
            verts_pr, faces_pr, process=False, maintains_order=True
        )
        recon_obj.export(
            os.path.join(args.out_dir, cfg.name,
                         f"obj/{data['name']}_recon.obj")
        )
        # # remeshing for better surface topology (minor improvement, yet time-consuming)
        # if cfg.net.prior_type == 'icon':
        #     import pymeshlab
        #     ms = pymeshlab.MeshSet()
        #     ms.load_new_mesh(
        #         os.path.join(args.out_dir, cfg.name,
        #                      f"obj/{data['name']}_recon.obj"))
        #     ms.laplacian_smooth()
        #     ms.remeshing_isotropic_explicit_remeshing(
        #         targetlen=pymeshlab.Percentage(0.5))
        #     ms.save_current_mesh(
        #         os.path.join(args.out_dir, cfg.name,
        #                      f"obj/{data['name']}_recon.obj"))
        #     polished_mesh = trimesh.load_mesh(
        #         os.path.join(args.out_dir, cfg.name,
        #                      f"obj/{data['name']}_recon.obj"))
        #     verts_pr = torch.tensor(polished_mesh.vertices).float()
        #     faces_pr = torch.tensor(polished_mesh.faces).long()
        # per-vertex displacement field, initialized to zero
        deform_verts = torch.full(
            verts_pr.shape, 0.0, device=device, requires_grad=True
        )
        optimizer_cloth = torch.optim.SGD(
            [deform_verts], lr=1e-1, momentum=0.9)
        scheduler_cloth = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer_cloth,
            mode="min",
            factor=0.1,
            verbose=0,
            min_lr=1e-4,
            patience=args.patience,
        )
        # when refinement is disabled, still render one snapshot grid
        if args.loop_cloth == 0:
            per_loop_lst = []
            in_tensor["P_normal_F"], in_tensor["P_normal_B"] = dataset.render_normal(
                verts_pr.unsqueeze(0).to(device),
                faces_pr.unsqueeze(0).to(device).long(),
                deform_verts,
            )
            recon_render_lst = dataset.render.get_clean_image(cam_ids=[
                                                              0, 1, 2, 3])
            per_loop_lst.extend(recon_render_lst)
            per_data_lst.append(get_optim_grid_image(
                per_loop_lst, None, type="cloth"))
        for i in loop_cloth:
            per_loop_lst = []
            optimizer_cloth.zero_grad()
            # render normals of the deformed reconstruction
            in_tensor["P_normal_F"], in_tensor["P_normal_B"] = dataset.render_normal(
                verts_pr.unsqueeze(0).to(device),
                faces_pr.unsqueeze(0).to(device).long(),
                deform_verts,
            )
            recon_render_lst = dataset.render.get_clean_image(cam_ids=[
                                                              0, 1, 2, 3])
            # fills the "edge"/"normal"/"laplacian" regularizer values
            update_mesh_shape_prior_losses(dataset.render.mesh, losses)
            diff_F_cloth = torch.abs(
                in_tensor["P_normal_F"] - in_tensor["normal_F"])
            diff_B_cloth = torch.abs(
                in_tensor["P_normal_B"] - in_tensor["normal_B"])
            losses["cloth"]["value"] = (diff_F_cloth + diff_B_cloth).mean()
            # penalize only the 100 largest displacements
            losses["deform"]["value"] = torch.topk(
                torch.abs(deform_verts.flatten()), 100
            )[0].mean()
            # Weighted sum of the losses (body-fit terms excluded)
            cloth_loss = torch.tensor(0.0, device=device)
            pbar_desc = ""
            for k in losses.keys():
                if k not in ["smpl", "silhouette"]:
                    cloth_loss += losses[k]["value"] * losses[k]["weight"]
                    pbar_desc = f"Cloth Refinement: {cloth_loss:.3f}"
            loop_cloth.set_description(pbar_desc)
            # periodic snapshot grids, as in the body-fitting stage
            if i % args.vis_freq == 0:
                per_loop_lst.extend(
                    [
                        in_tensor["image"],
                        in_tensor["P_normal_F"],
                        in_tensor["normal_F"],
                        diff_F_cloth / 2.0,
                    ]
                )
                per_loop_lst.extend(
                    [
                        in_tensor["image"],
                        in_tensor["P_normal_B"],
                        in_tensor["normal_B"],
                        diff_B_cloth / 2.0,
                    ]
                )
                per_loop_lst.extend(recon_render_lst)
                per_data_lst.append(
                    get_optim_grid_image(per_loop_lst, None, type="cloth")
                )
            cloth_loss.backward(retain_graph=True)
            optimizer_cloth.step()
            scheduler_cloth.step(cloth_loss)
        # save refinement progress snapshots (gif/avi/png)
        if args.loop_cloth > 0:
            # gif for optimization
            per_data_lst[0].save(
                os.path.join(
                    args.out_dir, cfg.name, f"refinement/{data['name']}_cloth.gif"
                ),
                save_all=True,
                append_images=per_data_lst[1:],
                duration=500,
                loop=0,
            )
            if args.vis_freq == 1:
                image2vid(
                    per_data_lst,
                    os.path.join(
                        args.out_dir, cfg.name, f"refinement/{data['name']}_cloth.avi"
                    ),
                )
            per_data_lst[-1].save(
                os.path.join(args.out_dir, cfg.name,
                             f"png/{data['name']}_cloth.png")
            )
        if args.export_video:
            # self-rotated video
            dataset.render.get_rendered_video(
                [data["ori_image"], rgb_norm],
                os.path.join(args.out_dir, cfg.name,
                             f"vid/{data['name']}_cloth.mp4"),
            )
        # outlier suppression: reset the 30 largest displacement components
        # to the mean displacement before baking the deformation in
        deform_verts = deform_verts.flatten().detach()
        deform_verts[torch.topk(torch.abs(deform_verts), 30)[
            1]] = deform_verts.mean()
        deform_verts = deform_verts.view(-1, 3).cpu()
        # export refined clothed mesh with per-vertex colors from the image
        final = trimesh.Trimesh(
            verts_pr + deform_verts, faces_pr, process=False, maintains_order=True
        )
        final_colors = query_color(
            verts_pr + deform_verts.detach().cpu(),
            faces_pr,
            in_tensor["image"],
            device=device,
        )
        final.visual.vertex_colors = final_colors
        final.export(
            f"{args.out_dir}/{cfg.name}/obj/{data['name']}_refine.obj")
        # export the fitted SMPL body (Y flipped back for OBJ convention)
        smpl_obj = trimesh.Trimesh(
            in_tensor["smpl_verts"].detach().cpu()[0] *
            torch.tensor([1.0, -1.0, 1.0]),
            in_tensor['smpl_faces'].detach().cpu()[0],
            process=False,
            maintains_order=True
        )
        smpl_obj.export(
            f"{args.out_dir}/{cfg.name}/obj/{data['name']}_smpl.obj")
        # optional garment extraction driven by segmentation masks
        if not (args.seg_dir is None):
            os.makedirs(os.path.join(
                args.out_dir, cfg.name, "clothes"), exist_ok=True)
            os.makedirs(os.path.join(args.out_dir, cfg.name,
                                     "clothes", "info"), exist_ok=True)
            for seg in data['segmentations']:
                # These matrices work for PyMaf, not sure about the other hps type
                # NOTE(review): hard-coded projection/extrinsics - verify
                # against the renderer's camera for pare/pixie inputs.
                K = np.array([[1.0000, 0.0000, 0.0000, 0.0000],
                              [0.0000, 1.0000, 0.0000, 0.0000],
                              [0.0000, 0.0000, -0.5000, 0.0000],
                              [-0.0000, -0.0000, 0.5000, 1.0000]]).T
                R = np.array([[-1., 0., 0.],
                              [0., 1., 0.],
                              [0., 0., -1.]])
                t = np.array([[-0., -0., 100.]])
                clothing_obj = extract_cloth(recon_obj, seg, K, R, t, smpl_obj)
                if clothing_obj is not None:
                    cloth_type = seg['type'].replace(' ', '_')
                    # pickle the body parameters alongside the garment mesh
                    cloth_info = {
                        'betas': optimed_betas,
                        'body_pose': optimed_pose,
                        'global_orient': optimed_orient,
                        'pose2rot': False,
                        'clothing_type': cloth_type,
                    }
                    file_id = f"{data['name']}_{cloth_type}"
                    with open(os.path.join(args.out_dir, cfg.name, "clothes", "info", f"{file_id}_info.pkl"), 'wb') as fp:
                        pickle.dump(cloth_info, fp)
                    clothing_obj.export(os.path.join(
                        args.out_dir, cfg.name, "clothes", f"{file_id}.obj"))
                else:
                    print(
                        f"Unable to extract clothing of type {seg['type']} from image {data['name']}")
| 11,948 |
1,104 | <reponame>yblucky/mdrill<gh_stars>1000+
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* HeadlessEventCollector.java
* Copyright (C) 2011 University of Waikato, Hamilton, New Zealand
*
*/
package weka.gui.beans;
import java.util.EventObject;
import java.util.List;
/**
* Interface for Knowledge Flow components that (typically) provide
* an interactive graphical visualization to implement. This allows
* events that would normally be processed to provide a graphical display
* to be collected and retrieved when running in headless mode (perhaps on
* a server for example). A copy of the component that is running with
* access to a display can be passed the list of events in order to
* construct its display-dependent output.
*
* @author <NAME> (mhall{[at]}pentaho{[dot]}com).
* @version $Revision: 7567 $
*/
public interface HeadlessEventCollector {
  /**
   * Returns the events that were captured while running in headless mode.
   * May return null or an empty list if the component is not running
   * headless or no events were processed.
   *
   * @return a list of EventObjects, or null.
   */
  List<EventObject> retrieveHeadlessEvents();
  /**
   * Replays a list of events collected earlier by a headless run so that
   * a display-capable copy of the component can build its graphical
   * output. Has no effect if the component is running in headless mode.
   *
   * @param headless a list of EventObjects to process.
   */
  void processHeadlessEvents(List<EventObject> headless);
}
| 600 |
1,539 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
""" Global monitor for the fuzzing run """
import time
from engine.core.status_codes_monitor import StatusCodesMonitor
from engine.core.renderings_monitor import RenderingsMonitor
def Monitor():
    """Return the process-wide FuzzingMonitor singleton.

    Thin convenience wrapper around FuzzingMonitor.Instance().
    """
    instance = FuzzingMonitor.Instance()
    return instance
class FuzzingMonitor(object):
    """ Process-wide singleton tracking the global state of a fuzzing run.

    Owns the wall-clock time budget and delegates bookkeeping to two
    sub-monitors: a StatusCodesMonitor (request/response accounting) and a
    RenderingsMonitor (request rendering accounting). All timestamps and
    budgets are kept in microseconds.
    """
    __instance = None

    @staticmethod
    def Instance():
        """ Singleton's instance accessor

        @return FuzzingMonitor instance
        @rtype  FuzzingMonitor

        """
        if FuzzingMonitor.__instance is None:
            raise Exception("FuzzingMonitor not yet initialized.")
        return FuzzingMonitor.__instance

    def __init__(self):
        if FuzzingMonitor.__instance is not None:
            raise Exception("Attempting to create a new singleton instance.")

        # timestamp of the beginning of fuzzing session (microseconds)
        self._start_time = int(time.time()*10**6)

        # Time budget used to stop fuzzing jobs, kept in microseconds so it
        # is directly comparable with running_time. Default: 24*30 hours
        # (~ 1 month).
        # Bug fix: the previous default stored the bare hour count (24*30),
        # which remaining_time_budget then compared against microseconds,
        # so the default budget expired almost immediately unless
        # set_time_budget() was called first.
        self._time_budget = 10**6*3600*(24*30)

        # Create the status codes monitor
        self.status_codes_monitor = StatusCodesMonitor(self._start_time)

        # Create the renderings monitor
        self.renderings_monitor = RenderingsMonitor()

        FuzzingMonitor.__instance = self

    def set_time_budget(self, time_in_hours):
        """ Set the initial time budget.

        @param time_in_hours: Time budget in hours.
        @type time_in_hours: Int

        @return: None
        @rtype : None

        """
        # stored internally in microseconds
        self._time_budget = 10**6*3600*float(time_in_hours)

    def reset_start_time(self):
        """ Resets start time to now (time of routine's invocation in
        microseconds).

        @return: None
        @rtype : None

        """
        self._start_time = int(time.time()*10**6)
        self.status_codes_monitor._start_time = self._start_time

    @property
    def running_time(self):
        """ Returns the running time.

        @return: Running time in microseconds.
        @rtype : int

        """
        _running_time = int(time.time()*10**6) - self._start_time
        return _running_time

    @property
    def remaining_time_budget(self):
        """ Returns the time remaining from the initial budget.

        @return: Remaining time in microseconds (negative once exhausted).
        @rtype : int

        """
        running_time = int(time.time()*10**6) - self._start_time
        return self._time_budget - running_time

    @property
    def start_time(self):
        """ Returns start time of fuzzing.

        @return: The start time in microseconds since the epoch.
        @rtype : int

        """
        return self._start_time

    def terminate_fuzzing(self):
        """ Terminates the fuzzing thread by setting the time budget to zero

        @return: None
        @rtype : None

        """
        self._time_budget = 0.0

    ## Start of RenderingsMonitor functions
    def update_renderings_monitor(self, request, is_valid):
        """ Calls the renderings monitor's update function

        @param request: The request whose current rendering we are registering.
        @type request: Request class object.
        @param is_valid: Flag indicating whether the current rendering leads to
                            a valid status code or not.
        @type is_valid: Bool

        @return: None
        @rtype : None

        """
        self.renderings_monitor.update(request, is_valid)

    def reset_renderings_monitor(self):
        """ Calls internal renderings monitor's reset function

        @return: None
        @rtype : None

        """
        self.renderings_monitor.reset()

    def is_invalid_rendering(self, request):
        """ Calls internal renderings monitor's is_invalid_rendering function

        @param request: The request whose current rendering we are registering.
        @type request: Request class object.

        @return: True, if rendering is known invalid.
        @rtype : Bool

        """
        return self.renderings_monitor.is_invalid_rendering(request)

    def is_fully_rendered_request(self, request, lock=None):
        """ Calls internal renderings monitor's is_fully_rendered_request function

        @param request: The request in question.
        @type request: Request class object.
        @param lock: Lock object used for sync of more than one fuzzing jobs.
        @type lock: thread.Lock object

        @return: True if the request in question has been rendered in the past.
        @rtype : Bool

        """
        return self.renderings_monitor.is_fully_rendered_request(request, lock)

    def num_fully_rendered_requests(self, request_list, lock=None):
        """ Calls internal renderings monitor's num_fully_rendered_requests function

        @param request_list: The complete list of requests to check for full renderings
        @type request_list: List[Request]
        @param lock: Lock object used for sync of more than one fuzzing jobs.
        @type lock: thread.Lock object

        @return: The number of requests that have been rendered at least once.
        @rtype : Int

        """
        return self.renderings_monitor.num_fully_rendered_requests(request_list, lock)

    def set_memoize_invalid_past_renderings_on(self):
        """ Calls internal renderings monitor's
        set_memoize_invalid_past_renderings_on function

        @return: None
        @rtype : None

        """
        self.renderings_monitor.set_memoize_invalid_past_renderings_on()

    @property
    def current_fuzzing_generation(self):
        """ Returns the current fuzzing generation

        @return: The current fuzzing generation
        @rtype : Int

        """
        return self.renderings_monitor.current_fuzzing_generation

    @current_fuzzing_generation.setter
    def current_fuzzing_generation(self, generation):
        """ Setter for the current fuzzing generation

        @param generation: The new generation to set
        @type generation: Int

        @return: None
        @rtype : None

        """
        self.renderings_monitor.current_fuzzing_generation = generation

    # Start of StatusCodeMonitor functions
    def increment_requests_count(self, type):
        """ Calls internal status codes monitor's increment_requests_count function

        @param type: The type of request count to increment (i.e. gc)
        @type type: Str

        @return: None
        @rtype : None

        """
        self.status_codes_monitor.increment_requests_count(type)

    def num_requests_sent(self):
        """ Calls internal status codes monitor's num_requests_sent function

        @return: Number of requests sent so far.
        @rtype : Dict

        """
        return self.status_codes_monitor.num_requests_sent()

    def num_test_cases(self, lock=None):
        """ Calls internal status codes monitor's num_test_cases function

        DEPRECATED: This function is currently deprecated and unused

        @param lock: Lock object used for sync of more than one fuzzing jobs.
        @type lock: thread.Lock object

        @return: Number of test cases executed so far.
        @rtype : Int

        """
        return self.status_codes_monitor.num_test_cases(lock)

    def query_status_codes_monitor(self, request, valid_codes, fail_codes, lock=None):
        """ Calls internal status codes monitor's query_response_codes function

        @param request: The request in question.
        @type request: Request class object.
        @param valid_codes: List of status codes to query for.
        @type valid_codes: List[str]
        @param fail_codes: List of failure status codes to query for.
        @type fail_codes: List[str]
        @param lock: Lock object used for sync of more than one fuzzing jobs.
        @type lock: thread.Lock object

        @return: A namedtuple object, which contains:
                 whether or not the status code was valid, the request was fully valid,
                 and if the request failed due to a failed sequence re-render
        @rtype : Namedtuple(valid_code, fully_valid, sequence_failure)

        """
        return self.status_codes_monitor.query_response_codes(request, valid_codes, fail_codes, lock)

    def update_status_codes_monitor(self, sequence, status_codes, lock=None):
        """ Calls internal status codes monitor's update function

        @param sequence: The sequence which was just executed and whose status
                            codes going to be registered in the internal monitor.
        @type sequence: Sequence class object.
        @param status_codes: List of RequestExecutionStatus objects used when updating
                             the status codes monitor
        @type status_codes: List[RequestExecutionStatus]
        @param lock: Lock object used for sync of more than one fuzzing jobs.
        @type lock: thread.Lock object

        @return: None
        @rtype : None

        """
        self.status_codes_monitor.update(sequence, status_codes, lock)

    @property
    def sequence_statuses(self):
        """ Returns a copy of the status codes monitor's sequence_statuses dictionary

        @return A copy of the sequence_statuses dictionary
        @rtype  Dict(int, SequenceStatusCodes)

        """
        return self.status_codes_monitor.sequence_statuses
| 3,541 |
7,892 | /* SoX Resampler Library Copyright (c) 2007-18 <EMAIL>
* Licence for this file: LGPL v2.1 See LICENCE for details. */
/* Common includes etc. for the examples. */
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _WIN32
/* Work-around for broken file-I/O on MS-Windows: */
#include <io.h>
#include <fcntl.h>
/* Switch stdin/stdout to binary mode so sample data is not mangled by CRLF
 * translation; a no-op on non-Windows platforms. */
#define USE_STD_STDIO _setmode(_fileno(stdout), _O_BINARY), \
  _setmode(_fileno(stdin ), _O_BINARY)
#else
#define USE_STD_STDIO
#endif
/* Fixed-width integer substitutes -- the examples avoid depending on
 * <stdint.h> (presumably for pre-C99 compilers; confirm upstream intent). */
#undef int16_t
#define int16_t short
#undef int32_t
#if LONG_MAX > 2147483647L
#define int32_t int
#elif LONG_MAX < 2147483647L
#error this programme requires that 'long int' has at least 32-bits
#else
#define int32_t long
#endif
/* Unconditionally (re)define the usual helper macros so prior definitions
 * from system headers cannot conflict. */
#undef min
#define min(x,y) ((x)<(y)?(x):(y))
#undef max
#define max(x,y) ((x)>(y)?(x):(y))
#undef AL
#define AL(a) (sizeof(a)/sizeof((a)[0])) /* Array Length */
#undef M_PI /* Sometimes missing, so ensure that it is defined: */
#define M_PI 3.14159265358979323846
| 495 |
653 | <filename>iexfinance/apidata/__init__.py
from iexfinance.apidata.base import APIReader
def get_api_status(**kwargs):
    """
    Retrieve the IEX Cloud API status.

    Reference: https://iexcloud.io/docs/api/#status

    Data Weighting: ``Free``
    """
    reader = APIReader(**kwargs)
    return reader.fetch()
| 112 |
678 | //
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "NSObject.h"
@class NSError, NSThread;
// Wrapper pairing a raw sqlite3* handle with the thread using it and a
// per-thread retain count. NOTE(review): this declaration was recovered via
// class-dump; member semantics below are inferred from names -- confirm
// against the actual implementation.
@interface WCDBHandleWrap : NSObject
{
    NSThread *m_usedThread; // thread the handle is currently bound to (see -usedThread)
    struct sqlite3 *m_handle; // underlying SQLite database connection
    id <WCDBHandleWrapProtocol> m_delegate; // callback target supplied at init
    unsigned long long m_status; // state value mutated via -updateStatus:/-resetStatus; meaning TODO confirm
    int m_threadedRetainCount; // presumably balanced by -retainAtCurrentThread / -releaseAtCurrentThread
    NSError *_error; // backing store for the `error` property
}
@property(retain, nonatomic) NSError *error; // @synthesize error=_error;
- (void).cxx_destruct;
- (void)resetStatus;
- (void)updateStatus:(unsigned long long)arg1;
@property(readonly, nonatomic) unsigned long long status;
- (void)releaseAtCurrentThread;
- (void)retainAtCurrentThread;
- (void)reset;
@property(readonly, nonatomic) __weak NSThread *usedThread;
@property(readonly, nonatomic) struct sqlite3 *handle;
- (id)initWithHandle:(struct sqlite3 *)arg1 andDelegate:(id)arg2;
@end
| 343 |
1,652 | package com.ctrip.xpipe.redis.keeper.impl;
import com.ctrip.xpipe.concurrent.AbstractExceptionLogTask;
import com.ctrip.xpipe.netty.filechannel.ReferenceFileRegion;
import com.ctrip.xpipe.redis.core.protocal.protocal.EofType;
import com.ctrip.xpipe.redis.core.protocal.protocal.LenEofType;
import com.ctrip.xpipe.redis.core.store.ReplicationStore;
import com.ctrip.xpipe.redis.keeper.AbstractRedisKeeperTest;
import com.ctrip.xpipe.redis.keeper.RedisClient;
import com.ctrip.xpipe.redis.keeper.RedisKeeperServer;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.DefaultChannelPromise;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Mockito.*;
/**
* @author wenchao.meng
* <p>
* Sep 13, 2016
*/
@RunWith(MockitoJUnitRunner.class)
public class DefaultRedisSlaveTest extends AbstractRedisKeeperTest {
    // Maximum time (ms) the slave waits for an rdb dump to begin before closing itself.
    private int waitDumpMilli = 100;
    @Mock
    public Channel channel;
    @Mock
    public RedisKeeperServer redisKeeperServer;
    @Mock
    private ReplicationStore replicationStore;
    // Object under test; recreated before each test.
    public DefaultRedisSlave redisSlave;
    @Before
    public void beforeDefaultRedisSlaveTest() {
        // Stub only what DefaultRedisClient needs from the netty channel.
        when(channel.closeFuture()).thenReturn(new DefaultChannelPromise(channel));
        when(channel.remoteAddress()).thenReturn(localhostInetAdress(randomPort()));
        RedisClient redisClient = new DefaultRedisClient(channel, redisKeeperServer);
        redisSlave= new DefaultRedisSlave(redisClient);
        redisSlave.setRdbDumpMaxWaitMilli(waitDumpMilli);
    }
    // Verifies which operations remain permitted after close(), and that all of them
    // fail once the slave is additionally marked psync-processed.
    @Test
    public void testClose() throws IOException {
        redisSlave.close();
        //should success
        redisSlave.sendMessage(randomString(10).getBytes());
        redisSlave.sendMessage(Unpooled.wrappedBuffer(randomString(10).getBytes()));
        //should fail
        shouldThrowException(() -> redisSlave.onCommand(mock(ReferenceFileRegion.class)));
        shouldThrowException(() -> redisSlave.beginWriteRdb(mock(EofType.class), 0L));
        shouldThrowException(() -> redisSlave.beginWriteCommands(0L));
        redisSlave.markPsyncProcessed();
        //all should fail
        shouldThrowException(() -> redisSlave.sendMessage(randomString(10).getBytes()));
        shouldThrowException(() -> redisSlave.sendMessage(Unpooled.wrappedBuffer(randomString(10).getBytes())));
        shouldThrowException(() -> redisSlave.onCommand(mock(ReferenceFileRegion.class)));
        shouldThrowException(() -> redisSlave.beginWriteRdb(mock(EofType.class), 0L));
        shouldThrowException(() -> redisSlave.beginWriteCommands(0L));
    }
    // close() before psync is processed: the slave first reports "closing" and becomes
    // fully closed once the wait-for-psync timeout elapses.
    @Test
    public void testDoRealCloseTimeout() throws IOException, TimeoutException {
        int timeoutMilli = 100;
        redisSlave.setWaitForPsyncProcessedTimeoutMilli(timeoutMilli);
        redisSlave.close();
        Assert.assertTrue(redisSlave.getCloseState().isClosing());
        waitConditionUntilTimeOut(() -> redisSlave.getCloseState().isClosed());
    }
    // When psync has already been processed, close() completes immediately.
    @Test
    public void testCloseTimeoutNotMarkPsyncProcessed() throws IOException {
        redisSlave.markPsyncProcessed();
        Assert.assertTrue(redisSlave.getCloseState().isOpen());
        redisSlave.close();
        Assert.assertTrue(redisSlave.getCloseState().isClosed());
    }
    // Races close(50) against markPsyncProcessed() on fresh slaves; no invocation may throw.
    @Test
    public void testConcurrentCloseAndMarkPsyncProcessed() throws IOException, InterruptedException {
        int rount = 10;
        final AtomicReference<Exception> exception = new AtomicReference<>();
        CountDownLatch latch = new CountDownLatch(rount*2);
        for(int i=0;i<rount;i++) {
            RedisClient redisClient = new DefaultRedisClient(channel, redisKeeperServer);
            redisSlave= new DefaultRedisSlave(redisClient);
            executors.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        redisSlave.close(50);
                    } catch (Exception e) {
                        logger.error("error close slave", e);
                        exception.set(e);
                    }finally {
                        latch.countDown();
                    }
                }
            });
            executors.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        sleep(3);
                        redisSlave.markPsyncProcessed();
                    }finally {
                        latch.countDown();
                    }
                }
            });
        }
        latch.await(5, TimeUnit.SECONDS);
        Assert.assertNull(exception.get());
    }
    // If no rdb dump starts within the configured wait, the slave closes itself.
    @SuppressWarnings("resource")
    @Test
    public void testWaitRdbTimeout() {
        redisSlave.waitForRdbDumping();
        sleep(waitDumpMilli * 2);
        Assert.assertTrue(!redisSlave.isOpen());
    }
    // An rdb dump that begins inside the wait window keeps the slave open.
    @SuppressWarnings("resource")
    @Test
    public void testWaitRdbNormal() {
        redisSlave.waitForRdbDumping();
        sleep(waitDumpMilli / 2);
        redisSlave.beginWriteRdb(new LenEofType(1000), 2);
        sleep(waitDumpMilli);
        Assert.assertTrue(redisSlave.isOpen());
    }
    // Sanity check of guava's SettableFuture: every listener added -- even concurrently
    // with completion -- must be notified exactly once.
    @Test
    public void testFuture() {
        SettableFuture<Boolean> objectSettableFuture = SettableFuture.create();
        AtomicInteger listenerCount = new AtomicInteger(0);
        AtomicInteger notifyCount = new AtomicInteger();
        executors.execute(new AbstractExceptionLogTask() {
            @Override
            protected void doRun() throws Exception {
                while (!Thread.interrupted()) {
                    listenerCount.incrementAndGet();
                    objectSettableFuture.addListener(new Runnable() {
                        @Override
                        public void run() {
                            notifyCount.incrementAndGet();
                        }
                    }, MoreExecutors.directExecutor());
                }
                logger.info("exit thread");
            }
        });
        sleep(10);
        objectSettableFuture.set(true);
        executors.shutdownNow();
        sleep(10);
        logger.info("{}, {}", listenerCount, notifyCount);
        Assert.assertEquals(listenerCount.get(), notifyCount.get());
    }
    // If replaying buffered commands fails (command file gone), the slave must close.
    @Test
    public void testSlaveClosedWhenSendCommandFail() throws Exception {
        when(redisKeeperServer.getReplicationStore()).thenReturn(replicationStore);
        doThrow(new IOException("File for offset 0 does not exist")).when(replicationStore).addCommandsListener(anyLong(), any());
        redisSlave.beginWriteRdb(new LenEofType(1000), 0);
        Assert.assertTrue(redisSlave.isOpen());
        redisSlave.sendCommandForFullSync();
        waitConditionUntilTimeOut(() -> !redisSlave.isOpen());
    }
    // beginWriteCommands must be idempotent: the second call must not register a
    // second commands listener (verify(...) defaults to "exactly once").
    @Test
    public void testMultiBeginWritingCmds() throws Exception {
        when(redisKeeperServer.getReplicationStore()).thenReturn(replicationStore);
        redisSlave.beginWriteCommands(0L);
        redisSlave.beginWriteCommands(0L);
        verify(replicationStore).addCommandsListener(anyLong(), any());
    }
}
| 3,340 |
1,909 | <reponame>danilopiazza/spring-batch
/*
* Copyright 2006-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.batch.core.resource;
import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.StepExecution;
import org.springframework.batch.core.StepExecutionListener;
import org.springframework.batch.repeat.CompletionPolicy;
import org.springframework.batch.repeat.RepeatContext;
import org.springframework.batch.repeat.RepeatStatus;
import org.springframework.batch.repeat.policy.SimpleCompletionPolicy;
import org.springframework.util.Assert;
/**
* <p>
* A {@link CompletionPolicy} that picks up a commit interval from
* {@link JobParameters} by listening to the start of a step. Use anywhere that
* a {@link CompletionPolicy} can be used (usually at the chunk level in a
* step), and inject as a {@link StepExecutionListener} into the surrounding
* step. N.B. only after the step has started will the completion policy be
* usable.
* </p>
*
* <p>
* It is easier and probably preferable to simply declare the chunk with a
* commit-interval that is a late-binding expression (e.g.
* <code>#{jobParameters['commit.interval']}</code>). That feature is available
* from of Spring Batch 2.1.7.
* </p>
*
* @author <NAME>
* @author <NAME>
*
* @see CompletionPolicy
*/
public class StepExecutionSimpleCompletionPolicy implements StepExecutionListener, CompletionPolicy {

	private CompletionPolicy delegate;

	private String keyName = "commit.interval";

	/**
	 * Public setter for the key name of a Long value in the
	 * {@link JobParameters} that will contain a commit interval. Defaults to
	 * "commit.interval".
	 * @param keyName the keyName to set
	 */
	public void setKeyName(String keyName) {
		this.keyName = keyName;
	}

	/**
	 * Set up a {@link SimpleCompletionPolicy} with a commit interval taken from
	 * the {@link JobParameters}. If there is a Long parameter with the given
	 * key name, the intValue of this parameter is used. If not an exception
	 * will be thrown.
	 *
	 * @see org.springframework.batch.core.StepExecutionListener#beforeStep(org.springframework.batch.core.StepExecution)
	 */
	@Override
	public void beforeStep(StepExecution stepExecution) {
		JobParameters jobParameters = stepExecution.getJobParameters();
		Assert.state(jobParameters.getParameters().containsKey(keyName),
				"JobParameters do not contain Long parameter with key=[" + keyName + "]");
		delegate = new SimpleCompletionPolicy(jobParameters.getLong(keyName).intValue());
	}

	/**
	 * Returns the delegate policy, failing fast with a consistent message when
	 * {@link #beforeStep(StepExecution)} has not been called yet. Centralizes
	 * the state check that was previously duplicated in every delegating
	 * method.
	 * @return the initialised delegate, never {@code null}
	 * @throws IllegalStateException if the delegate has not been initialised
	 */
	private CompletionPolicy delegate() {
		Assert.state(delegate != null, "The delegate resource has not been initialised. "
				+ "Remember to register this object as a StepListener.");
		return delegate;
	}

	/**
	 * @return true if the commit interval has been reached or the result
	 * indicates completion
	 * @see CompletionPolicy#isComplete(RepeatContext, RepeatStatus)
	 */
	@Override
	public boolean isComplete(RepeatContext context, RepeatStatus result) {
		return delegate().isComplete(context, result);
	}

	/**
	 * @return if the commit interval has been reached
	 * @see org.springframework.batch.repeat.CompletionPolicy#isComplete(org.springframework.batch.repeat.RepeatContext)
	 */
	@Override
	public boolean isComplete(RepeatContext context) {
		return delegate().isComplete(context);
	}

	/**
	 * @return a new {@link RepeatContext}
	 * @see org.springframework.batch.repeat.CompletionPolicy#start(org.springframework.batch.repeat.RepeatContext)
	 */
	@Override
	public RepeatContext start(RepeatContext parent) {
		return delegate().start(parent);
	}

	/**
	 * @see org.springframework.batch.repeat.CompletionPolicy#update(org.springframework.batch.repeat.RepeatContext)
	 */
	@Override
	public void update(RepeatContext context) {
		delegate().update(context);
	}

	/**
	 * Delegates to the wrapped {@link CompletionPolicy} if set, otherwise
	 * returns the value of {@link #setKeyName(String)}.
	 */
	@Override
	public String toString() {
		return (delegate == null) ? keyName : delegate.toString();
	}

}
| 1,431 |
341 | /*
** Bundled memory allocator.
** Donated to the public domain.
*/
#ifndef _LJ_ALLOC_H
#define _LJ_ALLOC_H
#include "lj_def.h"
#ifndef LUAJIT_USE_SYSMALLOC
/* Create a new allocator state; the returned opaque pointer is passed as
** `msp` to the functions below. */
LJ_FUNC void *lj_alloc_create(void);
/* Destroy an allocator state created by lj_alloc_create(). */
LJ_FUNC void lj_alloc_destroy(void *msp);
/* Allocation function: resizes `ptr` from `osize` to `nsize` bytes
** (allocate when ptr is NULL, free when nsize is 0). NOTE(review):
** semantics inferred from the lua_Alloc-style signature -- confirm in
** lj_alloc.c. */
LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize);
#endif
#endif
| 171 |
1,006 | <filename>arch/arm/src/sam34/sam_gpio.h<gh_stars>1000+
/****************************************************************************
* arch/arm/src/sam34/sam_gpio.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_ARM_SRC_SAM34_SAM_GPIO_H
#define __ARCH_ARM_SRC_SAM34_SAM_GPIO_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <stdint.h>
#include <stdbool.h>

#include "chip.h"

/* Pull in the GPIO definitions for the particular SAM3/SAM4 family member
 * selected by the configuration.
 */

#if defined(CONFIG_ARCH_CHIP_SAM3U)
#  include "sam3u_gpio.h"
#elif defined(CONFIG_ARCH_CHIP_SAM3X) || defined(CONFIG_ARCH_CHIP_SAM3A)
#  include "sam3x_gpio.h"
#elif defined(CONFIG_ARCH_CHIP_SAM4CM)
#  include "sam4cm_gpio.h"
#elif defined(CONFIG_ARCH_CHIP_SAM4E)
#  include "sam4e_gpio.h"
#elif defined(CONFIG_ARCH_CHIP_SAM4L)
#  include "sam4l_gpio.h"
#elif defined(CONFIG_ARCH_CHIP_SAM4S)
#  include "sam4s_gpio.h"
#else
#  error Unrecognized SAM architecture
#endif

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Configuration ************************************************************/

/* Second-level GPIO interrupt decoding is needed if (and only if) IRQ
 * support is enabled for at least one GPIO port.
 */

#if defined(CONFIG_SAM34_GPIOA_IRQ) || defined(CONFIG_SAM34_GPIOB_IRQ) || \
    defined(CONFIG_SAM34_GPIOC_IRQ) || defined(CONFIG_SAM34_GPIOD_IRQ) || \
    defined(CONFIG_SAM34_GPIOE_IRQ) || defined(CONFIG_SAM34_GPIOF_IRQ)
#  define CONFIG_SAM34_GPIO_IRQ 1
#else
#  undef CONFIG_SAM34_GPIO_IRQ
#endif

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/

#ifndef __ASSEMBLY__

/* Give the declarations C linkage when included from C++. */

#undef EXTERN
#if defined(__cplusplus)
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif

/****************************************************************************
 * Name: sam_gpioirqinitialize
 *
 * Description:
 *   Initialize logic to support a second level of interrupt decoding for
 *   GPIO pins.
 *
 ****************************************************************************/

#ifdef CONFIG_SAM34_GPIO_IRQ
void sam_gpioirqinitialize(void);
#else
#  define sam_gpioirqinitialize()
#endif

/****************************************************************************
 * Name: sam_configgpio
 *
 * Description:
 *   Configure a GPIO pin based on bit-encoded description of the pin.
 *
 ****************************************************************************/

int sam_configgpio(gpio_pinset_t cfgset);

/****************************************************************************
 * Name: sam_gpiowrite
 *
 * Description:
 *   Write one or zero to the selected GPIO pin
 *
 ****************************************************************************/

void sam_gpiowrite(gpio_pinset_t pinset, bool value);

/****************************************************************************
 * Name: sam_gpioread
 *
 * Description:
 *   Read one or zero from the selected GPIO pin
 *
 ****************************************************************************/

bool sam_gpioread(gpio_pinset_t pinset);

/****************************************************************************
 * Name: sam_gpioirq
 *
 * Description:
 *   Configure an interrupt for the specified GPIO pin.
 *
 ****************************************************************************/

#ifdef CONFIG_SAM34_GPIO_IRQ
void sam_gpioirq(gpio_pinset_t pinset);
#else
#  define sam_gpioirq(pinset)
#endif

/****************************************************************************
 * Name: sam_gpioirqenable
 *
 * Description:
 *   Enable the interrupt for specified GPIO IRQ
 *
 ****************************************************************************/

#ifdef CONFIG_SAM34_GPIO_IRQ
void sam_gpioirqenable(int irq);
#else
#  define sam_gpioirqenable(irq)
#endif

/****************************************************************************
 * Name: sam_gpioirqdisable
 *
 * Description:
 *   Disable the interrupt for specified GPIO IRQ
 *
 ****************************************************************************/

#ifdef CONFIG_SAM34_GPIO_IRQ
void sam_gpioirqdisable(int irq);
#else
#  define sam_gpioirqdisable(irq)
#endif

/****************************************************************************
 * Function:  sam_dumpgpio
 *
 * Description:
 *   Dump all GPIO registers associated with the base address of the provided
 *   pinset.
 *
 ****************************************************************************/

#ifdef CONFIG_DEBUG_GPIO_INFO
int sam_dumpgpio(uint32_t pinset, const char *msg);
#else
#  define sam_dumpgpio(p,m)
#endif

#undef EXTERN
#if defined(__cplusplus)
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_ARM_SRC_SAM34_SAM_GPIO_H */
892 | <reponame>westonsteimel/advisory-database-github
{
"schema_version": "1.2.0",
"id": "GHSA-mfrc-8wqr-f298",
"modified": "2022-05-01T02:28:05Z",
"published": "2022-05-01T02:28:05Z",
"aliases": [
"CVE-2005-4515"
],
"details": "** DISPUTED ** SQL injection vulnerability in WebDB 1.1 and earlier allows remote attackers to execute arbitrary SQL commands via unspecified search parameters, possibly Search0. NOTE: the vendor has disputed this issue, saying that \"WebDB is a generic online database system used by many of the clients of Lois Software. The flaw that was identified was some code that was added for a client to do some testing of his system and only certain safe commands were allowed. This code has now been removed and it is not now possible to use SQL queries as part of the query string. No installation or patch is required All clients use a common code library and have their own front end and databases and connections. So as soon as a change / upgrade / enhancement is made to the code, all users of the software begin to use the latest changes immediately.\" Since the issue appeared in a custom web site and no action is required on the part of customers, this issue should not be included in CVE.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2005-4515"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/23840"
},
{
"type": "WEB",
"url": "http://pridels0.blogspot.com/2005/12/webdb-sql-inj-vuln.html"
},
{
"type": "WEB",
"url": "http://pridels0.blogspot.com/2005/12/webdb-sql-inj-vuln.html#c114176251867558161"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/18226"
},
{
"type": "WEB",
"url": "http://www.osvdb.org/21910"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/16038"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2005/3071"
}
],
"database_specific": {
"cwe_ids": [
"CWE-89"
],
"severity": "HIGH",
"github_reviewed": false
}
} | 855 |
388 | <gh_stars>100-1000
#ifndef _FILTER_NORMAL_H_
#define _FILTER_NORMAL_H_
#include "util_render_target.h"
/* One-time setup for the normal filter. NOTE(review): return convention
 * (presumably 0 on success) is not visible here -- confirm in the .c file. */
int init_normal_filter ();
/* Apply the normal filter, reading from src_fbo and rendering into dst_fbo. */
int apply_normal_filter (render_target_t *dst_fbo, render_target_t *src_fbo);
#endif /* _FILTER_NORMAL_H_ */
| 103 |
434 | #ifndef KVM__VIRTIO_9P_H
#define KVM__VIRTIO_9P_H
#include "kvm/virtio.h"
#include "kvm/pci.h"
#include "kvm/threadpool.h"
#include "kvm/parse-options.h"
#include <dirent.h>
#include <linux/list.h>
#include <linux/rbtree.h>
/* Number of virtqueues used by the virtio-9p device. */
#define NUM_VIRT_QUEUES 1
/* Virtqueue size; also bounds the iovec arrays in struct p9_pdu. */
#define VIRTQUEUE_NUM 128
#define VIRTIO_9P_DEFAULT_TAG "kvm_9p"
/* Size of the fixed 9P message header: size[4] + type[1] + tag[2]
 * (matches the layout of struct p9_msg below). */
#define VIRTIO_9P_HDR_LEN (sizeof(u32)+sizeof(u8)+sizeof(u16))
#define VIRTIO_9P_VERSION_DOTL "9P2000.L"
#define MAX_TAG_LEN 32

/* On-the-wire 9P message: fixed header followed by a variable payload. */
struct p9_msg {
	u32 size; /* total message length in bytes, including this header */
	u8 cmd; /* 9P message type */
	u16 tag; /* request tag, echoed in the matching reply */
	u8 msg[0]; /* variable-length message body */
} __attribute__((packed));

/* Server-side state for one 9P fid (client file handle). */
struct p9_fid {
	u32 fid; /* fid number chosen by the client */
	u32 uid;
	char abs_path[PATH_MAX]; /* absolute host path backing this fid */
	char *path; /* presumably points into abs_path, relative to the export root -- confirm */
	DIR *dir; /* open directory stream, when the fid refers to a directory */
	int fd; /* open file descriptor, when the fid refers to a file */
	struct rb_node node; /* linkage into p9_dev.fids */
};

/* Per-virtqueue worker context handed to the thread pool. */
struct p9_dev_job {
	struct virt_queue *vq;
	struct p9_dev *p9dev;
	struct thread_pool__job job_id;
};

/* One virtio-9p device instance. */
struct p9_dev {
	struct list_head list;
	struct virtio_device vdev;
	struct rb_root fids; /* tree of active p9_fid entries, keyed by fid */
	struct virtio_9p_config *config;
	u32 features;

	/* virtio queue */
	struct virt_queue vqs[NUM_VIRT_QUEUES];
	struct p9_dev_job jobs[NUM_VIRT_QUEUES];
	char root_dir[PATH_MAX]; /* host directory exported to the guest */
};

/* Parse/serialize state for a single 9P protocol data unit. */
struct p9_pdu {
	u32 queue_head; /* descriptor-chain head index in the virtqueue */
	size_t read_offset;
	size_t write_offset;
	u16 out_iov_cnt;
	u16 in_iov_cnt;
	struct iovec in_iov[VIRTQUEUE_NUM];
	struct iovec out_iov[VIRTQUEUE_NUM];
};

struct kvm;

/* Command-line option parsers for the 9p root directory and image name. */
int virtio_9p_rootdir_parser(const struct option *opt, const char *arg, int unset);
int virtio_9p_img_name_parser(const struct option *opt, const char *arg, int unset);
/* Register a 9p export of host directory `root` under mount tag `tag_name`. */
int virtio_9p__register(struct kvm *kvm, const char *root, const char *tag_name);
int virtio_9p__init(struct kvm *kvm);
/* printf/scanf-style (de)serializers for 9P PDUs. */
int virtio_p9_pdu_readf(struct p9_pdu *pdu, const char *fmt, ...);
int virtio_p9_pdu_writef(struct p9_pdu *pdu, const char *fmt, ...);
| 835 |
2,753 | /*
* This software is distributed under BSD 3-clause license (see LICENSE file).
*
* Authors: <NAME>, <NAME>, <NAME>, <NAME>,
* <NAME>, <NAME>, <NAME>, <NAME>,
* <NAME>, <NAME>, <NAME>
*/
#ifndef GAUSSIANKERNEL_H
#define GAUSSIANKERNEL_H
#include <shogun/lib/config.h>
#include <shogun/kernel/ShiftInvariantKernel.h>
namespace shogun
{
class Features;
class DotFeatures;
namespace params {
class GaussianWidthAutoInit;
}
/** @brief The well known Gaussian kernel (swiss army knife for SVMs) computed
* on DotFeatures.
*
* It is computed as
*
* \f[
* k({\bf x},{\bf x'})= exp(-\frac{||{\bf x}-{\bf x'}||^2}{\tau})
* \f]
*
* where \f$\tau\f$ is the kernel width.
*
* If the kernel width is not provided it will be computed automatically using
* the median heuristics as described in
* http://www.stats.ox.ac.uk/~sejdinov/talks/pdf/2016-09-07_RSSManchester.pdf,
* where \f$\tau=2\theta^2\f$ and \f$\theta = median(||{\bf x}-{\bf x'}||^2)\f$.
*/
class GaussianKernel: public ShiftInvariantKernel
{
	/* Allows the width auto-initialization strategy to write m_width directly. */
	friend class params::GaussianWidthAutoInit;

	public:
		/** default constructor */
		GaussianKernel();

		/** constructor
		 *
		 * @param width width
		 */
		GaussianKernel(float64_t width);

		/** constructor
		 *
		 * @param size cache size
		 * @param width width
		 */
		GaussianKernel(int32_t size, float64_t width);

		/** constructor
		 *
		 * @param l features of left-hand side
		 * @param r features of right-hand side
		 * @param width width
		 * @param size cache size
		 */
		GaussianKernel(const std::shared_ptr<DotFeatures>& l, const std::shared_ptr<DotFeatures>& r, float64_t width, int32_t size=10);

		/** destructor */
		~GaussianKernel() override;

		/** @param kernel is casted to GaussianKernel, error if not possible
		 * is SG_REF'ed
		 * @return casted GaussianKernel object
		 */
		static std::shared_ptr<GaussianKernel> obtain_from_generic(const std::shared_ptr<Kernel>& kernel);

		/** initialize kernel
		 *
		 * @param l features of left-hand side
		 * @param r features of right-hand side
		 * @return if initializing was successful
		 */
		bool init(std::shared_ptr<Features> l, std::shared_ptr<Features> r) override;

		/** clean up kernel */
		void cleanup() override;

		/** return what type of kernel we are
		 *
		 * @return kernel type GAUSSIAN
		 */
		EKernelType get_kernel_type() override
		{
			return K_GAUSSIAN;
		}

		/** @return feature type of distance used */
		EFeatureType get_feature_type() override
		{
			return F_ANY;
		}

		/** @return feature class of distance used */
		EFeatureClass get_feature_class() override
		{
			return C_ANY;
		}

		/** return the kernel's name
		 *
		 * @return name Gaussian
		 */
		const char* get_name() const override { return "GaussianKernel"; }

		/** set the kernel's width
		 *
		 * @param w kernel width
		 */
		void set_width(float64_t w);

		/** return the kernel's width
		 *
		 * @return kernel width
		 */
		float64_t get_width() const
		{
			return std::get<float64_t>(m_width);
		}

		/** return derivative with respect to specified parameter
		 *
		 * @param param the parameter
		 * @param index the index of the element if parameter is a vector
		 *
		 * @return gradient with respect to parameter
		 */
		SGMatrix<float64_t> get_parameter_gradient(Parameters::const_reference param, index_t index=-1) override;

		/** Can (optionally) be overridden to post-initialize some member
		 *  variables which are not PARAMETER::ADD'ed.  Make sure that at first
		 *  the overridden method BASE_CLASS::LOAD_SERIALIZABLE_POST is called.
		 *
		 *  @exception ShogunException Will be thrown if an error occurres.
		 */
		void load_serializable_post() override;

	protected:
		/** compute kernel function for features a and b
		 * idx_{a,b} denote the index of the feature vectors
		 * in the corresponding feature object
		 *
		 * @param idx_a index a
		 * @param idx_b index b
		 * @return computed kernel function at indices a,b
		 */
		float64_t compute(int32_t idx_a, int32_t idx_b) override;

		/** compute the distance between features a and b
		 * idx_{a,b} denote the index of the feature vectors
		 * in the corresponding feature object
		 *
		 * @param idx_a index a
		 * @param idx_b index b
		 * @return computed the distance
		 *
		 * Note that in GaussianKernel,
		 * kernel(idx_a, idx_b)=exp(-distance(idx_a, idx_b))
		 * \f[
		 * distance({\bf x},{\bf y})= \frac{||{\bf x}-{\bf y}||^2}{\tau}
		 * \f]
		 */
		float64_t distance(int32_t idx_a, int32_t idx_b) const override;

	protected:
		/** width \f$\tau\f$; starts out empty so that it can be
		 * auto-initialized (median heuristic, see the class documentation)
		 * when no explicit width was provided */
		AutoValue<float64_t> m_width = AutoValueEmpty{};
};
}
#endif /* _GAUSSIANKERNEL_H__ */
| 1,585 |
601 | <gh_stars>100-1000
/*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.data.jdbc.repository;
import static org.assertj.core.api.Assertions.*;
import junit.framework.AssertionFailedError;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.With;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.StringJoiner;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.function.UnaryOperator;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.platform.commons.util.ExceptionUtils;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.dao.IncorrectUpdateSemanticsDataAccessException;
import org.springframework.data.annotation.Id;
import org.springframework.data.jdbc.repository.support.JdbcRepositoryFactory;
import org.springframework.data.jdbc.testing.TestConfiguration;
import org.springframework.data.repository.CrudRepository;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.support.TransactionTemplate;
/**
* Tests that highly concurrent update operations of an entity don't cause deadlocks.
*
* @author <NAME>
* @author <NAME>
*/
@ExtendWith(SpringExtension.class)
public class JdbcRepositoryConcurrencyIntegrationTests {
	// Test application context: wires a repository instance through the shared
	// JdbcRepositoryFactory provided by TestConfiguration.
	@Configuration
	@Import(TestConfiguration.class)
	static class Config {
		@Autowired JdbcRepositoryFactory factory;
		// Tells TestConfiguration which test class is running (used for schema lookup
		// by convention -- confirm against TestConfiguration).
		@Bean
		Class<?> testClass() {
			return JdbcRepositoryConcurrencyIntegrationTests.class;
		}
		@Bean
		DummyEntityRepository dummyEntityRepository() {
			return factory.getRepository(DummyEntityRepository.class);
		}
	}
@Autowired NamedParameterJdbcTemplate template;
@Autowired DummyEntityRepository repository;
@Autowired PlatformTransactionManager transactionManager;
List<DummyEntity> concurrencyEntities;
DummyEntity entity;
TransactionTemplate transactionTemplate;
List<Exception> exceptions;
	@BeforeAll
	public static void beforeClass() {
		// Register a custom AssertJ representation for CopyOnWriteArrayList so that,
		// on assertion failure, collected exceptions are rendered with their full
		// stack traces instead of just toString().
		Assertions.registerFormatterForType(CopyOnWriteArrayList.class, l -> {
			StringJoiner joiner = new StringJoiner(", ", "List(", ")");
			l.forEach(e -> {
				if (e instanceof Throwable) {
					printThrowable(joiner, (Throwable) e);
				} else {
					joiner.add(e.toString());
				}
			});
			return joiner.toString();
		});
	}
private static void printThrowable(StringJoiner joiner, Throwable t) {
joiner.add(t.toString() + ExceptionUtils.readStackTrace(t));
if (t.getCause() != null) {
joiner.add("\ncaused by:\n");
printThrowable(joiner, t.getCause());
}
}
	@BeforeEach
	public void before() {
		// Persist one root entity, then derive several detached states of it that the
		// worker threads will save concurrently; exceptions thrown by workers are
		// collected thread-safely for later assertions.
		entity = repository.save(createDummyEntity());
		assertThat(entity.getId()).isNotNull();
		concurrencyEntities = createEntityStates(entity);
		transactionTemplate = new TransactionTemplate(this.transactionManager);
		exceptions = new CopyOnWriteArrayList<>();
	}
	// Saves many states of the same aggregate in parallel and verifies that no
	// thread throws (i.e. no deadlock) and that the final state is consistent.
	@Test // DATAJDBC-488
	public void updateConcurrencyWithEmptyReferences() throws Exception {
		// latch for all threads to wait on.
		CountDownLatch startLatch = new CountDownLatch(concurrencyEntities.size());
		// latch for main thread to wait on until all threads are done.
		CountDownLatch doneLatch = new CountDownLatch(concurrencyEntities.size());
		UnaryOperator<DummyEntity> action = e -> repository.save(e);
		concurrencyEntities.forEach(e -> executeInParallel(startLatch, doneLatch, action, e));
		doneLatch.await();
		DummyEntity reloaded = repository.findById(entity.id).orElseThrow(AssertionFailedError::new);
		assertThat(reloaded.content).hasSize(2);
		assertThat(exceptions).isEmpty();
	}
	// Races concurrent saves of the aggregate against a deleteById of the same
	// aggregate; updates that lose the race are expected to fail with
	// IncorrectUpdateSemanticsDataAccessException and are tolerated.
	@Test // DATAJDBC-493
	public void concurrentUpdateAndDelete() throws Exception {
		CountDownLatch startLatch = new CountDownLatch(concurrencyEntities.size() + 1); // latch for all threads to wait on.
		CountDownLatch doneLatch = new CountDownLatch(concurrencyEntities.size() + 1); // latch for main thread to wait on
		// until all threads are done.
		UnaryOperator<DummyEntity> updateAction = e -> {
			try {
				return repository.save(e);
			} catch (Exception ex) {
				// When the delete execution is complete, the Update execution throws an
				// IncorrectUpdateSemanticsDataAccessException.
				if (ex.getCause() instanceof IncorrectUpdateSemanticsDataAccessException) {
					return null;
				}
				throw ex;
			}
		};
		UnaryOperator<DummyEntity> deleteAction = e -> {
			repository.deleteById(entity.id);
			return null;
		};
		concurrencyEntities.forEach(e -> executeInParallel(startLatch, doneLatch, updateAction, e));
		executeInParallel(startLatch, doneLatch, deleteAction, entity);
		doneLatch.await();
		assertThat(exceptions).isEmpty();
		assertThat(repository.findById(entity.id)).isEmpty();
	}
/**
 * Races many updates of one aggregate against a single {@code deleteAll} and
 * verifies that neither side throws unexpectedly and the table ends up empty.
 */
@Test // DATAJDBC-493
public void concurrentUpdateAndDeleteAll() throws Exception {

    CountDownLatch startLatch = new CountDownLatch(concurrencyEntities.size() + 1); // latch for all threads to wait on.
    CountDownLatch doneLatch = new CountDownLatch(concurrencyEntities.size() + 1); // latch for main thread to wait on
    // until all threads are done.

    UnaryOperator<DummyEntity> updateAction = e -> {
        try {
            return repository.save(e);
        } catch (Exception ex) {
            // When the delete execution is complete, the Update execution throws an
            // IncorrectUpdateSemanticsDataAccessException.
            if (ex.getCause() instanceof IncorrectUpdateSemanticsDataAccessException) {
                return null;
            }
            throw ex;
        }
    };

    UnaryOperator<DummyEntity> deleteAction = e -> {
        repository.deleteAll();
        return null;
    };

    concurrencyEntities.forEach(e -> executeInParallel(startLatch, doneLatch, updateAction, e));
    executeInParallel(startLatch, doneLatch, deleteAction, entity);

    doneLatch.await();

    assertThat(exceptions).isEmpty();
    assertThat(repository.count()).isEqualTo(0);
}
/**
 * Runs {@code action} on {@code entity} in its own thread, inside its own
 * transaction. All participating threads first rendezvous on {@code startLatch}
 * so that the database operations execute as concurrently as possible;
 * {@code doneLatch} is counted down when the thread finishes, success or
 * failure. Any exception is recorded in {@link #exceptions} so the test thread
 * can assert on it.
 *
 * <p>Note: the parameter was previously named {@code deleteAction}, which was
 * misleading — callers pass update actions through here as well.
 */
private void executeInParallel(CountDownLatch startLatch, CountDownLatch doneLatch,
        UnaryOperator<DummyEntity> action, DummyEntity entity) {

    new Thread(() -> {
        try {
            // Signal readiness, then wait until every thread is ready before touching the DB.
            startLatch.countDown();
            startLatch.await();

            transactionTemplate.execute(status -> action.apply(entity));
        } catch (Exception ex) {
            exceptions.add(ex);
        } finally {
            doneLatch.countDown();
        }
    }).start();
}
/**
 * Builds 50 variants of {@code entity}, each with a distinct name and a fresh
 * two-element content list, to be saved concurrently by the tests.
 */
private List<DummyEntity> createEntityStates(DummyEntity entity) {

    List<DummyEntity> concurrencyEntities = new ArrayList<>();
    Element element1 = new Element(null, 1L);
    Element element2 = new Element(null, 2L);

    for (int i = 0; i < 50; i++) {
        // Offset the element content by the loop index so every variant differs.
        List<Element> newContent = Arrays.asList(element1.withContent(element1.content + i + 2),
                element2.withContent(element2.content + i + 2));
        concurrencyEntities.add(entity.withName(entity.getName() + i).withContent(newContent));
    }
    return concurrencyEntities;
}
/** Creates an unsaved aggregate root with no id and an empty (mutable) content list. */
private static DummyEntity createDummyEntity() {
    return new DummyEntity(null, "Entity Name", new ArrayList<>());
}
/** Repository under test; plain CRUD, no custom methods. */
interface DummyEntityRepository extends CrudRepository<DummyEntity, Long> {}

/** Aggregate root: a name plus a list of child {@link Element}s. */
@Getter
@AllArgsConstructor
static class DummyEntity {
    @Id private Long id;
    @With String name;
    @With final List<Element> content;
}

/** Child entity of {@link DummyEntity}. */
@AllArgsConstructor
static class Element {
    @Id private Long id;
    @With final Long content;
}
}
| 2,854 |
436 | <gh_stars>100-1000
/*
* Copyright 2003-2005 the original author or authors.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.jdon.jivejdon.api;
import com.jdon.controller.events.EventModel;
import com.jdon.jivejdon.auth.CUDInputInterceptor;
import com.jdon.jivejdon.domain.model.ForumMessage;
import com.jdon.jivejdon.domain.model.ForumThread;
import com.jdon.jivejdon.infrastructure.dto.AnemicMessageDTO;
import com.jdon.jivejdon.domain.model.message.output.RenderingFilterManager;
/**
 * Message operations interface. If you modify this interface, remember to
 * update com.jdon.jivejdon.domain.model.jivejdon_permission.xml accordingly.
 *
 * @author <a href="mailto:<EMAIL>">banq</a>
 * @see CUDInputInterceptor
 *
 */
public interface ForumMessageService {

    /** Prepares a message model for the "create topic" form. */
    AnemicMessageDTO initMessage(EventModel em);

    /** Prepares a message model for the "reply" form. */
    AnemicMessageDTO initReplyMessage(EventModel em);

    /**
     * Has authorization; no cache interceptor. Called for a message's modification
     * or deletion, so the first access via this method must be permission-checked.
     * It is configured in jdonframework.xml:
     *
     * <getMethod name="findMessage"/>
     */
    AnemicMessageDTO findMessage(Long messageId);

    /**
     * No authorization; the cache interceptor applies. Called for message listing,
     * not for modification or deletion.
     *
     * Currently MessageListAction or MessageRecursiveListAction call this method.
     */
    ForumMessage getMessage(Long messageId);

    /**
     * Creates a topic message; it is a root message.
     */
    Long createTopicMessage(EventModel em) throws Exception;

    /**
     * Creates a reply message.
     */
    Long createReplyMessage(EventModel em) throws Exception;

    /** Updates an existing message (details carried in the event model). */
    void updateMessage(EventModel em) throws Exception;

    /** Deletes a message (details carried in the event model). */
    void deleteMessage(EventModel em) throws Exception;

    /** Deletes the messages of the given user. */
    void deleteUserMessages(String username) throws Exception;

    /**
     * For batch inquiry.
     */
    ForumThread getThread(Long id);

    /** Returns the manager of output rendering filters. */
    RenderingFilterManager getFilterManager();

    /**
     * Checks whether forumMessage is authorized for the current login user.
     */
    boolean checkIsAuthenticated(ForumMessage forumMessage);

    // /message/messageMaskAction.shtml?method=maskMessage
    void maskMessage(EventModel em) throws Exception;

    /** Renames the thread identified by threadId. */
    public void updateThreadName(Long threadId, String name) throws Exception;
}
| 785 |
6,270 | [
{
"type": "bugfix",
"category": "S3",
"description": "fix issues when SDK populates wrong path if access point arn contains forward slash"
},
{
"type": "feature",
"category": "ApplicationAutoScaling",
"description": "This release supports auto scaling of provisioned concurrency for AWS Lambda."
},
{
"type": "feature",
"category": "EBS",
"description": "This release introduces the EBS direct APIs for Snapshots: 1. ListSnapshotBlocks, which lists the block indexes and block tokens for blocks in an Amazon EBS snapshot. 2. ListChangedBlocks, which lists the block indexes and block tokens for blocks that are different between two snapshots of the same volume/snapshot lineage. 3. GetSnapshotBlock, which returns the data in a block of an Amazon EBS snapshot."
},
{
"type": "feature",
"category": "Lambda",
"description": "- Added the ProvisionedConcurrency type and operations. Allocate provisioned concurrency to enable your function to scale up without fluctuations in latency. Use PutProvisionedConcurrencyConfig to configure provisioned concurrency on a version of a function, or on an alias."
},
{
"type": "feature",
"category": "RDS",
"description": "This release adds support for the Amazon RDS Proxy"
},
{
"type": "feature",
"category": "Rekognition",
"description": "This SDK Release introduces APIs for Amazon Rekognition Custom Labels feature (CreateProjects, CreateProjectVersion,DescribeProjects, DescribeProjectVersions, StartProjectVersion, StopProjectVersion and DetectCustomLabels). Also new is AugmentedAI (Human In The Loop) Support for DetectModerationLabels in Amazon Rekognition."
},
{
"type": "feature",
"category": "SageMaker",
"description": "You can now use SageMaker Autopilot for automatically training and tuning candidate models using a combination of various feature engineering, ML algorithms, and hyperparameters determined from the user's input data. SageMaker Automatic Model Tuning now supports tuning across multiple algorithms. With Amazon SageMaker Experiments users can create Experiments, ExperimentTrials, and ExperimentTrialComponents to track, organize, and evaluate their ML training jobs. With Amazon SageMaker Debugger, users can easily debug training jobs using a number of pre-built rules provided by Amazon SageMaker, or build custom rules. With Amazon SageMaker Processing, users can run on-demand, distributed, and fully managed jobs for data pre- or post- processing or model evaluation. With Amazon SageMaker Model Monitor, a user can create MonitoringSchedules to automatically monitor endpoints to detect data drift and other issues and get alerted on them. This release also includes the preview version of Amazon SageMaker Studio with Domains, UserProfiles, and Apps. This release also includes the preview version of Amazon Augmented AI to easily implement human review of machine learning predictions by creating FlowDefinitions, HumanTaskUis, and HumanLoops."
},
{
"type": "feature",
"category": "StepFunctions",
"description": "This release of the AWS Step Functions SDK introduces support for Express Workflows."
}
] | 937 |
369 | /*
* File: stdlib.h
* Purpose: Function prototypes for standard library functions
*
* Notes:
*/
#ifndef _STDLIB_H
#define _STDLIB_H

/********************************************************************
 * Standard library functions
 *
 * Minimal freestanding prototypes for an embedded environment.
 * NOTE(review): several signatures deviate from ISO C (strtoul takes a
 * non-const char *, size parameters are int rather than size_t);
 * presumably they match the accompanying implementation -- confirm
 * before aligning them with the standard.
 ********************************************************************/

/* Character classification (ctype-style; argument is an unsigned char value or EOF). */
int
isspace (int);
int
isalnum (int);
int
isdigit (int);
int
isupper (int);

/* Case-insensitive string comparison. */
int
strcasecmp (const char *, const char *);
int
strncasecmp (const char *, const char *, int);

/* Numeric conversion. */
unsigned long
strtoul (char *, char **, int);

/* String length, concatenation, copy and comparison. */
int
strlen (const char *);
char *
strcat (char *, const char *);
char *
strncat (char *, const char *, int);
char *
strcpy (char *, const char *);
char *
strncpy (char *, const char *, int);
int
strcmp (const char *, const char *);
int
strncmp (const char *, const char *, int);

/* Raw memory operations. */
void *
memcpy (void *, const void *, unsigned);
void *
memset (void *, int, unsigned);

/* Heap allocation. */
void
free (void *);
void *
malloc (unsigned);

/* Pseudo-random number generation. */
#define RAND_MAX 32767
int
rand (void);
void
srand (int);

/********************************************************************/

#endif
| 359 |
831 | <reponame>qq1056779951/android<gh_stars>100-1000
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.idea.diagnostics;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import com.android.tools.idea.diagnostics.report.DiagnosticReport;
import com.android.tools.idea.diagnostics.report.DiagnosticReportProperties;
import com.android.tools.idea.diagnostics.report.FreezeReport;
import com.android.tools.idea.diagnostics.report.HistogramReport;
import com.android.tools.idea.diagnostics.report.MemoryReportReason;
import com.android.tools.idea.diagnostics.report.PerformanceThreadDumpReport;
import com.google.common.base.Charsets;
import com.intellij.util.containers.ContainerUtil;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.text.DateFormat;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import org.hamcrest.CoreMatchers;
import org.jetbrains.annotations.NotNull;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
/**
 * Tests for {@code StudioReportDatabase}: appending diagnostic reports,
 * "reaping" (read-and-clear) them, round-tripping each report type, and
 * resilience against a corrupted database file.
 */
public class StudioReportDatabaseTest {
    @Rule
    public TemporaryFolder myTestFolder = new TemporaryFolder();

    private File databaseFile;
    private StudioReportDatabase db;

    @Before
    public void setup() {
        databaseFile = new File(myTestFolder.getRoot(), "threads.dmp");
        db = new StudioReportDatabase(databaseFile);
    }

    @Test
    public void testEmptyDatabase() throws IOException {
        // A fresh database contains no reports.
        assertEquals(0, db.getReports().size());

        db.appendReport(new PerformanceThreadDumpReport(createTempFileWithThreadDump("1"), "test"));

        // Reaping returns the pending report and clears the database.
        // (Previously the reap result was assigned but never asserted.)
        List<DiagnosticReport> reaped = db.reapReports();
        assertEquals(1, reaped.size());
        assertEquals(0, db.getReports().size());
        assertEquals(0, db.reapReports().size());
    }

    @Test
    public void testParser() throws IOException {
        Path t1 = createTempFileWithThreadDump("1");
        Path t2 = createTempFileWithThreadDump("1");
        db.appendReport(new PerformanceThreadDumpReport(t1, "test"));
        db.appendReport(new PerformanceThreadDumpReport(t2, "test"));

        // Both appended reports come back, carrying their thread-dump paths.
        List<DiagnosticReport> reports = db.reapReports();
        List<Path> paths = ContainerUtil.map(reports, r -> ((PerformanceThreadDumpReport)r).getThreadDumpPath());
        assertThat(paths, hasItems(t1, t2));

        // Reaping is destructive: a second reap is empty.
        reports = db.reapReports();
        assertTrue(reports.isEmpty());
    }

    @Test
    public void testDifferentTypes() throws IOException {
        Path h1 = createTempFileWithThreadDump("H1");
        Path t1 = createTempFileWithThreadDump("T1");
        Path t2 = createTempFileWithThreadDump("T2");
        Path h3 = createTempFileWithThreadDump("H3");
        Path t3 = createTempFileWithThreadDump("T3");

        db.appendReport(new HistogramReport(t1, h1, MemoryReportReason.LowMemory, "test"));
        db.appendReport(new PerformanceThreadDumpReport(t2, "test"));
        db.appendReport(new HistogramReport(t3, h3, MemoryReportReason.LowMemory, "test"));

        // Mixed report types coexist and keep their type tags.
        List<DiagnosticReport> reports = db.reapReports();
        assertEquals(3, reports.size());
        assertEquals(2, reports.stream().filter(r -> r.getType().equals("Histogram")).count());
        assertEquals(1, reports.stream().filter(r -> r.getType().equals("PerformanceThreadDump")).count());
    }

    @Test
    public void testHistogramContent() throws IOException {
        Path h1 = createTempFileWithThreadDump("H1");
        Path t1 = createTempFileWithThreadDump("T1");
        db.appendReport(new HistogramReport(t1, h1, MemoryReportReason.LowMemory, "Histogram description"));

        // All fields of a HistogramReport survive the round trip.
        DiagnosticReport details = db.reapReports().get(0);
        assertThat(details, CoreMatchers.is(instanceOf(HistogramReport.class)));
        HistogramReport report = (HistogramReport) details;
        assertEquals("Histogram", details.getType());
        assertEquals("LowMemory", report.getReason().toString());
        assertEquals(t1, report.getThreadDumpPath());
        assertEquals(h1, report.getHistogramPath());
        assertEquals("Histogram description", report.getDescription());
    }

    @Test
    public void testFreezeContent() throws IOException {
        Path threadDump = createTempFileWithThreadDump("T1");
        Path actions = createTempFileWithThreadDump("Actions");
        Path memoryUse = createTempFileWithThreadDump("Memory use");
        Path profile = createTempFileWithThreadDump("Profile");

        Map<String, Path> paths = new TreeMap<>();
        paths.put("actionsDiagnostics", actions);
        paths.put("memoryUseDiagnostics", memoryUse);
        paths.put("profileDiagnostics", profile);

        // One completed freeze (duration known) and one that timed out (duration unknown).
        db.appendReport(new FreezeReport(threadDump, paths, false, 20L, "Freeze report"));
        db.appendReport(new FreezeReport(threadDump, paths, true, null, "Freeze report"));

        List<DiagnosticReport> diagnosticReports = db.reapReports();
        FreezeReport report = (FreezeReport) diagnosticReports.get(0);
        assertEquals("Freeze", report.getType());
        assertEquals(threadDump, report.getThreadDumpPath());
        assertEquals(paths, report.getReportParts());
        assertEquals(20, report.getTotalDuration().longValue());
        assertFalse(report.getTimedOut());
        assertEquals("Freeze report", report.getDescription());

        assertTrue(((FreezeReport) diagnosticReports.get(1)).getTimedOut());
        assertNull(((FreezeReport) diagnosticReports.get(1)).getTotalDuration());
    }

    @Test
    public void testEmptyFreezeReport() throws IOException {
        // A FreezeReport with every optional part absent still round-trips.
        db.appendReport(new FreezeReport(null, new TreeMap<>(), false, null, null));

        FreezeReport report = (FreezeReport) db.reapReports().get(0);
        assertNull(report.getThreadDumpPath());
        assertEquals(0, report.getReportParts().size());
        assertNull(report.getTotalDuration());
        assertNull(report.getDescription());
    }

    @Test
    public void testPerformanceThreadDumpContent() throws IOException {
        Path t1 = createTempFileWithThreadDump("T1");
        db.appendReport(new PerformanceThreadDumpReport(t1, "Performance thread dump description"));

        DiagnosticReport details = db.reapReports().get(0);
        assertEquals("PerformanceThreadDump", details.getType());
        assertEquals(t1, ((PerformanceThreadDumpReport) details).getThreadDumpPath());
        assertEquals("Performance thread dump description", ((PerformanceThreadDumpReport) details).getDescription());
    }

    @Test
    public void testCorruptedDatabaseFile() throws IOException {
        Path t1 = createTempFileWithThreadDump("T1");
        db.appendReport(new PerformanceThreadDumpReport(t1, "Performance thread dump description"));

        // Overwrite the on-disk database with garbage.
        Files.write(databaseFile.toPath(), "Corrupted json".getBytes(Charsets.UTF_8), StandardOpenOption.TRUNCATE_EXISTING);

        List<DiagnosticReport> details = db.reapReports();
        // If the db file contains corrupted or malformed json, return no reports.
        assertEquals(0, details.size());

        // Test that the database works even after its file gets corrupted.
        Path t2 = createTempFileWithThreadDump("T2");
        db.appendReport(new PerformanceThreadDumpReport(t2, "Performance thread dump description"));
        details = db.reapReports();
        assertEquals(1, details.size());
        assertEquals(t2, ((PerformanceThreadDumpReport) details.get(0)).getThreadDumpPath());
    }

    @Test
    public void testDiagnosticProperties() throws Exception {
        Path t1 = createTempFileWithThreadDump("T1");
        Path t2 = createTempFileWithThreadDump("T2");
        long time = DateFormat.getInstance().parse("07/10/2018 4:05 PM, PDT").getTime();
        DiagnosticReportProperties properties = new DiagnosticReportProperties(
            1000, // uptime
            time, // report time
            "testSessionId",
            "1.2.3.4", // studio version
            "9.8.7.6" // kotlin version
        );
        db.appendReport(new HistogramReport(t1, t2, MemoryReportReason.LowMemory, "", properties));

        // Properties are serialized and re-created: equal by value, not the same instance.
        List<DiagnosticReport> reports = db.reapReports();
        HistogramReport report = (HistogramReport) reports.get(0);
        assertNotSame(properties, report.getProperties());
        assertEquals(properties, report.getProperties());
    }

    /** Writes {@code contents} (UTF-8) to a fresh temp file and returns its path. */
    @NotNull
    private Path createTempFileWithThreadDump(@NotNull String contents) throws IOException {
        File file = myTestFolder.newFile();
        Files.write(file.toPath(), contents.getBytes(Charsets.UTF_8), StandardOpenOption.CREATE);
        return file.toPath();
    }
}
| 2,952 |
1,755 | /*=========================================================================
Program: Visualization Toolkit
Module: vtkPMergeArrays.h
Copyright (c) <NAME>, <NAME>, <NAME>
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
/**
 * @class vtkPMergeArrays
 * @brief Multiple inputs with one output, parallel version
 *
 * Like its superclass, this filter tries to combine all arrays from the
 * inputs into one output.
 *
 * @sa
 * vtkMergeArrays
 */
#ifndef vtkPMergeArrays_h
#define vtkPMergeArrays_h
#include "vtkFiltersParallelModule.h" // For export macro
#include "vtkMergeArrays.h"
class VTKFILTERSPARALLEL_EXPORT vtkPMergeArrays : public vtkMergeArrays
{
public:
  vtkTypeMacro(vtkPMergeArrays, vtkMergeArrays);
  void PrintSelf(ostream& os, vtkIndent indent) override;
  static vtkPMergeArrays* New();

protected:
  vtkPMergeArrays();
  ~vtkPMergeArrays() override = default;

  // Merges the field data of `input` (input number `idx`) into `output`.
  // Overridden from vtkMergeArrays for the parallel case.
  int MergeDataObjectFields(vtkDataObject* input, int idx, vtkDataObject* output) override;

private:
  vtkPMergeArrays(const vtkPMergeArrays&) = delete;
  void operator=(const vtkPMergeArrays&) = delete;
};
#endif
| 465 |
2,420 | from igramscraper.two_step_verification.two_step_verification_abstract_class import TwoStepVerificationAbstractClass
from igramscraper.two_step_verification.console_verification import ConsoleVerification

# Names exported by `from igramscraper.two_step_verification import *`.
__all__ = ["TwoStepVerificationAbstractClass", "ConsoleVerification"]
| 73 |
2,023 | # <NAME>, Agricultural University, Plovdiv, Bulgaria
# <EMAIL>
from __future__ import print_function

# Read user input as text on both Python 2 and 3. The original used Python 2's
# input(), which eval()s whatever the user types (arbitrary code execution) and
# could yield an int, making `total / sum` silently truncate under Python 2.
try:
    _read_line = raw_input  # Python 2
except NameError:
    _read_line = input  # Python 3


def compute_ratios(total, ratio_parts):
    """Split `total` proportionally according to `ratio_parts`.

    total: the overall amount of pesticide (number or numeric string).
    ratio_parts: iterable of ratio components (numbers or numeric strings),
        e.g. ['1', '2'] for a 1:2 split.
    Returns a list of floats, one share per ratio component.
    """
    parts = [float(p) for p in ratio_parts]
    unit = float(total) / sum(parts)
    return [unit * p for p in parts]


def qcr():
    """Prompt for a total amount and a colon-separated ratio, print the split."""
    total = float(_read_line('Enter general amount of pesticide: '))
    ratio_text = _read_line('Enter quantitative ratios with colon between them: ')
    amounts = compute_ratios(total, ratio_text.split(':'))
    print('')
    print('Calculated quantitative ratios are: ', amounts)


def main():
    """Simple menu loop: 1 runs a calculation, 0 exits."""
    choice = None
    while choice != '0':
        print("""
Calculation of quantitative concentration ratios
1 - Begin calculation
0 - Exit
""")
        choice = _read_line('Choice: ')
        if choice == '0':
            return
        elif choice == '1':
            qcr()


if __name__ == '__main__':
    main()
312 | #include <occa/internal/lang/token/charToken.hpp>
#include <occa/internal/utils/string.hpp>
namespace occa {
  namespace lang {
    // Token for a character literal (e.g. 'a', u'\n'): stores the encoding
    // prefix flags, the (unescaped) value, and any user-defined-literal suffix.
    charToken::charToken(const fileOrigin &origin_,
                         int encoding_,
                         const std::string &value_,
                         const std::string &udf_) :
      token_t(origin_),
      encoding(encoding_),
      value(value_),
      udf(udf_) {}

    charToken::~charToken() {}

    // Identifies this token kind (see tokenType::char_).
    int charToken::type() const {
      return tokenType::char_;
    }

    // Deep copy; the caller owns the returned token.
    token_t* charToken::clone() const {
      return new charToken(origin, encoding, value, udf);
    }

    // Re-emits the literal: optional encoding prefix (u/U/L), the escaped
    // value in single quotes, then the UDF suffix.
    void charToken::print(io::output &out) const {
      if (encoding & encodingType::u) {
        out << 'u';
      } else if (encoding & encodingType::U) {
        out << 'U';
      } else if (encoding & encodingType::L) {
        out << 'L';
      }
      out << '\'' << escape(value, '\'') << '\'' << udf;
    }
  }
}
| 450 |
8,194 | /*
* Copyright 2016 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hippo.ehviewer.client;
import android.util.Log;
import com.hippo.ehviewer.EhDB;
import com.hippo.ehviewer.client.data.GalleryInfo;
import com.hippo.ehviewer.dao.Filter;
import java.util.ArrayList;
import java.util.List;
/**
 * Process-wide registry of gallery filters, backed by {@link EhDB}.
 *
 * <p>Filters come in four modes (title, uploader, tag, tag-namespace), each kept
 * in its own in-memory list. The {@code filterXxx} methods return {@code false}
 * when the gallery should be filtered out and {@code true} when it should be kept.
 *
 * <p>The mode-to-list dispatch was previously duplicated in the constructor,
 * {@code addFilter} and {@code deleteFilter}; it is now centralized in
 * {@link #listForMode(int)} / {@link #registerFilter(Filter)}.
 */
public final class EhFilter {

    private static final String TAG = EhFilter.class.getSimpleName();

    public static final int MODE_TITLE = 0;
    public static final int MODE_UPLOADER = 1;
    public static final int MODE_TAG = 2;
    public static final int MODE_TAG_NAMESPACE = 3;

    private final List<Filter> mTitleFilterList = new ArrayList<>();
    private final List<Filter> mUploaderFilterList = new ArrayList<>();
    private final List<Filter> mTagFilterList = new ArrayList<>();
    private final List<Filter> mTagNamespaceFilterList = new ArrayList<>();

    private static EhFilter sInstance;

    /**
     * Returns the singleton, creating it (and loading all persisted filters
     * from the database) on first use. Synchronized for consistency with the
     * other mutating methods, so concurrent first calls cannot race.
     */
    public static synchronized EhFilter getInstance() {
        if (sInstance == null) {
            sInstance = new EhFilter();
        }
        return sInstance;
    }

    private EhFilter() {
        List<Filter> list = EhDB.getAllFilter();
        for (int i = 0, n = list.size(); i < n; i++) {
            registerFilter(list.get(i));
        }
    }

    /** Returns the in-memory list for the given filter mode, or null if the mode is unknown. */
    private List<Filter> listForMode(int mode) {
        switch (mode) {
            case MODE_TITLE:
                return mTitleFilterList;
            case MODE_UPLOADER:
                return mUploaderFilterList;
            case MODE_TAG:
                return mTagFilterList;
            case MODE_TAG_NAMESPACE:
                return mTagNamespaceFilterList;
            default:
                return null;
        }
    }

    /**
     * Adds the filter to the in-memory list for its mode. Text-matching modes
     * (title/tag/tag-namespace) compare case-insensitively, so their pattern
     * text is lower-cased once here instead of on every match. Unknown modes
     * are logged and ignored (same as before).
     */
    private void registerFilter(Filter filter) {
        List<Filter> list = listForMode(filter.mode);
        if (list == null) {
            Log.d(TAG, "Unknown mode: " + filter.mode);
            return;
        }
        if (filter.mode != MODE_UPLOADER) {
            filter.text = filter.text.toLowerCase();
        }
        list.add(filter);
    }

    public List<Filter> getTitleFilterList() {
        return mTitleFilterList;
    }

    public List<Filter> getUploaderFilterList() {
        return mUploaderFilterList;
    }

    public List<Filter> getTagFilterList() {
        return mTagFilterList;
    }

    public List<Filter> getTagNamespaceFilterList() {
        return mTagNamespaceFilterList;
    }

    /** Persists the filter and adds it to the matching in-memory list. */
    public synchronized void addFilter(Filter filter) {
        // enable filter by default before it is added to database
        filter.enable = true;
        EhDB.addFilter(filter);
        registerFilter(filter);
    }

    /** Toggles the filter's enabled state in the database. */
    public synchronized void triggerFilter(Filter filter) {
        EhDB.triggerFilter(filter);
    }

    /** Removes the filter from the database and from its in-memory list. */
    public synchronized void deleteFilter(Filter filter) {
        EhDB.deleteFilter(filter);
        List<Filter> list = listForMode(filter.mode);
        if (list != null) {
            list.remove(filter);
        } else {
            Log.d(TAG, "Unknown mode: " + filter.mode);
        }
    }

    /** True if any tag or tag-namespace filters exist, i.e. tag data is needed. */
    public synchronized boolean needTags() {
        return 0 != mTagFilterList.size() || 0 != mTagNamespaceFilterList.size();
    }

    /**
     * Returns false if the gallery's title matches any enabled title filter
     * (case-insensitive substring match) — i.e. the gallery should be hidden.
     */
    public synchronized boolean filterTitle(GalleryInfo info) {
        if (null == info) {
            return false;
        }

        String title = info.title;
        List<Filter> filters = mTitleFilterList;
        if (null != title && filters.size() > 0) {
            // Filter text is stored lower-cased; lower-case the title once,
            // not on every loop iteration.
            String lowerTitle = title.toLowerCase();
            for (int i = 0, n = filters.size(); i < n; i++) {
                Filter filter = filters.get(i);
                if (filter.enable && lowerTitle.contains(filter.text)) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Returns false if the gallery's uploader exactly matches any enabled
     * uploader filter (case-sensitive, as stored).
     */
    public synchronized boolean filterUploader(GalleryInfo info) {
        if (null == info) {
            return false;
        }

        String uploader = info.uploader;
        List<Filter> filters = mUploaderFilterList;
        if (null != uploader && filters.size() > 0) {
            for (int i = 0, n = filters.size(); i < n; i++) {
                Filter filter = filters.get(i);
                if (filter.enable && uploader.equals(filter.text)) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Matches a gallery tag against a tag filter. Both may be "namespace:name"
     * or a bare name; a missing namespace on either side acts as a wildcard,
     * present namespaces must be equal, and the names must always be equal.
     */
    private boolean matchTag(String tag, String filter) {
        if (null == tag || null == filter) {
            return false;
        }

        String tagNamespace;
        String tagName;
        String filterNamespace;
        String filterName;
        int index = tag.indexOf(':');
        if (index < 0) {
            tagNamespace = null;
            tagName = tag;
        } else {
            tagNamespace = tag.substring(0, index);
            tagName = tag.substring(index + 1);
        }
        index = filter.indexOf(':');
        if (index < 0) {
            filterNamespace = null;
            filterName = filter;
        } else {
            filterNamespace = filter.substring(0, index);
            filterName = filter.substring(index + 1);
        }

        if (null != tagNamespace && null != filterNamespace
                && !tagNamespace.equals(filterNamespace)) {
            return false;
        }
        return tagName.equals(filterName);
    }

    /** Returns false if any of the gallery's tags matches an enabled tag filter. */
    public synchronized boolean filterTag(GalleryInfo info) {
        if (null == info) {
            return false;
        }

        String[] tags = info.simpleTags;
        List<Filter> filters = mTagFilterList;
        if (null != tags && filters.size() > 0) {
            for (String tag : tags) {
                for (int i = 0, n = filters.size(); i < n; i++) {
                    if (filters.get(i).enable && matchTag(tag, filters.get(i).text)) {
                        return false;
                    }
                }
            }
        }
        return true;
    }

    /** True if the tag's namespace (the part before ':') equals the filter text. */
    private boolean matchTagNamespace(String tag, String filter) {
        if (null == tag || null == filter) {
            return false;
        }
        int index = tag.indexOf(':');
        return index >= 0 && tag.substring(0, index).equals(filter);
    }

    /** Returns false if any of the gallery's tag namespaces matches an enabled filter. */
    public synchronized boolean filterTagNamespace(GalleryInfo info) {
        if (null == info) {
            return false;
        }

        String[] tags = info.simpleTags;
        List<Filter> filters = mTagNamespaceFilterList;
        if (null != tags && filters.size() > 0) {
            for (String tag : tags) {
                for (int i = 0, n = filters.size(); i < n; i++) {
                    if (filters.get(i).enable && matchTagNamespace(tag, filters.get(i).text)) {
                        return false;
                    }
                }
            }
        }
        return true;
    }
}
| 4,129 |
400 | /*
* xcam_smart_result.h - smart result(meta data)
*
* Copyright (c) 2016-2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: <NAME> <<EMAIL>>
*/
#ifndef C_XCAM_SMART_RESULT_H
#define C_XCAM_SMART_RESULT_H

#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#include <base/xcam_3a_result.h>

XCAM_BEGIN_DECLARE

/* A single detected face.
 * NOTE(review): units/origin of pos_x/pos_y/width/height and the meaning of
 * `factor` and the packing of `landmark` are defined by the producer of this
 * struct -- confirm against the face-detection implementation. */
typedef struct _XCamFaceInfo {
    uint32_t id;            /* face/tracking id */
    uint32_t pos_x;         /* bounding box position (presumably pixels) */
    uint32_t pos_y;
    uint32_t width;         /* bounding box size */
    uint32_t height;
    uint32_t factor;        /* producer-defined; confirm semantics */
    uint32_t landmark[128]; /* packed landmark data; layout producer-defined */
} XCamFaceInfo;

/*
 * Face detection result
 * head.type = XCAM_3A_RESULT_FACE_DETECTION;
 * head.process_type = XCAM_IMAGE_PROCESS_POST;
 * head.destroy = free fd result.
 */
typedef struct _XCamFDResult {
    XCam3aResultHead head;
    uint32_t face_num;     /* number of entries in faces[] */
    XCamFaceInfo faces[0]; /* variable-length tail; zero-length array is a GNU
                              extension (C99 flexible member would be faces[]) --
                              confirm toolchain before changing */
} XCamFDResult;

/*
 * Digital Video Stabilizer result
 * head.type = XCAM_3A_RESULT_DVS;
 * head.process_type = XCAM_IMAGE_PROCESS_POST;
 * head.destroy = free dvs result.
 */
typedef struct _XCamDVSResult {
    XCam3aResultHead head;
    int frame_id;
    int frame_width;
    int frame_height;
    double proj_mat[9];    /* 3x3 projection matrix, row-major (presumably; confirm) */
} XCamDVSResult;

XCAM_END_DECLARE

#endif //C_XCAM_SMART_RESULT_H
| 763 |
1,755 | <gh_stars>1000+
/*=========================================================================
Program: Visualization Toolkit
Module: TestGPURayCastNearestDataTypesMIP.cxx
Copyright (c) <NAME>, <NAME>, <NAME>
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
// This test volume renders the same dataset using 8 different data types
// (unsigned char, signed char, unsigned short, signed short, unsigned int
// int, float, and double). It uses MIP. The extents of
// the dataset are (0, 114, 0, 100, 0, 74).
// The color transfer function is piecewise constant (sharpness=1.0) and
// the interpolation is set to nearest (initial value)
#include "vtkCamera.h"
#include "vtkColorTransferFunction.h"
#include "vtkGPUVolumeRayCastMapper.h"
#include "vtkImageShiftScale.h"
#include "vtkPiecewiseFunction.h"
#include "vtkRegressionTestImage.h"
#include "vtkRenderWindow.h"
#include "vtkRenderWindowInteractor.h"
#include "vtkRenderer.h"
#include "vtkTestUtilities.h"
#include "vtkTextActor.h"
#include "vtkTextProperty.h"
#include "vtkTransform.h"
#include "vtkVolumeProperty.h"
#include "vtkXMLImageDataReader.h"
int TestGPURayCastNearestDataTypesMIP(int argc, char* argv[])
{
cout << "CTEST_FULL_OUTPUT (Avoid ctest truncation of output)" << endl;
char* cfname = vtkTestUtilities::ExpandDataFileName(argc, argv, "Data/vase_1comp.vti");
vtkXMLImageDataReader* reader = vtkXMLImageDataReader::New();
reader->SetFileName(cfname);
delete[] cfname;
vtkImageShiftScale* shiftScale[4][2];
vtkColorTransferFunction* color[4][2];
vtkPiecewiseFunction* opacity[4][2];
// unsigned char
shiftScale[0][0] = vtkImageShiftScale::New();
shiftScale[0][0]->SetInputConnection(reader->GetOutputPort());
color[0][0] = vtkColorTransferFunction::New();
color[0][0]->AddRGBPoint(0, 0, 0, 1, 0.5, 1.0);
color[0][0]->AddRGBPoint(255, 0, 1, 0);
opacity[0][0] = vtkPiecewiseFunction::New();
opacity[0][0]->AddPoint(0, 0);
opacity[0][0]->AddPoint(255, 1);
// signed char
shiftScale[0][1] = vtkImageShiftScale::New();
shiftScale[0][1]->SetInputConnection(shiftScale[0][0]->GetOutputPort());
shiftScale[0][1]->SetShift(-128);
shiftScale[0][1]->SetOutputScalarType(15);
color[0][1] = vtkColorTransferFunction::New();
color[0][1]->AddRGBPoint(-128, 0, 0, 1, 0.5, 1.0);
color[0][1]->AddRGBPoint(127, 0, 1, 0);
opacity[0][1] = vtkPiecewiseFunction::New();
opacity[0][1]->AddPoint(-128, 0);
opacity[0][1]->AddPoint(127, 1);
// unsigned short
shiftScale[1][0] = vtkImageShiftScale::New();
shiftScale[1][0]->SetInputConnection(reader->GetOutputPort());
shiftScale[1][0]->SetScale(256);
shiftScale[1][0]->SetOutputScalarTypeToUnsignedShort();
color[1][0] = vtkColorTransferFunction::New();
color[1][0]->AddRGBPoint(0, 0, 0, 1, 0.5, 1.0);
color[1][0]->AddRGBPoint(65535, 0, 1, 0);
opacity[1][0] = vtkPiecewiseFunction::New();
opacity[1][0]->AddPoint(0, 0);
opacity[1][0]->AddPoint(65535, 1);
// short
shiftScale[1][1] = vtkImageShiftScale::New();
shiftScale[1][1]->SetInputConnection(shiftScale[1][0]->GetOutputPort());
shiftScale[1][1]->SetShift(-32768);
shiftScale[1][1]->SetOutputScalarTypeToShort();
color[1][1] = vtkColorTransferFunction::New();
color[1][1]->AddRGBPoint(-32768, 0, 0, 1, 0.5, 1.0);
color[1][1]->AddRGBPoint(32767, 0, 1, 0);
opacity[1][1] = vtkPiecewiseFunction::New();
opacity[1][1]->AddPoint(-32768, 0);
opacity[1][1]->AddPoint(32767, 1);
// unsigned int
shiftScale[2][0] = vtkImageShiftScale::New();
shiftScale[2][0]->SetInputConnection(reader->GetOutputPort());
shiftScale[2][0]->SetScale(16777216);
shiftScale[2][0]->SetOutputScalarTypeToUnsignedInt();
color[2][0] = vtkColorTransferFunction::New();
color[2][0]->AddRGBPoint(0, 0, 0, 1, 0.5, 1.0);
color[2][0]->AddRGBPoint(VTK_UNSIGNED_INT_MAX, 0, 1, 0);
opacity[2][0] = vtkPiecewiseFunction::New();
opacity[2][0]->AddPoint(0, 0);
opacity[2][0]->AddPoint(VTK_UNSIGNED_INT_MAX, 1);
// int
shiftScale[2][1] = vtkImageShiftScale::New();
shiftScale[2][1]->SetInputConnection(shiftScale[2][0]->GetOutputPort());
shiftScale[2][1]->SetShift(VTK_INT_MIN);
shiftScale[2][1]->SetOutputScalarTypeToInt();
color[2][1] = vtkColorTransferFunction::New();
color[2][1]->AddRGBPoint(VTK_INT_MIN, 0, 0, 1, 0.5, 1.0);
color[2][1]->AddRGBPoint(VTK_INT_MAX, 0, 1, 0);
opacity[2][1] = vtkPiecewiseFunction::New();
opacity[2][1]->AddPoint(VTK_INT_MIN, 0);
opacity[2][1]->AddPoint(VTK_INT_MAX, 1);
// float [-1 1]
vtkImageShiftScale* shiftScale_3_0_pre = vtkImageShiftScale::New();
shiftScale_3_0_pre->SetInputConnection(reader->GetOutputPort());
shiftScale_3_0_pre->SetScale(0.0078125);
shiftScale_3_0_pre->SetOutputScalarTypeToFloat();
shiftScale[3][0] = vtkImageShiftScale::New();
shiftScale[3][0]->SetInputConnection(shiftScale_3_0_pre->GetOutputPort());
shiftScale[3][0]->SetShift(-1.0);
shiftScale[3][0]->SetOutputScalarTypeToFloat();
color[3][0] = vtkColorTransferFunction::New();
color[3][0]->AddRGBPoint(-1.0, 0, 0, 1, 0.5, 1.0);
color[3][0]->AddRGBPoint(1.0, 0, 1, 0);
opacity[3][0] = vtkPiecewiseFunction::New();
opacity[3][0]->AddPoint(-1.0, 0);
opacity[3][0]->AddPoint(1.0, 1);
// double [-1000 3000]
vtkImageShiftScale* shiftScale_3_1_pre = vtkImageShiftScale::New();
shiftScale_3_1_pre->SetInputConnection(reader->GetOutputPort());
shiftScale_3_1_pre->SetScale(15.625);
shiftScale_3_1_pre->SetOutputScalarTypeToDouble();
shiftScale[3][1] = vtkImageShiftScale::New();
shiftScale[3][1]->SetInputConnection(shiftScale_3_1_pre->GetOutputPort());
shiftScale[3][1]->SetShift(-1000);
shiftScale[3][1]->SetOutputScalarTypeToDouble();
color[3][1] = vtkColorTransferFunction::New();
color[3][1]->AddRGBPoint(-1000, 0, 0, 1, 0.5, 1.0);
color[3][1]->AddRGBPoint(3000, 0, 1, 0);
opacity[3][1] = vtkPiecewiseFunction::New();
opacity[3][1]->AddPoint(-1000, 0);
opacity[3][1]->AddPoint(3000, 1);
vtkRenderer* ren1 = vtkRenderer::New();
vtkRenderWindow* renWin = vtkRenderWindow::New();
renWin->AddRenderer(ren1);
renWin->SetSize(600, 300);
vtkRenderWindowInteractor* iren = vtkRenderWindowInteractor::New();
iren->SetRenderWindow(renWin);
renWin->Render();
vtkGPUVolumeRayCastMapper* volumeMapper[4][2];
vtkVolumeProperty* volumeProperty[4][2];
vtkVolume* volume[4][2];
vtkTransform* userMatrix[4][2];
int i = 0;
while (i < 4)
{
int j = 0;
while (j < 2)
{
volumeMapper[i][j] = vtkGPUVolumeRayCastMapper::New();
volumeMapper[i][j]->SetBlendModeToMaximumIntensity();
volumeMapper[i][j]->SetInputConnection(shiftScale[i][j]->GetOutputPort());
volumeProperty[i][j] = vtkVolumeProperty::New();
volumeProperty[i][j]->SetColor(color[i][j]);
volumeProperty[i][j]->SetScalarOpacity(opacity[i][j]);
volumeProperty[i][j]->SetInterpolationType(VTK_NEAREST_INTERPOLATION);
volume[i][j] = vtkVolume::New();
volume[i][j]->SetMapper(volumeMapper[i][j]);
volume[i][j]->SetProperty(volumeProperty[i][j]);
userMatrix[i][j] = vtkTransform::New();
userMatrix[i][j]->PostMultiply();
userMatrix[i][j]->Identity();
userMatrix[i][j]->Translate(i * 120, j * 120, 0);
volume[i][j]->SetUserTransform(userMatrix[i][j]);
ren1->AddViewProp(volume[i][j]);
++j;
}
++i;
}
int valid = volumeMapper[0][1]->IsRenderSupported(renWin, volumeProperty[0][1]);
int retVal;
if (valid)
{
iren->Initialize();
ren1->SetBackground(0.1, 0.4, 0.2);
ren1->ResetCamera();
ren1->GetActiveCamera()->Zoom(2.0);
renWin->Render();
retVal = vtkTesting::Test(argc, argv, renWin, 75);
if (retVal == vtkRegressionTester::DO_INTERACTOR)
{
iren->Start();
}
}
else
{
retVal = vtkTesting::PASSED;
cout << "Required extensions not supported." << endl;
}
iren->Delete();
renWin->Delete();
ren1->Delete();
shiftScale_3_1_pre->Delete();
shiftScale_3_0_pre->Delete();
i = 0;
while (i < 4)
{
int j = 0;
while (j < 2)
{
volumeMapper[i][j]->Delete();
volumeProperty[i][j]->Delete();
volume[i][j]->Delete();
userMatrix[i][j]->Delete();
shiftScale[i][j]->Delete();
color[i][j]->Delete();
opacity[i][j]->Delete();
++j;
}
++i;
}
reader->Delete();
if ((retVal == vtkTesting::PASSED) || (retVal == vtkTesting::DO_INTERACTOR))
{
return 0;
}
else
{
return 1;
}
}
| 3,510 |
521 | #include <elle/test.hh>
#include <elle/Printable.hh>
#include <iostream>
#include <sstream>
namespace elle
{
  // A minimal complex number with integer components that knows how to
  // print itself as "a + bj" or "a - bj".
  class ComplexNumber : public Printable
  {
  public:
    ComplexNumber(int real, int imag);
    ~ComplexNumber();
    void
    set(int real, int imag);
    void
    print(std::ostream& stream) const;
  private:
    int _real, _imag;
  };

  ComplexNumber::ComplexNumber(int real, int imag)
    : _real(real)
    , _imag(imag)
  {}

  ComplexNumber::~ComplexNumber()
  {}

  // Overwrites both components at once.
  void
  ComplexNumber::set(int real, int imag)
  {
    this->_real = real;
    this->_imag = imag;
  }

  // Renders the number into the stream; a negative imaginary part is
  // written with a minus sign and its magnitude.
  void
  ComplexNumber::print(std::ostream& stream) const
  {
    if (this->_imag < 0)
      stream << this->_real << " - " << -this->_imag << "j";
    else
      stream << this->_real << " + " << this->_imag << "j";
  }
}
static
void
test_generic(int x, int y)
{
elle::ComplexNumber testPositive(x, y);
std::stringstream output, expected;
if (y >= 0)
expected << x << " + " << y << "j";
if (y < 0)
expected << x << " - " << abs(y) << "j";
testPositive.print(output);
BOOST_CHECK_EQUAL(output.str(), expected.str());
}
// Registers the printable-formatting checks with the elle/Boost test
// framework: one case with a positive and one with a negative imaginary part.
ELLE_TEST_SUITE()
{
  boost::unit_test::test_suite* basics = BOOST_TEST_SUITE("Basics");
  boost::unit_test::framework::master_test_suite().add(basics);
  // positive imaginary part -> "1 + 2j"
  basics->add(BOOST_TEST_CASE(std::bind(test_generic, 1, 2)));
  // negative imaginary part -> "3 - 5j"
  basics->add(BOOST_TEST_CASE(std::bind(test_generic, 3, -5)));
}
| 574 |
778 | //--------------------------------------------------------------------------------------------------------------------//
// //
// Tuplex: Blazing Fast Python Data Science //
// //
// //
// (c) 2017 - 2021, Tuplex team //
// Created by <NAME> first on 1/1/2021 //
// License: Apache 2.0 //
//--------------------------------------------------------------------------------------------------------------------//
#include <physical/SampleProcessor.h>
#include <logical/UDFOperator.h>
#include <logical/MapColumnOperator.h>
#include <logical/WithColumnOperator.h>
#include <logical/ResolveOperator.h>
#include <logical/FileInputOperator.h>
#include <Utils.h>
#include <vector>
#include <string>
#include <stdexcept>
namespace tuplex {
void SampleProcessor::releasePythonObjects() {
if(python::isInterpreterRunning()) {
python::lockGIL();
// release UDFs
for(auto keyval : _TUPLEXs) {
Py_XDECREF(keyval.second);
}
python::unlockGIL();
}
_TUPLEXs.clear();
}
void SampleProcessor::cachePythonUDFs() {
// lazy deserialize
if(!_udfsCached) {
assert(python::isInterpreterRunning());
for(auto op : _operators) {
if(hasUDF(op)) {
UDFOperator *udfop = dynamic_cast<UDFOperator*>(op);
assert(udfop);
auto pickled_code = udfop->getUDF().getPickledCode(); // this has internal python locks
python::lockGIL();
auto pFunc = python::deserializePickledFunction(python::getMainModule(),
pickled_code.c_str(), pickled_code.size());
python::unlockGIL();
_TUPLEXs[op->getID()] = pFunc;
}
}
_udfsCached = true;
}
}
std::string formatTraceback(const std::string& functionName,
const std::string& exceptionClass,
const std::string& message,
long lineNo) {
std::stringstream ss;
ss<<"line "<<lineNo<<" in "<<functionName<<":"<<"\n ---> "<<exceptionClass<<": "<<message;
return ss.str();
}
// get dictmode from fas166
// AND get
python::PythonCallResult SampleProcessor::applyOperator(LogicalOperator *op, PyObject *pyRow) {
python::PythonCallResult pcr;
// // check type of input row (either tuple OR dict)
// if(PyTuple_Check(pyRow)) {
// // tuple as input
// } else
// // dict as input (not yet supported!)
// throw std::runtime_error("dict as input not yet supported!!!");
// }
// extract UDF relevant information
PyObject *TUPLEX = nullptr;
bool dictMode = false;
std::vector<std::string> columns;
if(hasUDF(op)) {
TUPLEX = _TUPLEXs.at(op->getID());
UDFOperator* udfop = dynamic_cast<UDFOperator*>(op);
assert(udfop);
dictMode = udfop->getUDF().dictMode();
columns = udfop->columns();
}
// apply operation
switch(op->type()) {
// @Todo: solve for dict mode OR tuple mode...
// --> makes this a little more complicated...
// --> check for each function (using AST visitor), whether it is in dict mode or not
case LogicalOperatorType::MAP: {
Py_XINCREF(pyRow); // +1, for function to consume
// call function depending on mode
// Note: if result is not taken, decrease ref!
pcr = dictMode ? python::callFunctionWithDictEx(TUPLEX, pyRow, op->parent()->columns()) :
python::callFunctionEx(TUPLEX, pyRow);
break;
}
case LogicalOperatorType::FILTER: {
// return bool res here
Py_XINCREF(pyRow); // +1, for function to consume
// call function depending on mode
pcr = dictMode ? python::callFunctionWithDictEx(TUPLEX, pyRow, op->parent()->columns()) :
python::callFunctionEx(TUPLEX, pyRow);
break;
}
case LogicalOperatorType::WITHCOLUMN: {
PyObject* pyRes = nullptr;
// get cached UDF
auto wop = ((WithColumnOperator*)op);
// apply UDF and check for errors...
auto idx = wop->getColumnIndex();
auto num_columns = columns.size();
Py_XINCREF(pyRow); // count +1, because call Function consumes 1
pcr = dictMode ? python::callFunctionWithDictEx(TUPLEX, pyRow, op->parent()->columns()) :
python::callFunctionEx(TUPLEX, pyRow);
auto pyColRes = pcr.res;
if(pcr.exceptionCode == ExceptionCode::SUCCESS) {
assert(pyColRes);
pyRes = PyTuple_New(num_columns);
for(unsigned i = 0; i < num_columns; i++) {
if(i != idx) {
assert(i < PyTuple_Size(pyRow));
PyTuple_SET_ITEM(pyRes, i, PyTuple_GET_ITEM(pyRow, i));
}
else
PyTuple_SET_ITEM(pyRes, i, pyColRes);
}
}
pcr.res = pyRes;
break;
}
// @Todo: should compute graph for all samples too and flag which ones would get thrown out by filter...
case LogicalOperatorType::MAPCOLUMN: {
PyObject* pyRes = nullptr;
auto idx = ((MapColumnOperator*)op)->getColumnIndex();
PyObject *pyElement = PyTuple_GetItem(pyRow, idx);
PyObject *pyArg = PyTuple_New(1);
PyTuple_SET_ITEM(pyArg, 0, pyElement);
// only in tuple mode!
pcr = python::callFunctionEx(TUPLEX, pyArg);
auto pyColRes = pcr.res;
if(pcr.exceptionCode == ExceptionCode::SUCCESS) {
pyRes = PyTuple_New(PyTuple_Size(pyRow));
for(unsigned i = 0; i < PyTuple_Size(pyRow); ++i) {
if(i != idx)
PyTuple_SET_ITEM(pyRes, i, PyTuple_GET_ITEM(pyRow, i));
else
PyTuple_SET_ITEM(pyRes, i, pyColRes);
}
}
// output
pcr.res = pyRes;
break;
}
default:
throw "unknown operator " + op->name() + " seen in sampling procedure";
}
return pcr;
}
    // Applies a map (or filter) UDF to the given python row and returns the
    // raw python call result; the caller owns pcr.res.
    // Refcounts: pyRow is incref'd once because the call consumes one
    // reference; in debug builds the refcounts of row and UDF are asserted
    // not to drop below their pre-call values.
    python::PythonCallResult SampleProcessor::applyMap(bool dictMode, PyObject *TUPLEX, PyObject *pyRow,
                                                       const std::vector<std::string> &columns) {
        // debug: check & assert refcounts
#ifndef NDEBUG
        auto oldRowRefCnt = pyRow->ob_refcnt;
        auto oldUDFRefCnt = TUPLEX->ob_refcnt;
#endif
        Py_XINCREF(pyRow);

        // call function depending on mode
        // Note: if result is not taken, decrease ref!
        auto pcr = dictMode ? python::callFunctionWithDictEx(TUPLEX, pyRow, columns) :
                   python::callFunctionEx(TUPLEX, pyRow);

        // debug assert refcnts, should not be cleared
#ifndef NDEBUG
        assert(pyRow->ob_refcnt >= oldRowRefCnt);
        assert(TUPLEX->ob_refcnt >= oldUDFRefCnt);
#endif
        return pcr;
    }
    // Applies a mapColumn UDF: extracts the element at index idx from the
    // tuple pyRow, calls the UDF on it (tuple mode only) and, on success,
    // rebuilds the row tuple with the UDF result at idx. On failure pcr.res
    // is nullptr.
    python::PythonCallResult SampleProcessor::applyMapColumn(bool dictMode, PyObject *TUPLEX, PyObject *pyRow,
                                                             int idx) {
        assert(!dictMode); // no dict mode allowed in mapColumn!

        // debug: check & assert refcounts
#ifndef NDEBUG
        auto oldRowRefCnt = pyRow->ob_refcnt;
        auto oldUDFRefCnt = TUPLEX->ob_refcnt;
#endif
        Py_XINCREF(pyRow);

        PyObject* pyRes = nullptr;
        // PyTuple_GetItem returns a borrowed reference; it is stolen into the
        // freshly created 1-tuple argument below
        PyObject *pyElement = PyTuple_GetItem(pyRow, idx);
        PyObject *pyArg = PyTuple_New(1);
        PyTuple_SET_ITEM(pyArg, 0, pyElement);

        // only in tuple mode!
        auto pcr = python::callFunctionEx(TUPLEX, pyArg);
        auto pyColRes = pcr.res;
        if(pcr.exceptionCode == ExceptionCode::SUCCESS) {
            // copy all other slots from the input row, place UDF result at idx
            pyRes = PyTuple_New(PyTuple_Size(pyRow));
            for(unsigned i = 0; i < PyTuple_Size(pyRow); ++i) {
                if(i != idx)
                    PyTuple_SET_ITEM(pyRes, i, PyTuple_GET_ITEM(pyRow, i));
                else
                    PyTuple_SET_ITEM(pyRes, i, pyColRes);
            }
        }

        // output
        pcr.res = pyRes;

        // debug assert refcnts, should not be cleared
#ifndef NDEBUG
        assert(pyRow->ob_refcnt >= oldRowRefCnt);
        assert(TUPLEX->ob_refcnt >= oldUDFRefCnt);
#endif
        return pcr;
    }
    // Applies a withColumn UDF: calls the UDF on the full row (dict mode
    // supported) and, on success, builds a new tuple of columns.size() slots
    // with the UDF result at index idx and every other slot copied from the
    // input row. On failure pcr.res is nullptr.
    python::PythonCallResult SampleProcessor::applyWithColumn(bool dictMode, PyObject *TUPLEX, PyObject *pyRow,
                                                              const std::vector<std::string> &columns, int idx) {
        // debug: check & assert refcounts
#ifndef NDEBUG
        auto oldRowRefCnt = pyRow->ob_refcnt;
        auto oldUDFRefCnt = TUPLEX->ob_refcnt;
#endif
        PyObject* pyRes = nullptr;
        auto num_columns = columns.size();
        Py_XINCREF(pyRow); // required because of the consumption below.

        // call function depending on mode
        // Note: if result is not taken, decrease ref!
        auto pcr = dictMode ? python::callFunctionWithDictEx(TUPLEX, pyRow, columns) :
                   python::callFunctionEx(TUPLEX, pyRow);

        auto pyColRes = pcr.res;
        if(pcr.exceptionCode == ExceptionCode::SUCCESS) {
            assert(pyColRes);
            pyRes = PyTuple_New(num_columns);
            for(unsigned i = 0; i < num_columns; i++) {
                if(i != idx) {
                    assert(i < PyTuple_Size(pyRow));
                    // borrowed item from the input row is stolen into pyRes
                    auto item = PyTuple_GET_ITEM(pyRow, i);
                    PyTuple_SET_ITEM(pyRes, i, item);
                }
                else
                    PyTuple_SET_ITEM(pyRes, i, pyColRes);
            }
        }
        pcr.res = pyRes;

        // debug assert refcnts, should not be cleared
#ifndef NDEBUG
        assert(pyRow->ob_refcnt >= oldRowRefCnt);
        assert(TUPLEX->ob_refcnt >= oldUDFRefCnt);
#endif
        return pcr;
    }
    // trace row w/o resolvers/ignore applied
    // Feeds a single input row through all operators of this processor using
    // the interpreted (python) versions of the cached UDFs. Returns early with
    // the partial row, exception code, traceback and failing operator ID when
    // a UDF raises, or with an empty row when a filter drops the row. The GIL
    // is held for the whole trace.
    SampleProcessor::TraceResult SampleProcessor::traceRow(const tuplex::Row &row) {
        TraceResult tr;

        // input row is received, apply all operators in this processor to it
        python::lockGIL();

        PyObject* rowObj = python::rowToPython(row);
        for(auto op : _operators) {
            // if UDFOperator, decode whether it's dict mode or not
            // extract UDF relevant information
            PyObject *TUPLEX = nullptr;
            bool dictMode = false;
            std::vector<std::string> columns;
            if(hasUDF(op)) {
                TUPLEX = _TUPLEXs.at(op->getID());
                UDFOperator* udfop = dynamic_cast<UDFOperator*>(op);
                assert(udfop);
                dictMode = udfop->getUDF().dictMode();
                columns = udfop->parent()->columns(); // get the parents (output) columns, they're the current operators input columns.
            }

            switch(op->type()) {
                // sources/sinks do not transform the row
                case LogicalOperatorType::FILEINPUT:
                case LogicalOperatorType::PARALLELIZE:
                case LogicalOperatorType::TAKE:
                case LogicalOperatorType::FILEOUTPUT:
                    break; // ignore
                case LogicalOperatorType::MAP: {
                    // there's always output for map, i.e. apply
                    auto pcr = applyMap(dictMode, TUPLEX, rowObj, columns);
                    // check what result is
                    if(ExceptionCode::SUCCESS == pcr.exceptionCode)
                        rowObj = pcr.res;
                    else {
                        // UDF raised: record traceback + failing operator, abort trace
                        tr.exceptionTraceback = formatTraceback(pcr.functionName,
                                                                pcr.exceptionClass,
                                                                pcr.exceptionMessage,
                                                                pcr.exceptionLineNo);
                        tr.outputRow = python::pythonToRow(rowObj);
                        tr.ec = pcr.exceptionCode;
                        tr.lastOperatorID = op->getID();
                        python::unlockGIL();
                        return tr;
                    }
                    break;
                }
                case LogicalOperatorType::MAPCOLUMN: {
                    auto idx = dynamic_cast<MapColumnOperator*>(op)->getColumnIndex();
                    auto pcr = applyMapColumn(dictMode, TUPLEX, rowObj, idx);
                    // check what result is
                    if(ExceptionCode::SUCCESS == pcr.exceptionCode)
                        rowObj = pcr.res;
                    else {
                        // UDF raised: record traceback + failing operator, abort trace
                        tr.exceptionTraceback = formatTraceback(pcr.functionName,
                                                                pcr.exceptionClass,
                                                                pcr.exceptionMessage,
                                                                pcr.exceptionLineNo);
                        tr.outputRow = python::pythonToRow(rowObj);
                        tr.ec = pcr.exceptionCode;
                        tr.lastOperatorID = op->getID();
                        python::unlockGIL();
                        return tr;
                    }
                    break;
                }
                case LogicalOperatorType::WITHCOLUMN: {
                    auto idx = dynamic_cast<WithColumnOperator*>(op)->getColumnIndex();
                    auto pcr = applyWithColumn(dictMode, TUPLEX, rowObj, columns, idx);
                    // check what result is
                    if(ExceptionCode::SUCCESS == pcr.exceptionCode)
                        rowObj = pcr.res;
                    else {
                        // UDF raised: record traceback + failing operator, abort trace
                        tr.exceptionTraceback = formatTraceback(pcr.functionName,
                                                                pcr.exceptionClass,
                                                                pcr.exceptionMessage,
                                                                pcr.exceptionLineNo);
                        tr.outputRow = python::pythonToRow(rowObj);
                        tr.ec = pcr.exceptionCode;
                        tr.lastOperatorID = op->getID();
                        python::unlockGIL();
                        return tr;
                    }
                    break;
                }
                case LogicalOperatorType::FILTER: {
                    // special case: reuse map for this
                    auto pcr = applyMap(dictMode, TUPLEX, rowObj, columns);
                    // check what result is
                    if(ExceptionCode::SUCCESS == pcr.exceptionCode) {
                        // check what the result is
                        auto res = python::pythonToRow(pcr.res);
                        assert(res.getNumColumns() == 1);
                        // if false, then filtered out. I.e. stop & return trace result!
                        if(!res.getBoolean(0)) {
                            tr.outputRow = Row();
                            tr.ec = ExceptionCode::SUCCESS;
                            tr.lastOperatorID = op->getID();
                            python::unlockGIL();
                            return tr;
                        }
                        // else nothing todo, continue going through the pipeline :)
                    }
                    else {
                        // UDF raised: record traceback + failing operator, abort trace
                        tr.exceptionTraceback = formatTraceback(pcr.functionName,
                                                                pcr.exceptionClass,
                                                                pcr.exceptionMessage,
                                                                pcr.exceptionLineNo);
                        tr.outputRow = python::pythonToRow(rowObj);
                        tr.ec = pcr.exceptionCode;
                        tr.lastOperatorID = op->getID();
                        python::unlockGIL();
                        return tr;
                    }
                    break;
                }
                // NOTE(review): resolve/ignore/join/aggregate/cache are not traced
                // here (see function comment) — they fall through unchanged.
                case LogicalOperatorType::UNKNOWN:
                case LogicalOperatorType::RESOLVE:
                case LogicalOperatorType::IGNORE:
                case LogicalOperatorType::JOIN:
                case LogicalOperatorType::AGGREGATE:
                case LogicalOperatorType::CACHE:
                default: {
                    break;
                }
            }
        }
        python::unlockGIL();
        return tr;
    }
// @TODO: rewrite this function to use apply operator AND work for functions with dictionaries...
// ==> i.e. the extractSqft example
ExceptionSample SampleProcessor::generateExceptionSample(const Row& row, bool excludeAvailableResolvers) noexcept {
using namespace std;
ExceptionSample es;
// for some reason GILState blocks here, use restore thread thus...
auto tr = traceRow(row); // always trace without accounting for the resolver
// check if the given operator is actually a resolver and whether exception was thrown in res
// ==> try to apply res
if(!excludeAvailableResolvers && tr.ec != ExceptionCode::SUCCESS) {
// apply resolvers if necessary!
Row work_row = tr.outputRow;
// get last operator ID
int index = 0;
while(index < _operators.size() && _operators[index]->getID() != tr.lastOperatorID)
index++;
assert(index < _operators.size());
assert(_operators[index]->getID() == tr.lastOperatorID);
// now check if resolver is present, if so try to resolve or find the one which causes the exception!
while(index + 1 < _operators.size() && _operators[index + 1]->type() == LogicalOperatorType::RESOLVE) {
auto op = dynamic_cast<ResolveOperator*>(_operators[index + 1]);
assert(op && op->type() == LogicalOperatorType::RESOLVE);
#warning " for some reason in the resolve t"
// get resolver UDF
auto TUPLEX = _TUPLEXs.at(op->getID());
assert(TUPLEX);
auto dictMode = op->getUDF().dictMode();
auto columns = op->getNormalParent()->columns(); // get the parents (output) columns, they're the current operators input columns.
// check whether for this exception code a resolver exists, if not => continue!
if(op->ecCode() == tr.ec) {
python::lockGIL();
// just apply function, enough for the traceback...
// ==> function is not traced through resolvers!
auto pyRow = python::rowToPython(work_row);
auto pcr = dictMode ? python::callFunctionWithDictEx(TUPLEX, pyRow, columns) :
python::callFunctionEx(TUPLEX, pyRow);
// trace result!
tr.exceptionTraceback = formatTraceback(pcr.functionName,
pcr.exceptionClass,
pcr.exceptionMessage,
pcr.exceptionLineNo);
tr.outputRow = work_row;
tr.ec = pcr.exceptionCode;
tr.lastOperatorID = op->getID();
python::unlockGIL();
}
index++;
}
}
es.rows.push_back(tr.outputRow);
es.first_row_traceback = tr.exceptionTraceback;
// // compute over rows
// assert(!inRows.empty());
//
// vector<python::PythonCallResult> results;
// for(int j = 0; j < inRows.size(); ++j) {
// python::PythonCallResult pcr;
// auto work_row = inRows[j];
// for(int i = 0; i < max_idx + 1; ++i) {
// // only apply map/resolve, i.e. the trafos
// LogicalOperator* op = _operators[i];
//
// // skip parallelize and csv
// if(op->type() == LogicalOperatorType::CSV || op->type() == LogicalOperatorType::PARALLELIZE) {
// assert(operatorID != op->getID()); // make sure THEY ARE NOT THE CAUSE OF THE ERROR
// continue;
// }
//
// pcr = applyOperator(op, python::rowToPython(work_row));
// if(pcr.exceptionCode == ExceptionCode::SUCCESS)
// work_row = python::pythonToRow(pcr.res);
// else {
// // make sure op is the exception throwing operator!
// assert(op->getID() == operatorID);
// break;
// }
// }
//
// if(0 == j)
// es.first_row_traceback = formatTraceback(pcr.functionName,
// pcr.exceptionClass,
// pcr.exceptionMessage,
// pcr.exceptionLineNo);
//
// // push back to sample
// es.rows.emplace_back(work_row);
// }
// bool first_row = true;
// for(auto row : inRows) {
// Row work_row = row;
// ExceptionCode ec;
// PyObject *pyRes = nullptr;
// // go with row through pipeline
// for(int i = 0; i < max_idx + 1; ++i) {
// // only apply map/resolve, i.e. the trafos
// LogicalOperator* op = _operators[i];
//
//
//
// if(op->type() == LogicalOperatorType::MAP ||
// op->type() == LogicalOperatorType::RESOLVE) {
// pyRes = python::callFunction(_TUPLEXs.at(op->getID()), python::rowToPython(work_row), ec);
//
//
//#warning "there might also an issue here when it comes to resolvers because the first error-free resolver is applied!!!"
//
// // only overwrite if successful, i.e. resolvers may also throw exceptions...
// // note though, that resolvers apply only to the operator before, not the resolver before!
// if(ec == ExceptionCode::SUCCESS)
// work_row = python::pythonToRow(pyRes);
// }
//
// // apply MAPCOLUMN
// if(op->type() == LogicalOperatorType::MAPCOLUMN) {
// auto idx = dynamic_cast<MapColumnOperator*>(op)->getColumnIndex();
// PyObject* pyRow = python::rowToPython(work_row);
// PyObject *pyElement = PyTuple_GetItem(pyRow, idx);
// PyObject *pyArg = PyTuple_New(1);
// PyTuple_SET_ITEM(pyArg, 0, pyElement);
//
// pyRes = python::callFunction(_TUPLEXs.at(op->getID()), pyArg, ec);
// if(ec == ExceptionCode::SUCCESS) {
// auto pyRowRes = PyTuple_New(PyTuple_Size(pyRow));
// for(unsigned i = 0; i < PyTuple_Size(pyRow); ++i) {
// if(i != idx)
// PyTuple_SET_ITEM(pyRowRes, i, PyTuple_GET_ITEM(pyRow, i));
// else
// PyTuple_SET_ITEM(pyRowRes, i, pyRes);
// }
// work_row = python::pythonToRow(pyRowRes); // transformed row
// }
//
// Py_XDECREF(pyRow);
// }
//
// // apply WITHCOLUMN
// if(op->type() == LogicalOperatorType::WITHCOLUMN) {
//
// auto wop = dynamic_cast<WithColumnOperator*>(op);
// auto idx = wop->getColumnIndex();
// auto num_columns = wop->getColumns().size();
// PyObject* pyRow = python::rowToPython(work_row);
// Py_XINCREF(pyRow); // count +1, because call Function consumes 1
// pyRes = python::callFunction(_TUPLEXs.at(op->getID()), pyRow, ec);
//
// if(ec == ExceptionCode::SUCCESS) {
// auto pyRowRes = PyTuple_New(num_columns);
// for(unsigned i = 0; i < num_columns; i++) {
// if(i != idx) {
// assert(i < PyTuple_Size(pyRow));
// PyTuple_SET_ITEM(pyRowRes, i, PyTuple_GET_ITEM(pyRow, i));
// }
// else
// PyTuple_SET_ITEM(pyRowRes, i, pyRes);
// }
//
// work_row = python::pythonToRow(pyRowRes); // transformed row
// }
//
// // decref row & res (though they should be at 0)
// Py_XDECREF(pyRow);
// Py_XDECREF(pyRes);
// }
//
//
// // in debug mode check that filter is valid...
// // i.e. we know that the tuple went through the pipeline till operator with operatorID
//#ifndef NDEBUG
// if(op->type() == LogicalOperatorType::FILTER) {
// pyRes = python::callFunction(_TUPLEXs.at(op->getID()), python::rowToPython(work_row), ec);
// assert(ec == ExceptionCode::SUCCESS);
// assert(pyRes == Py_True || pyRes == Py_False);
// }
//#endif
// }
//
// es.rows.emplace_back(work_row);
//
// // now generate traceback + exceptions...
// if(first_row) {
// // apply max_idx operator & capture exception for first row
// // for other rows, simply save result before
// auto op = _operators[max_idx];
// python::PythonCallResult pcr;
//
// // what signature do operators take?
// // special case resolve, check on parent type
// LogicalOperatorType optype = op->type();
//
// // check what type of function signature it is
// bool singleColumnArg = false;
// int singleColumnIndex = -1;
// if(op->type() == LogicalOperatorType::MAPCOLUMN) {
// singleColumnArg = true;
// singleColumnIndex = dynamic_cast<MapColumnOperator*>(op)->getColumnIndex();
// }
//
// // check if resolve operator, then check if non-resolve parent is mapColumn
// if(op->type() == LogicalOperatorType::RESOLVE) {
// auto rop = dynamic_cast<ResolveOperator*>(op);
// auto parent = rop->getNormalParent();
// if(parent->type() == LogicalOperatorType::MAPCOLUMN) {
// singleColumnArg = true;
// singleColumnIndex = dynamic_cast<MapColumnOperator*>(parent)->getColumnIndex();
// }
// }
//
// if(singleColumnArg) {
// assert(singleColumnIndex >= 0);
// // extract element
// PyObject *pyRow = python::rowToPython(work_row);
// assert(PyTuple_Check(pyRow));
//
// // get element
// assert(singleColumnIndex < PyTuple_Size(pyRow));
// PyObject *pyElement = PyTuple_GetItem(pyRow, singleColumnIndex); // steals reference from Row
// PyObject *pySingleArg = PyTuple_New(1);
// PyTuple_SET_ITEM(pySingleArg, 0, pyElement); // steals reference from pyElement
// pcr = python::callFunctionEx(_TUPLEXs.at(op->getID()), pySingleArg);
//
// // element & arg steal reference from row. Therefore sufficient to release row only
// Py_XDECREF(pyRow);
//
// } else {
// // the whole row is the argument, i.e. call on it!
// pcr = python::callFunctionEx(_TUPLEXs.at(op->getID()), python::rowToPython(work_row));
// }
//
// es.first_row_traceback = formatTraceback(pcr.functionName,
// pcr.exceptionClass,
// pcr.exceptionMessage,
// pcr.exceptionLineNo);
//
// // in debug mode, validate result @Todo
// first_row = false;
// }
// }
return es;
}
std::vector<std::string> SampleProcessor::getColumnNames(int64_t operatorID) {
// find operator & return column names
auto it = std::find_if(_operators.begin(), _operators.end(), [operatorID](LogicalOperator* op) {
return op->getID() == operatorID;
});
if(it != _operators.end())
return (*it)->getDataSet()->columns();
// warn?
Logger::instance().defaultLogger().warn("accesing unknown operator " + std::to_string(operatorID) + " in sample processor");
return std::vector<std::string>();
}
LogicalOperator* SampleProcessor::getOperator(int64_t operatorID) {
// find operator & return column names
auto it = std::find_if(_operators.begin(), _operators.end(), [operatorID](LogicalOperator* op) {
return op->getID() == operatorID;
});
if(it != _operators.end())
return *it;
Logger::instance().defaultLogger().warn("accesing unknonw operator " + std::to_string(operatorID) + " in sample processor");
return nullptr;
}
int SampleProcessor::getOperatorIndex(int64_t operatorID) {
// find operator & return column names
auto it = std::find_if(_operators.begin(), _operators.end(), [operatorID](LogicalOperator* op) {
return op->getID() == operatorID;
});
if(it != _operators.end())
return it - _operators.begin();
Logger::instance().defaultLogger().warn("accesing unknonw operator " + std::to_string(operatorID) + " in sample processor");
return -1;
}
} | 17,485 |
622 | /*
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.pytorch.integration;
import ai.djl.ModelException;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.Shape;
import ai.djl.pytorch.engine.PtNDArray;
import ai.djl.pytorch.engine.PtNDManager;
import ai.djl.pytorch.engine.PtSymbolBlock;
import ai.djl.pytorch.jni.IValue;
import ai.djl.repository.zoo.Criteria;
import ai.djl.repository.zoo.ZooModel;
import ai.djl.training.util.ProgressBar;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
 * Integration tests for the JNI {@code IValue} wrapper: conversions between
 * Java values / {@code NDArray}s and TorchScript IValues, and invoking a
 * scripted model with raw {@code IValue} arguments.
 */
public class IValueTest {

    /** Exercises round-trips for all supported IValue scalar and container types. */
    @Test
    public void testIValue() {
        try (PtNDManager manager = (PtNDManager) NDManager.newBaseManager()) {
            PtNDArray array1 = (PtNDArray) manager.zeros(new Shape(1));
            PtNDArray array2 = (PtNDArray) manager.ones(new Shape(1));

            // tensor IValue
            try (IValue ivalue = IValue.from(array1)) {
                Assert.assertTrue(ivalue.isTensor());
                Assert.assertEquals(ivalue.getType(), "Tensor");
                NDArray ret = ivalue.toTensor(manager);
                Assert.assertEquals(ret, array1);
                NDList list = ivalue.toNDList(manager);
                Assert.assertEquals(list.size(), 1);
                Assert.assertEquals(list.head(), array1);
            }

            // scalar IValues
            try (IValue ivalue = IValue.from(true)) {
                Assert.assertTrue(ivalue.isBoolean());
                Assert.assertEquals(ivalue.getType(), "bool");
                Assert.assertTrue(ivalue.toBoolean());
            }
            try (IValue ivalue = IValue.from(1)) {
                Assert.assertTrue(ivalue.isLong());
                Assert.assertEquals(ivalue.getType(), "int");
                Assert.assertEquals(ivalue.toLong(), 1);
            }
            try (IValue ivalue = IValue.from(1d)) {
                Assert.assertTrue(ivalue.isDouble());
                Assert.assertEquals(ivalue.getType(), "float");
                Assert.assertEquals(ivalue.toDouble(), 1d);
            }
            try (IValue ivalue = IValue.from("test")) {
                Assert.assertTrue(ivalue.isString());
                Assert.assertEquals(ivalue.getType(), "str");
                Assert.assertEquals(ivalue.toStringValue(), "test");
            }

            // primitive lists
            try (IValue ivalue = IValue.listFrom(true, false)) {
                Assert.assertTrue(ivalue.isList());
                Assert.assertEquals(ivalue.getType(), "bool[]");
                Assert.assertTrue(ivalue.isBooleanList());
                Assert.assertEquals(ivalue.toBooleanArray(), new boolean[] {true, false});
            }
            try (IValue ivalue = IValue.listFrom(1, 2)) {
                Assert.assertTrue(ivalue.isLongList());
                Assert.assertEquals(ivalue.getType(), "int[]");
                Assert.assertEquals(ivalue.toLongArray(), new long[] {1, 2});
            }
            try (IValue ivalue = IValue.listFrom(1d, 2d)) {
                Assert.assertTrue(ivalue.isDoubleList());
                Assert.assertEquals(ivalue.getType(), "float[]");
                Assert.assertEquals(ivalue.toDoubleArray(), new double[] {1d, 2d});
            }

            // tensor list
            try (IValue ivalue = IValue.listFrom(array1, array2)) {
                Assert.assertTrue(ivalue.isTensorList());
                Assert.assertEquals(ivalue.getType(), "Tensor[]");
                NDArray[] ret = ivalue.toTensorArray(manager);
                Assert.assertEquals(ret.length, 2);
                NDList list = ivalue.toNDList(manager);
                Assert.assertEquals(list.size(), 2);
                Assert.assertEquals(list.head(), array1);

                IValue[] iValues = ivalue.toIValueArray();
                Assert.assertEquals(iValues.length, 2);
                Assert.assertTrue(iValues[0].isTensor());
                // native handles of the unwrapped elements must be released
                Arrays.stream(iValues).forEach(IValue::close);
            }

            // dict of str -> tensor
            Map<String, PtNDArray> map = new ConcurrentHashMap<>();
            map.put("data1", array1);
            map.put("data2", array2);
            try (IValue ivalue = IValue.stringMapFrom(map)) {
                Assert.assertTrue(ivalue.isMap());
                Assert.assertEquals(ivalue.getType(), "Dict(str, Tensor)");
                Map<String, IValue> ret = ivalue.toIValueMap();
                Assert.assertEquals(ret.size(), 2);
                NDList list = ivalue.toNDList(manager);
                Assert.assertEquals(list.size(), 2);
                Assert.assertEquals(list.get("data1"), array1);
            }

            // nested IValue lists / tuples
            try (IValue iv1 = IValue.from(1);
                    IValue iv2 = IValue.from(2);
                    IValue ivalue = IValue.listFrom(iv1, iv2)) {
                Assert.assertTrue(ivalue.isList());
                Assert.assertEquals(ivalue.getType(), "int[]");
                IValue[] ret = ivalue.toIValueArray();
                Assert.assertEquals(ret[1].toLong(), 2);
            }
            try (IValue iv1 = IValue.listFrom(array1, array2);
                    IValue iv2 = IValue.from(array2);
                    IValue ivalue = IValue.listFrom(iv1, iv2)) {
                Assert.assertTrue(ivalue.isList());
                NDList list = ivalue.toNDList(manager);
                Assert.assertEquals(list.size(), 3);
            }

            // empty IValue lists are rejected
            Assert.assertThrows(() -> IValue.listFrom(new IValue[0]));

            try (IValue iv1 = IValue.from(array1);
                    IValue iv2 = IValue.from(array2);
                    IValue ivalue = IValue.tupleFrom(iv1, iv2)) {
                NDList list = ivalue.toNDList(manager);
                Assert.assertEquals(list.size(), 2);
            }

            // Test List<List<int>>
            try (IValue iv1 = IValue.listFrom(1, 2);
                    IValue iv2 = IValue.listFrom(2, 1);
                    IValue ivalue = IValue.listFrom(iv1, iv2)) {
                Assert.assertTrue(ivalue.isList());
                Assert.assertEquals(ivalue.getType(), "int[][]");
                IValue[] ret = ivalue.toIValueArray();
                Assert.assertTrue(ret[1].isList());
            }

            // Test python Tuple: (int[], str, float)
            try (IValue iv1 = IValue.listFrom(1, 2);
                    IValue iv2 = IValue.from("data1");
                    IValue iv3 = IValue.from(1f);
                    IValue ivalue = IValue.tupleFrom(iv1, iv2, iv3)) {
                Assert.assertEquals(ivalue.getType(), "(int[], str, float)");
                Assert.assertTrue(ivalue.isTuple());
                IValue[] ret = ivalue.toIValueTuple();
                Assert.assertTrue(ret[0].isList());
                Assert.assertEquals(ret[2].toDouble(), 1d);
            }
        }
    }

    /**
     * Loads a scripted model and verifies calling it with raw {@code IValue}
     * arguments. Uses try-with-resources so the native handles are released
     * even when {@code forward} throws (the previous manual close calls
     * leaked them on the exception path).
     */
    @Test
    public void testIValueModel() throws IOException, ModelException {
        Criteria<NDList, NDList> criteria =
                Criteria.builder()
                        .setTypes(NDList.class, NDList.class)
                        .optModelUrls("https://resources.djl.ai/test-models/ivalue_jit.zip")
                        .optProgress(new ProgressBar())
                        .build();
        try (ZooModel<NDList, NDList> model = criteria.loadModel()) {
            PtSymbolBlock block = (PtSymbolBlock) model.getBlock();
            try (IValue tokens = IValue.listFrom(1, 2, 3);
                    IValue cls = IValue.from(0);
                    IValue sep = IValue.from(4);
                    IValue ret = block.forward(tokens, cls, sep)) {
                long[] actual = ret.toLongArray();
                Assert.assertEquals(actual, new long[] {0, 1, 2, 3, 4});
            }
        }
    }
}
| 4,298 |
575 | <gh_stars>100-1000
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/apps/app_service/dip_px_util.h"
#include "base/check_op.h"
#include "ui/base/layout.h"
#include "ui/display/display.h"
#include "ui/display/screen.h"
#include "ui/gfx/geometry/size.h"
// TODO(crbug.com/826982): plumb through enough information to use one of
// Screen::GetDisplayNearest{Window/View/Point}. That way in multi-monitor
// setups where one screen is hidpi and the other one isn't, we don't always do
// the wrong thing.
namespace {
float GetPrimaryDisplayScaleFactor() {
display::Screen* screen = display::Screen::GetScreen();
if (!screen) {
return 1.0f;
}
return screen->GetPrimaryDisplay().device_scale_factor();
}
// Shared implementation for the dip<->px conversions below. Scales |value| by
// the primary display's scale factor (optionally quantized to the nearest
// supported scale factor) and floors the result; |invert| selects px->dip.
int ConvertBetweenDipAndPx(int value,
                           bool quantize_to_supported_scale_factor,
                           bool invert) {
  float scale_factor = GetPrimaryDisplayScaleFactor();
  if (quantize_to_supported_scale_factor) {
    scale_factor =
        ui::GetScaleForScaleFactor(ui::GetSupportedScaleFactor(scale_factor));
  }
  DCHECK_NE(0.0f, scale_factor);
  const float effective_scale = invert ? 1 / scale_factor : scale_factor;
  return gfx::ScaleToFlooredSize(gfx::Size(value, value), effective_scale)
      .width();
}
} // namespace
namespace apps_util {
// Converts |dip| device-independent pixels to physical pixels using the
// primary display's scale factor (result is floored).
int ConvertDipToPx(int dip, bool quantize_to_supported_scale_factor) {
  return ConvertBetweenDipAndPx(dip, quantize_to_supported_scale_factor, false);
}
// Converts |px| physical pixels to device-independent pixels by applying the
// reciprocal of the primary display's scale factor (result is floored).
int ConvertPxToDip(int px, bool quantize_to_supported_scale_factor) {
  return ConvertBetweenDipAndPx(px, quantize_to_supported_scale_factor, true);
}
// Returns the supported ui::ScaleFactor enum value corresponding to the
// primary display's actual device scale factor.
ui::ScaleFactor GetPrimaryDisplayUIScaleFactor() {
  return ui::GetSupportedScaleFactor(GetPrimaryDisplayScaleFactor());
}
} // namespace apps_util
| 650 |
5,649 | #include "dvlnet/zerotier_lwip.h"
#include <lwip/igmp.h>
#include <lwip/mld6.h>
#include <lwip/sockets.h>
#include <lwip/tcpip.h>
#include <SDL.h>
#ifdef USE_SDL1
#include "utils/sdl2_to_1_2_backports.h"
#else
#include "utils/sdl2_backports.h"
#endif
#include "utils/log.hpp"
#include "dvlnet/zerotier_native.h"
namespace devilution {
namespace net {
// Logs the textual form of an IPv6 socket address. |x| must point at a
// sockaddr_in6; it is typed void* to match the caller's callback signature.
void print_ip6_addr(void *x)
{
	char ipstr[INET6_ADDRSTRLEN];
	auto *in = static_cast<sockaddr_in6 *>(x);
	lwip_inet_ntop(AF_INET6, &(in->sin6_addr), ipstr, INET6_ADDRSTRLEN);
	Log("ZeroTier: ZTS_EVENT_ADDR_NEW_IP6, addr={}", ipstr);
}
// Joins the game's well-known IPv6 multicast group (dvl_multicast_addr) so
// peers on the ZeroTier network can discover each other.
void zt_ip6setup()
{
	ip6_addr_t mcaddr;
	memcpy(mcaddr.addr, dvl_multicast_addr, 16);
	mcaddr.zone = 0;
	// mld6_joingroup manipulates lwIP core state, so the tcpip core lock must
	// be held around the call.
	LOCK_TCPIP_CORE();
	mld6_joingroup(IP6_ADDR_ANY6, &mcaddr);
	UNLOCK_TCPIP_CORE();
}
} // namespace net
} // namespace devilution
| 453 |
523 | <filename>app/src/main/java/io/github/droidkaigi/confsched2017/model/Topic.java
package io.github.droidkaigi.confsched2017.model;
import android.support.annotation.Nullable;
import com.github.gfx.android.orma.annotation.Column;
import com.github.gfx.android.orma.annotation.PrimaryKey;
import com.github.gfx.android.orma.annotation.Table;
import com.google.gson.annotations.SerializedName;
@Table
public class Topic {
    // Server-assigned topic identifier; primary key, not auto-generated locally.
    @PrimaryKey(auto = false)
    @Column(indexed = true)
    @SerializedName("id")
    public int id;
    // Display name of the topic; indexed for lookup.
    @Column(indexed = true)
    @SerializedName("name")
    public String name;
    // Optional free-form extra information; may be null.
    @Column
    @Nullable
    @SerializedName("other")
    public String other;
}
| 254 |
631 | /*****************************************************************************
* *
* OpenNI 1.x Alpha *
* Copyright (C) 2012 PrimeSense Ltd. *
* *
* This file is part of OpenNI. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
*****************************************************************************/
package org.openni;
/**
* Enables a user generator to recognize when the user is posed in a specific position. <BR><BR>
*
* This is used most commonly to recognize when a user has entered a "calibration pose" for the skeleton. It
* is used less frequently if a no-calibration skeleton is available.
*
*/
public class PoseDetectionCapability extends CapabilityBase
{
	/**
	 * Creates the capability for the given production node and wires up the
	 * three native pose-detection observables.
	 *
	 * @param node the production node this capability belongs to
	 * @throws StatusException if native event registration fails
	 */
	public PoseDetectionCapability(ProductionNode node) throws StatusException
	{
		super(node);
		// Events
		// Raised once when a watched pose is first detected for a user.
		poseDetectedEvent = new Observable<PoseDetectionEventArgs>()
		{
			@Override
			protected int registerNative(OutArg<Long> phCallback) throws StatusException
			{
				return NativeMethods.xnRegisterToPoseDetected(toNative(), this, "callback", phCallback);
			}
			@Override
			protected void unregisterNative(long hCallback)
			{
				NativeMethods.xnUnregisterFromPoseDetected(toNative(), hCallback);
			}
			// Invoked reflectively from native code by name; do not rename.
			@SuppressWarnings("unused")
			public void callback(String pose, int user)
			{
				notify(new PoseDetectionEventArgs(pose, user));
			}
		};
		// Raised when a user leaves a previously detected pose.
		outOfPoseEvent = new Observable<PoseDetectionEventArgs>()
		{
			@Override
			protected int registerNative(OutArg<Long> phCallback) throws StatusException
			{
				return NativeMethods.xnRegisterToOutOfPose(toNative(), this, "callback", phCallback);
			}
			@Override
			protected void unregisterNative(long hCallback)
			{
				NativeMethods.xnUnregisterFromOutOfPose(toNative(), hCallback);
			}
			// Invoked reflectively from native code by name; do not rename.
			@SuppressWarnings("unused")
			public void callback(String pose, int user)
			{
				notify(new PoseDetectionEventArgs(pose, user));
			}
		};
		// Raised while detection is in progress, carrying a status code.
		poseDetectionInProgressEvent = new Observable<PoseDetectionInProgressEventArgs>()
		{
			@Override
			protected int registerNative(OutArg<Long> phCallback) throws StatusException
			{
				return NativeMethods.xnRegisterToPoseDetectionInProgress(toNative(), this, "callback", phCallback);
			}
			@Override
			protected void unregisterNative(long hCallback)
			{
				NativeMethods.xnUnregisterFromPoseDetectionInProgress(toNative(), hCallback);
			}
			// Invoked reflectively from native code by name; do not rename.
			@SuppressWarnings("unused")
			public void callback(String pose, int user, int status)
			{
				notify(new PoseDetectionInProgressEventArgs(pose, user, PoseDetectionStatus.fromNative(status)));
			}
		};
	}
	/** @return the number of poses the underlying generator supports. */
	public int getNumberOfPoses()
	{
		return NativeMethods.xnGetNumberOfPoses(toNative());
	}
	/** @return true if the named pose can be detected by this generator. */
	public boolean isPoseSupported(String pose)
	{
		return NativeMethods.xnIsPoseSupported(toNative(),pose);
	}
	/**
	 * Queries the detection status of one pose for one user; results are
	 * written into the supplied out-arguments.
	 *
	 * @throws StatusException if the native query fails
	 */
	public void getPoseStatus(int user, String pose, OutArg<Long> poseTime, OutArg<PoseDetectionStatus> eStatus, OutArg<PoseDetectionState> eState ) throws StatusException
	{
		OutArg<Integer> eInnerStatus = new OutArg<Integer>();
		OutArg<Integer> eInnerState = new OutArg<Integer>();
		int status = NativeMethods.xnGetPoseStatus(toNative(), user, pose, poseTime, eInnerStatus,eInnerState);
		// NOTE(review): the raw ints are converted before the error check; if
		// the native call failed these out-values may be unset — confirm.
		eStatus.value = PoseDetectionStatus.fromNative(eInnerStatus.value);
		eState.value = PoseDetectionState.fromNative(eInnerState.value);
		WrapperUtils.throwOnError(status);
	}
	/** @return the names of all poses available on this generator. */
	public String[] getAllAvailablePoses() throws StatusException
	{
		OutArg<String[]> poses = new OutArg<String[]>();
		int status = NativeMethods.xnGetAllAvailablePoses(toNative(), poses);
		WrapperUtils.throwOnError(status);
		return poses.value;
	}
	/**
	 * @deprecated Out of date. Use startPoseDetection() instead.
	 */
	@Deprecated
	public void StartPoseDetection(String pose, int user) throws StatusException
	{
		int status = NativeMethods.xnStartPoseDetection(toNative(), pose, user);
		WrapperUtils.throwOnError(status);
	}
	/**
	 * @deprecated Out of date. Use stopPoseDetection() instead.
	 */
	@Deprecated
	public void StopPoseDetection(int user) throws StatusException
	{
		int status = NativeMethods.xnStopPoseDetection(toNative(), user);
		WrapperUtils.throwOnError(status);
	}
	/** Begins watching for the named pose on the given user. */
	public void startPoseDetection(String pose, int user) throws StatusException
	{
		int status = NativeMethods.xnStartPoseDetection(toNative(), pose, user);
		WrapperUtils.throwOnError(status);
	}
	/** Stops watching all poses for the given user. */
	public void stopPoseDetection(int user) throws StatusException
	{
		int status = NativeMethods.xnStopPoseDetection(toNative(), user);
		WrapperUtils.throwOnError(status);
	}
	/** Stops watching only the named pose for the given user. */
	public void stopSinglePoseDetection(int user, String pose) throws StatusException
	{
		int status = NativeMethods.xnStopSinglePoseDetection(toNative(), user, pose);
		WrapperUtils.throwOnError(status);
	}
	// Events
	/** @return observable fired when a pose is detected. */
	public IObservable<PoseDetectionEventArgs> getPoseDetectedEvent()
	{
		return poseDetectedEvent;
	}
	/** @return observable fired when a user leaves a pose. */
	public IObservable<PoseDetectionEventArgs> getOutOfPoseEvent()
	{
		return outOfPoseEvent;
	}
	/** @return observable fired with progress updates during detection. */
	public IObservable<PoseDetectionInProgressEventArgs> getPoseDetectionInProgressEvent()
	{
		return poseDetectionInProgressEvent;
	}
	private Observable<PoseDetectionEventArgs> poseDetectedEvent;
	private Observable<PoseDetectionEventArgs> outOfPoseEvent;
	private Observable<PoseDetectionInProgressEventArgs> poseDetectionInProgressEvent;
}
| 2,831 |
3,031 | # encoding: utf-8
"""Unit test suite for docx.image.jpeg module"""
from __future__ import absolute_import, print_function
import pytest
from docx.compat import BytesIO
from docx.image.constants import JPEG_MARKER_CODE, MIME_TYPE
from docx.image.helpers import BIG_ENDIAN, StreamReader
from docx.image.jpeg import (
_App0Marker,
_App1Marker,
Exif,
Jfif,
_JfifMarkers,
Jpeg,
_Marker,
_MarkerFactory,
_MarkerFinder,
_MarkerParser,
_SofMarker,
)
from docx.image.tiff import Tiff
from ..unitutil.mock import (
ANY,
call,
class_mock,
initializer_mock,
instance_mock,
method_mock,
)
class DescribeJpeg(object):
    """Unit tests for the `Jpeg` image header class."""

    def it_knows_its_content_type(self):
        """`content_type` is always the JPEG MIME type."""
        jpeg = Jpeg(None, None, None, None)
        content_type = jpeg.content_type
        assert content_type == MIME_TYPE.JPEG

    def it_knows_its_default_ext(self):
        """`default_ext` is the conventional 'jpg' extension."""
        assert Jpeg(None, None, None, None).default_ext == 'jpg'
class DescribeExif(object):
    """Unit tests for `Exif`, the Exif (APP1) image header parser."""
    def it_can_construct_from_an_exif_stream(self, from_exif_fixture):
        # NOTE(review): `from_exif_fixture` appears to be defined on
        # DescribeJfif below; pytest resolves fixtures per class/module/
        # conftest, so confirm it is actually visible here.
        # fixture ----------------------
        stream_, _JfifMarkers_, cx, cy, horz_dpi, vert_dpi = (
            from_exif_fixture
        )
        # exercise ---------------------
        exif = Exif.from_stream(stream_)
        # verify -----------------------
        _JfifMarkers_.from_stream.assert_called_once_with(stream_)
        assert isinstance(exif, Exif)
        assert exif.px_width == cx
        assert exif.px_height == cy
        assert exif.horz_dpi == horz_dpi
        assert exif.vert_dpi == vert_dpi
class DescribeJfif(object):
    """Unit tests for `Jfif`, the JFIF (APP0) image header parser."""
    def it_can_construct_from_a_jfif_stream(self, from_jfif_fixture):
        stream_, _JfifMarkers_, cx, cy, horz_dpi, vert_dpi = (
            from_jfif_fixture
        )
        jfif = Jfif.from_stream(stream_)
        _JfifMarkers_.from_stream.assert_called_once_with(stream_)
        assert isinstance(jfif, Jfif)
        assert jfif.px_width == cx
        assert jfif.px_height == cy
        assert jfif.horz_dpi == horz_dpi
        assert jfif.vert_dpi == vert_dpi
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def from_exif_fixture(self, stream_, _JfifMarkers_, jfif_markers_):
        # Stubs marker attributes so Exif reads dpi from the APP1 marker.
        px_width, px_height = 111, 222
        horz_dpi, vert_dpi = 333, 444
        jfif_markers_.sof.px_width = px_width
        jfif_markers_.sof.px_height = px_height
        jfif_markers_.app1.horz_dpi = horz_dpi
        jfif_markers_.app1.vert_dpi = vert_dpi
        return (
            stream_, _JfifMarkers_, px_width, px_height, horz_dpi, vert_dpi
        )
    @pytest.fixture
    def from_jfif_fixture(self, stream_, _JfifMarkers_, jfif_markers_):
        # Stubs marker attributes so Jfif reads dpi from the APP0 marker.
        px_width, px_height = 111, 222
        horz_dpi, vert_dpi = 333, 444
        jfif_markers_.sof.px_width = px_width
        jfif_markers_.sof.px_height = px_height
        jfif_markers_.app0.horz_dpi = horz_dpi
        jfif_markers_.app0.vert_dpi = vert_dpi
        return (
            stream_, _JfifMarkers_, px_width, px_height, horz_dpi, vert_dpi
        )
    @pytest.fixture
    def _JfifMarkers_(self, request, jfif_markers_):
        _JfifMarkers_ = class_mock(request, 'docx.image.jpeg._JfifMarkers')
        _JfifMarkers_.from_stream.return_value = jfif_markers_
        return _JfifMarkers_
    @pytest.fixture
    def jfif_markers_(self, request):
        return instance_mock(request, _JfifMarkers)
    @pytest.fixture
    def stream_(self, request):
        return instance_mock(request, BytesIO)
class Describe_JfifMarkers(object):
    """Unit tests for `_JfifMarkers`, the parsed JFIF marker collection."""
    def it_can_construct_from_a_jfif_stream(
        self, stream_, _MarkerParser_, _JfifMarkers__init_, soi_, app0_, sof_, sos_
    ):
        marker_lst = [soi_, app0_, sof_, sos_]
        jfif_markers = _JfifMarkers.from_stream(stream_)
        _MarkerParser_.from_stream.assert_called_once_with(stream_)
        _JfifMarkers__init_.assert_called_once_with(ANY, marker_lst)
        assert isinstance(jfif_markers, _JfifMarkers)
    def it_can_find_the_APP0_marker(self, app0_fixture):
        jfif_markers, app0_ = app0_fixture
        app0 = jfif_markers.app0
        assert app0 is app0_
    def it_can_find_the_APP1_marker(self, app1_fixture):
        jfif_markers, app1_ = app1_fixture
        app1 = jfif_markers.app1
        assert app1 is app1_
    def it_raises_if_it_cant_find_the_APP0_marker(self, no_app0_fixture):
        jfif_markers = no_app0_fixture
        with pytest.raises(KeyError):
            jfif_markers.app0
    def it_raises_if_it_cant_find_the_APP1_marker(self, no_app1_fixture):
        jfif_markers = no_app1_fixture
        with pytest.raises(KeyError):
            jfif_markers.app1
    def it_can_find_the_SOF_marker(self, sof_fixture):
        jfif_markers, sof_ = sof_fixture
        sof = jfif_markers.sof
        assert sof is sof_
    def it_raises_if_it_cant_find_the_SOF_marker(self, no_sof_fixture):
        jfif_markers = no_sof_fixture
        with pytest.raises(KeyError):
            jfif_markers.sof
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def app0_(self, request):
        return instance_mock(
            request, _App0Marker, marker_code=JPEG_MARKER_CODE.APP0
        )
    @pytest.fixture
    def app1_(self, request):
        return instance_mock(
            request, _App1Marker, marker_code=JPEG_MARKER_CODE.APP1
        )
    @pytest.fixture
    def app0_fixture(self, soi_, app0_, eoi_):
        markers = (soi_, app0_, eoi_)
        jfif_markers = _JfifMarkers(markers)
        return jfif_markers, app0_
    @pytest.fixture
    def app1_fixture(self, soi_, app1_, eoi_):
        markers = (soi_, app1_, eoi_)
        jfif_markers = _JfifMarkers(markers)
        return jfif_markers, app1_
    @pytest.fixture
    def eoi_(self, request):
        return instance_mock(
            request, _SofMarker, marker_code=JPEG_MARKER_CODE.EOI
        )
    @pytest.fixture
    def _JfifMarkers__init_(self, request):
        return initializer_mock(request, _JfifMarkers)
    @pytest.fixture
    def marker_parser_(self, request, markers_all_):
        marker_parser_ = instance_mock(request, _MarkerParser)
        marker_parser_.iter_markers.return_value = markers_all_
        return marker_parser_
    @pytest.fixture
    def _MarkerParser_(self, request, marker_parser_):
        _MarkerParser_ = class_mock(request, 'docx.image.jpeg._MarkerParser')
        _MarkerParser_.from_stream.return_value = marker_parser_
        return _MarkerParser_
    @pytest.fixture
    def markers_all_(self, request, soi_, app0_, sof_, sos_, eoi_):
        return [soi_, app0_, sof_, sos_, eoi_]
    @pytest.fixture
    def no_app0_fixture(self, soi_, eoi_):
        markers = (soi_, eoi_)
        return _JfifMarkers(markers)
    @pytest.fixture
    def no_app1_fixture(self, soi_, eoi_):
        markers = (soi_, eoi_)
        return _JfifMarkers(markers)
    @pytest.fixture
    def no_sof_fixture(self, soi_, eoi_):
        markers = (soi_, eoi_)
        return _JfifMarkers(markers)
    @pytest.fixture
    def sof_(self, request):
        return instance_mock(
            request, _SofMarker, marker_code=JPEG_MARKER_CODE.SOF0
        )
    @pytest.fixture
    def sof_fixture(self, soi_, sof_, eoi_):
        markers = (soi_, sof_, eoi_)
        jfif_markers = _JfifMarkers(markers)
        return jfif_markers, sof_
    @pytest.fixture
    def soi_(self, request):
        return instance_mock(
            request, _Marker, marker_code=JPEG_MARKER_CODE.SOI
        )
    @pytest.fixture
    def sos_(self, request):
        return instance_mock(
            request, _Marker, marker_code=JPEG_MARKER_CODE.SOS
        )
    @pytest.fixture
    def stream_(self, request):
        return instance_mock(request, BytesIO)
class Describe_Marker(object):
    """Unit tests for `_Marker`, the generic JFIF marker base class."""
    def it_can_construct_from_a_stream_and_offset(self, from_stream_fixture):
        stream, marker_code, offset, _Marker__init_, length = from_stream_fixture
        marker = _Marker.from_stream(stream, marker_code, offset)
        _Marker__init_.assert_called_once_with(ANY, marker_code, offset, length)
        assert isinstance(marker, _Marker)
    # fixtures -------------------------------------------------------
    @pytest.fixture(params=[
        (JPEG_MARKER_CODE.SOI, 2, 0),
        (JPEG_MARKER_CODE.APP0, 4, 16),
    ])
    def from_stream_fixture(self, request, _Marker__init_):
        marker_code, offset, length = request.param
        # SOI has no segment (length 0); APP0's length is read from the stream.
        bytes_ = b'\xFF\xD8\xFF\xE0\x00\x10'
        stream_reader = StreamReader(BytesIO(bytes_), BIG_ENDIAN)
        return stream_reader, marker_code, offset, _Marker__init_, length
    @pytest.fixture
    def _Marker__init_(self, request):
        return initializer_mock(request, _Marker)
class Describe_App0Marker(object):
    """Unit tests for `_App0Marker`, which carries JFIF dpi information."""
    def it_can_construct_from_a_stream_and_offset(self, _App0Marker__init_):
        bytes_ = b'\x00\x10JFIF\x00\x01\x01\x01\x00\x2A\x00\x18'
        marker_code, offset, length = JPEG_MARKER_CODE.APP0, 0, 16
        density_units, x_density, y_density = 1, 42, 24
        stream = StreamReader(BytesIO(bytes_), BIG_ENDIAN)
        app0_marker = _App0Marker.from_stream(stream, marker_code, offset)
        _App0Marker__init_.assert_called_once_with(
            ANY, marker_code, offset, length, density_units, x_density, y_density
        )
        assert isinstance(app0_marker, _App0Marker)
    def it_knows_the_image_dpi(self, dpi_fixture):
        density_units, x_density, y_density, horz_dpi, vert_dpi = dpi_fixture
        app0 = _App0Marker(
            None, None, None, density_units, x_density, y_density
        )
        assert app0.horz_dpi == horz_dpi
        assert app0.vert_dpi == vert_dpi
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def _App0Marker__init_(self, request):
        return initializer_mock(request, _App0Marker)
    # density_units: 0 = aspect-ratio only (72 assumed), 1 = dpi, 2 = dpcm.
    @pytest.fixture(params=[
        (0, 100, 200, 72, 72),
        (1, 100, 200, 100, 200),
        (2, 100, 200, 254, 508),
    ])
    def dpi_fixture(self, request):
        density_units, x_density, y_density, horz_dpi, vert_dpi = (
            request.param
        )
        return density_units, x_density, y_density, horz_dpi, vert_dpi
class Describe_App1Marker(object):
    """Unit tests for `_App1Marker`, the Exif (APP1) marker wrapper."""
    def it_can_construct_from_a_stream_and_offset(
        self, _App1Marker__init_, _tiff_from_exif_segment_
    ):
        bytes_ = b'\x00\x42Exif\x00\x00'
        marker_code, offset, length = JPEG_MARKER_CODE.APP1, 0, 66
        horz_dpi, vert_dpi = 42, 24
        stream = StreamReader(BytesIO(bytes_), BIG_ENDIAN)
        app1_marker = _App1Marker.from_stream(stream, marker_code, offset)
        _tiff_from_exif_segment_.assert_called_once_with(stream, offset, length)
        _App1Marker__init_.assert_called_once_with(
            ANY, marker_code, offset, length, horz_dpi, vert_dpi
        )
        assert isinstance(app1_marker, _App1Marker)
    def it_can_construct_from_non_Exif_APP1_segment(self, _App1Marker__init_):
        # A non-Exif APP1 segment falls back to the default 72 dpi.
        bytes_ = b'\x00\x42Foobar'
        marker_code, offset, length = JPEG_MARKER_CODE.APP1, 0, 66
        stream = StreamReader(BytesIO(bytes_), BIG_ENDIAN)
        app1_marker = _App1Marker.from_stream(stream, marker_code, offset)
        _App1Marker__init_.assert_called_once_with(
            ANY, marker_code, offset, length, 72, 72
        )
        assert isinstance(app1_marker, _App1Marker)
    def it_gets_a_tiff_from_its_Exif_segment_to_help_construct(
            self, get_tiff_fixture):
        stream, offset, length = get_tiff_fixture[:3]
        BytesIO_, segment_bytes, substream_ = get_tiff_fixture[3:6]
        Tiff_, tiff_ = get_tiff_fixture[6:]
        tiff = _App1Marker._tiff_from_exif_segment(stream, offset, length)
        BytesIO_.assert_called_once_with(segment_bytes)
        Tiff_.from_stream.assert_called_once_with(substream_)
        assert tiff is tiff_
    def it_knows_the_image_dpi(self):
        horz_dpi, vert_dpi = 42, 24
        app1 = _App1Marker(None, None, None, horz_dpi, vert_dpi)
        assert app1.horz_dpi == horz_dpi
        assert app1.vert_dpi == vert_dpi
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def _App1Marker__init_(self, request):
        return initializer_mock(request, _App1Marker)
    @pytest.fixture
    def BytesIO_(self, request, substream_):
        return class_mock(
            request, 'docx.image.jpeg.BytesIO', return_value=substream_
        )
    @pytest.fixture
    def get_tiff_fixture(self, request, BytesIO_, substream_, Tiff_, tiff_):
        # First 8 bytes are filler skipped by the Exif header offset.
        bytes_ = b'xfillerxMM\x00*\x00\x00\x00\x42'
        stream_reader = StreamReader(BytesIO(bytes_), BIG_ENDIAN)
        offset, segment_length, segment_bytes = 0, 16, bytes_[8:]
        return (
            stream_reader, offset, segment_length, BytesIO_, segment_bytes,
            substream_, Tiff_, tiff_
        )
    @pytest.fixture
    def substream_(self, request):
        return instance_mock(request, BytesIO)
    @pytest.fixture
    def Tiff_(self, request, tiff_):
        Tiff_ = class_mock(request, 'docx.image.jpeg.Tiff')
        Tiff_.from_stream.return_value = tiff_
        return Tiff_
    @pytest.fixture
    def tiff_(self, request):
        return instance_mock(request, Tiff, horz_dpi=42, vert_dpi=24)
    @pytest.fixture
    def _tiff_from_exif_segment_(self, request, tiff_):
        return method_mock(
            request, _App1Marker, '_tiff_from_exif_segment', autospec=False,
            return_value=tiff_
        )
class Describe_SofMarker(object):
    """Unit tests for `_SofMarker`, which carries the image pixel size."""
    def it_can_construct_from_a_stream_and_offset(self, request, _SofMarker__init_):
        bytes_ = b'\x00\x11\x00\x00\x2A\x00\x18'
        marker_code, offset, length = JPEG_MARKER_CODE.SOF0, 0, 17
        px_width, px_height = 24, 42
        stream = StreamReader(BytesIO(bytes_), BIG_ENDIAN)
        sof_marker = _SofMarker.from_stream(stream, marker_code, offset)
        _SofMarker__init_.assert_called_once_with(
            ANY, marker_code, offset, length, px_width, px_height
        )
        assert isinstance(sof_marker, _SofMarker)
    def it_knows_the_image_width_and_height(self):
        sof = _SofMarker(None, None, None, 42, 24)
        assert sof.px_width == 42
        assert sof.px_height == 24
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def _SofMarker__init_(self, request):
        return initializer_mock(request, _SofMarker)
class Describe_MarkerFactory(object):
    """Unit tests for `_MarkerFactory`, mapping marker codes to classes."""
    def it_constructs_the_appropriate_marker_object(self, call_fixture):
        marker_code, stream_, offset_, marker_cls_ = call_fixture
        marker = _MarkerFactory(marker_code, stream_, offset_)
        marker_cls_.from_stream.assert_called_once_with(
            stream_, marker_code, offset_
        )
        assert marker is marker_cls_.from_stream.return_value
    # fixtures -------------------------------------------------------
    @pytest.fixture(params=[
        JPEG_MARKER_CODE.APP0,
        JPEG_MARKER_CODE.APP1,
        JPEG_MARKER_CODE.SOF0,
        JPEG_MARKER_CODE.SOF7,
        JPEG_MARKER_CODE.SOS,
    ])
    def call_fixture(
            self, request, stream_, offset_, _App0Marker_, _App1Marker_,
            _SofMarker_, _Marker_):
        # Selects the marker class the factory is expected to dispatch to.
        marker_code = request.param
        if marker_code == JPEG_MARKER_CODE.APP0:
            marker_cls_ = _App0Marker_
        elif marker_code == JPEG_MARKER_CODE.APP1:
            marker_cls_ = _App1Marker_
        elif marker_code in JPEG_MARKER_CODE.SOF_MARKER_CODES:
            marker_cls_ = _SofMarker_
        else:
            marker_cls_ = _Marker_
        return marker_code, stream_, offset_, marker_cls_
    @pytest.fixture
    def _App0Marker_(self, request):
        return class_mock(request, 'docx.image.jpeg._App0Marker')
    @pytest.fixture
    def _App1Marker_(self, request):
        return class_mock(request, 'docx.image.jpeg._App1Marker')
    @pytest.fixture
    def _Marker_(self, request):
        return class_mock(request, 'docx.image.jpeg._Marker')
    @pytest.fixture
    def offset_(self, request):
        return instance_mock(request, int)
    @pytest.fixture
    def _SofMarker_(self, request):
        return class_mock(request, 'docx.image.jpeg._SofMarker')
    @pytest.fixture
    def stream_(self, request):
        return instance_mock(request, BytesIO)
class Describe_MarkerFinder(object):
    """Unit tests for `_MarkerFinder`, which scans a stream for markers."""
    def it_can_construct_from_a_stream(self, stream_, _MarkerFinder__init_):
        marker_finder = _MarkerFinder.from_stream(stream_)
        _MarkerFinder__init_.assert_called_once_with(ANY, stream_)
        assert isinstance(marker_finder, _MarkerFinder)
    def it_can_find_the_next_marker_after_a_given_offset(self, next_fixture):
        marker_finder, start, expected_code_and_offset = next_fixture
        marker_code, segment_offset = marker_finder.next(start)
        assert (marker_code, segment_offset) == expected_code_and_offset
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def _MarkerFinder__init_(self, request):
        return initializer_mock(request, _MarkerFinder)
    # (start offset, expected marker code, expected segment offset); stray
    # \xFF bytes in the stream must be skipped, not treated as markers.
    @pytest.fixture(params=[
        (0, JPEG_MARKER_CODE.SOI, 2),
        (1, JPEG_MARKER_CODE.APP0, 4),
        (2, JPEG_MARKER_CODE.APP0, 4),
        (3, JPEG_MARKER_CODE.EOI, 12),
        (4, JPEG_MARKER_CODE.EOI, 12),
        (6, JPEG_MARKER_CODE.EOI, 12),
        (8, JPEG_MARKER_CODE.EOI, 12),
    ])
    def next_fixture(self, request):
        start, marker_code, segment_offset = request.param
        bytes_ = b'\xFF\xD8\xFF\xE0\x00\x01\xFF\x00\xFF\xFF\xFF\xD9'
        stream_reader = StreamReader(BytesIO(bytes_), BIG_ENDIAN)
        marker_finder = _MarkerFinder(stream_reader)
        expected_code_and_offset = (marker_code, segment_offset)
        return marker_finder, start, expected_code_and_offset
    @pytest.fixture
    def stream_(self, request):
        return instance_mock(request, BytesIO)
class Describe_MarkerParser(object):
    """Unit tests for `_MarkerParser`, which iterates markers in a stream."""
    def it_can_construct_from_a_jfif_stream(
        self, stream_, StreamReader_, _MarkerParser__init_, stream_reader_
    ):
        marker_parser = _MarkerParser.from_stream(stream_)
        StreamReader_.assert_called_once_with(stream_, BIG_ENDIAN)
        _MarkerParser__init_.assert_called_once_with(ANY, stream_reader_)
        assert isinstance(marker_parser, _MarkerParser)
    def it_can_iterate_over_the_jfif_markers_in_its_stream(
            self, iter_markers_fixture):
        (marker_parser, stream_, _MarkerFinder_, marker_finder_,
         _MarkerFactory_, marker_codes, offsets,
         marker_lst) = iter_markers_fixture
        markers = [marker for marker in marker_parser.iter_markers()]
        _MarkerFinder_.from_stream.assert_called_once_with(stream_)
        # Each search resumes after the previous marker's segment.
        assert marker_finder_.next.call_args_list == [
            call(0), call(2), call(20)
        ]
        assert _MarkerFactory_.call_args_list == [
            call(marker_codes[0], stream_, offsets[0]),
            call(marker_codes[1], stream_, offsets[1]),
            call(marker_codes[2], stream_, offsets[2]),
        ]
        assert markers == marker_lst
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def app0_(self, request):
        return instance_mock(request, _App0Marker, segment_length=16)
    @pytest.fixture
    def eoi_(self, request):
        return instance_mock(request, _Marker, segment_length=0)
    @pytest.fixture
    def iter_markers_fixture(
            self, stream_reader_, _MarkerFinder_, marker_finder_,
            _MarkerFactory_, soi_, app0_, eoi_):
        marker_parser = _MarkerParser(stream_reader_)
        offsets = [2, 4, 22]
        marker_lst = [soi_, app0_, eoi_]
        marker_finder_.next.side_effect = [
            (JPEG_MARKER_CODE.SOI, offsets[0]),
            (JPEG_MARKER_CODE.APP0, offsets[1]),
            (JPEG_MARKER_CODE.EOI, offsets[2]),
        ]
        marker_codes = [
            JPEG_MARKER_CODE.SOI, JPEG_MARKER_CODE.APP0, JPEG_MARKER_CODE.EOI
        ]
        return (
            marker_parser, stream_reader_, _MarkerFinder_, marker_finder_,
            _MarkerFactory_, marker_codes, offsets, marker_lst
        )
    @pytest.fixture
    def _MarkerFactory_(self, request, soi_, app0_, eoi_):
        return class_mock(
            request, 'docx.image.jpeg._MarkerFactory',
            side_effect=[soi_, app0_, eoi_]
        )
    @pytest.fixture
    def _MarkerFinder_(self, request, marker_finder_):
        _MarkerFinder_ = class_mock(request, 'docx.image.jpeg._MarkerFinder')
        _MarkerFinder_.from_stream.return_value = marker_finder_
        return _MarkerFinder_
    @pytest.fixture
    def marker_finder_(self, request):
        return instance_mock(request, _MarkerFinder)
    @pytest.fixture
    def _MarkerParser__init_(self, request):
        return initializer_mock(request, _MarkerParser)
    @pytest.fixture
    def soi_(self, request):
        return instance_mock(request, _Marker, segment_length=0)
    @pytest.fixture
    def stream_(self, request):
        return instance_mock(request, BytesIO)
    @pytest.fixture
    def StreamReader_(self, request, stream_reader_):
        return class_mock(
            request, 'docx.image.jpeg.StreamReader',
            return_value=stream_reader_
        )
    @pytest.fixture
    def stream_reader_(self, request):
        return instance_mock(request, StreamReader)
| 10,155 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.php.project.ui.actions.support;
import java.io.File;
import org.netbeans.modules.php.api.util.FileUtils;
import org.netbeans.modules.php.project.PhpProject;
import org.netbeans.modules.php.project.ProjectPropertiesSupport;
import org.netbeans.modules.php.project.runconfigs.RunConfigScript;
import org.netbeans.modules.php.project.runconfigs.validation.RunConfigScriptValidator;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.util.Lookup;
/**
* Action implementation for SCRIPT configuration.
* It means running and debugging scripts.
* @author <NAME>
*/
class ConfigActionScript extends ConfigAction {
    // Project sources root; used to resolve the file selected in the IDE.
    private final FileObject sourceRoot;
    protected ConfigActionScript(PhpProject project) {
        super(project);
        sourceRoot = ProjectPropertiesSupport.getSourcesDirectory(project);
        assert sourceRoot != null;
    }
    @Override
    public boolean isProjectValid() {
        // validate with indexFileNeeded == true (project-level run).
        return isValid(RunConfigScriptValidator.validateConfigAction(RunConfigScript.forProject(project), true) == null);
    }
    @Override
    public boolean isFileValid() {
        return isValid(RunConfigScriptValidator.validateConfigAction(RunConfigScript.forProject(project), false) == null);
    }
    // Opens the project customizer when the configuration is invalid so the
    // user can fix it; returns the validity unchanged.
    private boolean isValid(boolean valid) {
        if (!valid) {
            showCustomizer();
        }
        return valid;
    }
    @Override
    public boolean isRunFileEnabled(Lookup context) {
        FileObject file = CommandUtils.fileForContextOrSelectedNodes(context, sourceRoot);
        return file != null && FileUtils.isPhpFile(file);
    }
    @Override
    public boolean isDebugFileEnabled(Lookup context) {
        // Debugging additionally requires a registered debugger implementation.
        if (DebugStarterFactory.getInstance() == null) {
            return false;
        }
        return isRunFileEnabled(context);
    }
    @Override
    public void runProject() {
        createFileRunner(null).run();
    }
    @Override
    public void debugProject() {
        createFileRunner(null).debug();
    }
    @Override
    public void runFile(Lookup context) {
        createFileRunner(context).run();
    }
    @Override
    public void debugFile(Lookup context) {
        createFileRunner(context).debug();
    }
    // Resolves the script to execute: the configured index file when no
    // context is given (project action), otherwise the selected file.
    private File getStartFile(Lookup context) {
        FileObject file;
        if (context == null) {
            file = FileUtil.toFileObject(RunConfigScript.forProject(project).getIndexFile());
        } else {
            file = CommandUtils.fileForContextOrSelectedNodes(context, sourceRoot);
        }
        assert file != null : "Start file must be found";
        return FileUtil.toFile(file);
    }
    // Builds a FileRunner configured from the project's SCRIPT run config.
    private FileRunner createFileRunner(Lookup context) {
        RunConfigScript configScript = RunConfigScript.forProject(project);
        return new FileRunner(getStartFile(context))
                .project(project)
                .command(configScript.getInterpreter())
                .workDir(configScript.getWorkDir())
                .phpArgs(configScript.getOptions())
                .fileArgs(configScript.getArguments());
    }
}
| 1,363 |
32,544 | <reponame>DBatOWL/tutorials
package com.baeldung.web.util;
public class SpecSearchCriteria {
private String key;
private SearchOperation operation;
private Object value;
private boolean orPredicate;
public SpecSearchCriteria() {
}
public SpecSearchCriteria(final String key, final SearchOperation operation, final Object value) {
super();
this.key = key;
this.operation = operation;
this.value = value;
}
public SpecSearchCriteria(final String orPredicate, final String key, final SearchOperation operation, final Object value) {
super();
this.orPredicate = orPredicate != null && orPredicate.equals(SearchOperation.OR_PREDICATE_FLAG);
this.key = key;
this.operation = operation;
this.value = value;
}
public SpecSearchCriteria(String key, String operation, String prefix, String value, String suffix) {
SearchOperation op = SearchOperation.getSimpleOperation(operation.charAt(0));
if (op != null) {
if (op == SearchOperation.EQUALITY) { // the operation may be complex operation
final boolean startWithAsterisk = prefix != null && prefix.contains(SearchOperation.ZERO_OR_MORE_REGEX);
final boolean endWithAsterisk = suffix != null && suffix.contains(SearchOperation.ZERO_OR_MORE_REGEX);
if (startWithAsterisk && endWithAsterisk) {
op = SearchOperation.CONTAINS;
} else if (startWithAsterisk) {
op = SearchOperation.ENDS_WITH;
} else if (endWithAsterisk) {
op = SearchOperation.STARTS_WITH;
}
}
}
this.key = key;
this.operation = op;
this.value = value;
}
public String getKey() {
return key;
}
public void setKey(final String key) {
this.key = key;
}
public SearchOperation getOperation() {
return operation;
}
public void setOperation(final SearchOperation operation) {
this.operation = operation;
}
public Object getValue() {
return value;
}
public void setValue(final Object value) {
this.value = value;
}
public boolean isOrPredicate() {
return orPredicate;
}
public void setOrPredicate(boolean orPredicate) {
this.orPredicate = orPredicate;
}
}
| 1,000 |
361 | <gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alipay.sofa.registry.server.session.remoting.handler;
import com.alipay.sofa.registry.common.model.Node.NodeType;
import com.alipay.sofa.registry.common.model.sessionserver.CancelAddressRequest;
import com.alipay.sofa.registry.core.model.Result;
import com.alipay.sofa.registry.log.Logger;
import com.alipay.sofa.registry.log.LoggerFactory;
import com.alipay.sofa.registry.remoting.Channel;
import com.alipay.sofa.registry.server.session.registry.Registry;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
/**
*
* @author shangyu.wh
* @version $Id: CancelHandler.java, v 0.1 2017-11-30 15:29 shangyu.wh Exp $
*/
/**
 * Server-side handler for {@link CancelAddressRequest}s coming from clients:
 * cancels (unregisters) all registrations associated with the given connect ids.
 */
public class CancelAddressRequestHandler extends AbstractServerHandler<CancelAddressRequest> {
    private static final Logger LOGGER          = LoggerFactory
                                                    .getLogger(CancelAddressRequestHandler.class);
    private static final Logger EXCHANGE_LOGGER = LoggerFactory.getLogger("SESSION-EXCHANGE",
                                                    "[CancelAddressRequestHandler]");
    @Autowired
    private Registry            sessionRegistry;
    @Override
    public HandlerType getType() {
        // Processes requests (as opposed to lifecycle/connection handlers).
        return HandlerType.PROCESSER;
    }
    @Override
    public Class interest() {
        // Message type this handler is registered for.
        return CancelAddressRequest.class;
    }
    @Override
    protected NodeType getConnectNodeType() {
        // Requests are accepted from client nodes only.
        return NodeType.CLIENT;
    }
    @Override
    public Object reply(Channel channel, CancelAddressRequest cancelProcessRequest) {
        Result result = new Result();
        try {
            EXCHANGE_LOGGER.info("request={}", cancelProcessRequest);
            List<String> connectIds = cancelProcessRequest.getConnectIds();
            // Empty/missing ids is a client error: reported back, not thrown.
            if (connectIds == null || connectIds.isEmpty()) {
                LOGGER.error("Request connectIds cannot be null or empty!");
                result.setMessage("Request connectIds cannot be null or empty!");
                result.setSuccess(false);
                return result;
            }
            sessionRegistry.cancel(connectIds);
        } catch (Exception e) {
            // Unexpected failures are logged and rethrown so the transport layer reports them.
            LOGGER.error("Cancel Address Request error!", e);
            throw new RuntimeException("Cancel Address Request error!", e);
        }
        result.setSuccess(true);
        return result;
    }
}
432 | /* Profile counter container type.
Copyright (C) 2017-2018 Free Software Foundation, Inc.
Contributed by <NAME>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "profile-count.h"
#include "options.h"
#include "tree.h"
#include "basic-block.h"
#include "cfg.h"
#include "function.h"
#include "gimple.h"
#include "data-streamer.h"
#include "cgraph.h"
#include "wide-int.h"
#include "sreal.h"
/* Dump THIS to F.  */
void
profile_count::dump (FILE *f) const
{
  if (!initialized_p ())
    fprintf (f, "uninitialized");
  else
    {
      fprintf (f, "%" PRId64, m_val);
      /* Annotate the raw counter with the origin/precision of the value.  */
      if (m_quality == profile_guessed_local)
	fprintf (f, " (estimated locally)");
      else if (m_quality == profile_guessed_global0)
	fprintf (f, " (estimated locally, globally 0)");
      else if (m_quality == profile_guessed_global0adjusted)
	fprintf (f, " (estimated locally, globally 0 adjusted)");
      else if (m_quality == profile_adjusted)
	fprintf (f, " (adjusted)");
      else if (m_quality == profile_afdo)
	fprintf (f, " (auto FDO)");
      else if (m_quality == profile_guessed)
	fprintf (f, " (guessed)");
    }
}
/* Dump THIS to stderr (convenience wrapper for use from a debugger).  */
void
profile_count::debug () const
{
  dump (stderr);
  fprintf (stderr, "\n");
}
/* Return true if THIS differs from OTHER; tolerate small differences.  */
bool
profile_count::differs_from_p (profile_count other) const
{
  gcc_checking_assert (compatible_p (other));
  if (!initialized_p () || !other.initialized_p ())
    return false;
  /* Unsigned wraparound: exactly one of the two differences is small when
     the values are within 100 of each other, so this tests |a - b| < 100.  */
  if ((uint64_t)m_val - (uint64_t)other.m_val < 100
      || (uint64_t)other.m_val - (uint64_t)m_val < 100)
    return false;
  if (!other.m_val)
    return true;
  /* Otherwise require the values to be within 1% of each other.  */
  int64_t ratio = (int64_t)m_val * 100 / other.m_val;
  return ratio < 99 || ratio > 101;
}
/* Stream THIS from IB.  Read order must match stream_out: value, then quality.  */
profile_count
profile_count::stream_in (struct lto_input_block *ib)
{
  profile_count ret;
  ret.m_val = streamer_read_gcov_count (ib);
  ret.m_quality = (profile_quality) streamer_read_uhwi (ib);
  return ret;
}
/* Stream THIS to OB.  Write order (value, quality) must match stream_in.  */
void
profile_count::stream_out (struct output_block *ob)
{
  streamer_write_gcov_count (ob, m_val);
  streamer_write_uhwi (ob, m_quality);
}
/* Stream THIS to OB (raw LTO stream variant; same order as stream_in).  */
void
profile_count::stream_out (struct lto_output_stream *ob)
{
  streamer_write_gcov_count_stream (ob, m_val);
  streamer_write_uhwi_stream (ob, m_quality);
}
/* Dump THIS to F.  */
void
profile_probability::dump (FILE *f) const
{
  if (!initialized_p ())
    fprintf (f, "uninitialized");
  else
    {
      /* Make difference between 0.00 as a roundoff error and actual 0.
	 Similarly for 1.  */
      if (m_val == 0)
	fprintf (f, "never");
      else if (m_val == max_probability)
	fprintf (f, "always");
      else
	fprintf (f, "%3.1f%%", (double)m_val * 100 / max_probability);
      /* Annotate the number with the origin/precision of the value.  */
      if (m_quality == profile_adjusted)
	fprintf (f, " (adjusted)");
      else if (m_quality == profile_afdo)
	fprintf (f, " (auto FDO)");
      else if (m_quality == profile_guessed)
	fprintf (f, " (guessed)");
    }
}
/* Dump THIS to stderr (convenience wrapper for use from a debugger).  */
void
profile_probability::debug () const
{
  dump (stderr);
  fprintf (stderr, "\n");
}
/* Return true if THIS differs from OTHER; tolerate small differences.  */
bool
profile_probability::differs_from_p (profile_probability other) const
{
  if (!initialized_p () || !other.initialized_p ())
    return false;
  /* Unsigned wraparound: exactly one of the two differences is small when the
     values are within max_probability / 1000 (0.1%) of each other.  The second
     condition previously compared OTHER.m_val against max_probability instead
     of m_val, making the tolerance test asymmetric; it now mirrors
     profile_count::differs_from_p.  */
  if ((uint64_t)m_val - (uint64_t)other.m_val < max_probability / 1000
      || (uint64_t)other.m_val - (uint64_t)m_val < max_probability / 1000)
    return false;
  if (!other.m_val)
    return true;
  /* Otherwise require the values to be within 1% of each other.  */
  int64_t ratio = (int64_t)m_val * 100 / other.m_val;
  return ratio < 99 || ratio > 101;
}
/* Return true if THIS differs significantly from OTHER, i.e. by more than
   half of the probability range.  */
bool
profile_probability::differs_lot_from_p (profile_probability other) const
{
  if (!initialized_p () || !other.initialized_p ())
    return false;
  /* Absolute difference, computed branch-free on unsigned values.  */
  uint32_t d = m_val > other.m_val ? m_val - other.m_val : other.m_val - m_val;
  return d > max_probability / 2;
}
/* Stream THIS from IB.  Read order must match stream_out: value, then quality.  */
profile_probability
profile_probability::stream_in (struct lto_input_block *ib)
{
  profile_probability ret;
  ret.m_val = streamer_read_uhwi (ib);
  ret.m_quality = (profile_quality) streamer_read_uhwi (ib);
  return ret;
}
/* Stream THIS to OB.  Write order (value, quality) must match stream_in.  */
void
profile_probability::stream_out (struct output_block *ob)
{
  streamer_write_uhwi (ob, m_val);
  streamer_write_uhwi (ob, m_quality);
}
/* Stream THIS to OB (raw LTO stream variant; same order as stream_in).  */
void
profile_probability::stream_out (struct lto_output_stream *ob)
{
  streamer_write_uhwi_stream (ob, m_val);
  streamer_write_uhwi_stream (ob, m_quality);
}
/* Compute RES=(a*b + c/2)/c capping and return false if overflow happened.
   Uses 128-bit intermediate arithmetic; the +c/2 term rounds to nearest.  */
bool
slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
{
  FIXED_WIDE_INT (128) tmp = a;
  bool overflow;
  tmp = wi::udiv_floor (wi::umul (tmp, b, &overflow) + (c / 2), c);
  /* 128-bit multiply of two 64-bit values cannot overflow.  */
  gcc_checking_assert (!overflow);
  if (wi::fits_uhwi_p (tmp))
    {
      *res = tmp.to_uhwi ();
      return true;
    }
  /* Result does not fit: cap at the maximum representable value.  */
  *res = (uint64_t) -1;
  return false;
}
/* Return count as frequency within FUN scaled in range 0 to REG_FREQ_MAX
   Used for legacy code and should not be used anymore.  */
int
profile_count::to_frequency (struct function *fun) const
{
  if (!initialized_p ())
    return BB_FREQ_MAX;
  if (*this == profile_count::zero ())
    return 0;
  gcc_assert (REG_BR_PROB_BASE == BB_FREQ_MAX
	      && fun->cfg->count_max.initialized_p ());
  /* Express this count as a fraction of the hottest count in the function.  */
  profile_probability prob = probability_in (fun->cfg->count_max);
  if (!prob.initialized_p ())
    return REG_BR_PROB_BASE;
  return prob.to_reg_br_prob_base ();
}
/* Return count as frequency within FUN scaled in range 0 to CGRAPH_FREQ_MAX
   where CGRAPH_FREQ_BASE means that count equals to entry block count.
   Used for legacy code and should not be used anymore.  */
int
profile_count::to_cgraph_frequency (profile_count entry_bb_count) const
{
  if (!initialized_p () || !entry_bb_count.initialized_p ())
    return CGRAPH_FREQ_BASE;
  if (*this == profile_count::zero ())
    return 0;
  gcc_checking_assert (entry_bb_count.initialized_p ());
  uint64_t scale;
  /* Scale by CGRAPH_FREQ_BASE / entry count, guarding against a zero entry
     count and against 64-bit overflow; saturate at CGRAPH_FREQ_MAX.  */
  if (!safe_scale_64bit (!entry_bb_count.m_val ? m_val + 1 : m_val,
			 CGRAPH_FREQ_BASE, MAX (1, entry_bb_count.m_val), &scale))
    return CGRAPH_FREQ_MAX;
  return MIN (scale, CGRAPH_FREQ_MAX);
}
/* Return THIS/IN as sreal value.  If KNOWN is non-NULL, set *KNOWN to false
   (and return 1) when either count is uninitialized.  */
sreal
profile_count::to_sreal_scale (profile_count in, bool *known) const
{
  if (!initialized_p () || !in.initialized_p ())
    {
      if (known)
	*known = false;
      return 1;
    }
  if (known)
    *known = true;
  if (*this == profile_count::zero ())
    return 0;
  if (!in.m_val)
    {
      if (!m_val)
	return 1;
      /* Denominator is zero but numerator is not: return an arbitrary
	 scale larger than 1.  */
      return m_val * 4;
    }
  return (sreal)m_val / (sreal)in.m_val;
}
/* We want to scale profile across function boundary from NUM to DEN.
   Take care of the side case when DEN is zeros.  We still want to behave
   sanely here which means
     - scale to profile_count::zero () if NUM is profile_count::zero
     - do not affect anything if NUM == DEN
     - preserve counter value but adjust quality in other cases.  */
void
profile_count::adjust_for_ipa_scaling (profile_count *num,
				       profile_count *den)
{
  /* Scaling is no-op if NUM and DEN are the same.  */
  if (*num == *den)
    return;
  /* Scaling to zero is always zero.  */
  if (*num == profile_count::zero ())
    return;
  /* If den is non-zero we are safe.  */
  if (den->force_nonzero () == *den)
    return;
  /* Force both to non-zero so we do not push profiles to 0 when
     both num == 0 and den == 0.  */
  *den = den->force_nonzero ();
  *num = num->force_nonzero ();
}
/* THIS is a count of bb which is known to be executed IPA times.
   Combine this information into bb counter.  This means returning IPA
   if it is nonzero, not changing anything if IPA is uninitialized
   and if IPA is zero, turning THIS into corresponding local profile with
   global0.  */
profile_count
profile_count::combine_with_ipa_count (profile_count ipa)
{
  ipa = ipa.ipa ();
  if (ipa.nonzero_p ())
    return ipa;
  if (!ipa.initialized_p () || *this == profile_count::zero ())
    return *this;
  if (ipa == profile_count::zero ())
    /* IPA says the function is never executed: keep the local shape of the
       profile but mark it globally zero.  */
    return this->global0 ();
  return this->global0adjusted ();
}
/* The profiling runtime uses gcov_type, which is usually 64bit integer.
   Conversions back and forth are used to read the coverage and get it
   into internal representation.  Values above max_count are capped
   (and reported when dumping is enabled).  */
profile_count
profile_count::from_gcov_type (gcov_type v)
{
  profile_count ret;
  gcc_checking_assert (v >= 0);
  if (dump_file && v >= (gcov_type)max_count)
    fprintf (dump_file,
	     "Capping gcov count %" PRId64 " to max_count %" PRId64 "\n",
	     (int64_t) v, (int64_t) max_count);
  ret.m_val = MIN (v, (gcov_type)max_count);
  ret.m_quality = profile_precise;
  return ret;
}
/* COUNT1 times event happens with *THIS probability, COUNT2 times OTHER
   happens with COUNT2 probability.  Return probability that either *THIS or
   OTHER happens.  */
profile_probability
profile_probability::combine_with_count (profile_count count1,
					 profile_probability other,
					 profile_count count2) const
{
  /* If probabilities are same, we are done.
     If counts are nonzero we can distribute accordingly.  In remaining
     cases just average the values and hope for the best.  */
  if (*this == other || count1 == count2
      || (count2 == profile_count::zero ()
	  && !(count1 == profile_count::zero ())))
    return *this;
  if (count1 == profile_count::zero () && !(count2 == profile_count::zero ()))
    return other;
  else if (count1.nonzero_p () || count2.nonzero_p ())
    /* Weighted average by the relative frequency of the two events.  */
    return *this * count1.probability_in (count1 + count2)
	   + other * count2.probability_in (count1 + count2);
  else
    return *this * profile_probability::even ()
	   + other * profile_probability::even ();
}
| 3,945 |
1,379 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from config import METRIC_TYPE
from config import MILVUS_HOST
from config import MILVUS_PORT
from config import VECTOR_DIMENSION
from logs import LOGGER
from pymilvus import Collection
from pymilvus import CollectionSchema
from pymilvus import connections
from pymilvus import DataType
from pymilvus import FieldSchema
from pymilvus import utility
class MilvusHelper:
    """Thin wrapper around the basic PyMilvus operations.

    Supports:
      1. connecting to a Milvus server
      2. creating / dropping a collection
      3. inserting entities
      4. creating an index
      5. searching
      6. counting entities

    NOTE: on any failure every method logs the error and terminates the
    process via ``sys.exit(1)`` (original behavior, deliberately preserved).
    """

    def __init__(self):
        # Connect once at construction time; host/port come from config.
        try:
            self.collection = None
            connections.connect(host=MILVUS_HOST, port=MILVUS_PORT)
            LOGGER.debug(
                f"Successfully connect to Milvus with IP:{MILVUS_HOST} and PORT:{MILVUS_PORT}"
            )
        except Exception as e:
            LOGGER.error(f"Failed to connect Milvus: {e}")
            sys.exit(1)

    def set_collection(self, collection_name):
        # Bind self.collection to an existing collection; exits if it is missing.
        try:
            if self.has_collection(collection_name):
                self.collection = Collection(name=collection_name)
            else:
                raise Exception(
                    f"There is no collection named:{collection_name}")
        except Exception as e:
            LOGGER.error(f"Failed to set collection in Milvus: {e}")
            sys.exit(1)

    def has_collection(self, collection_name):
        # Return whether Milvus has the collection
        try:
            return utility.has_collection(collection_name)
        except Exception as e:
            LOGGER.error(f"Failed to check state of collection in Milvus: {e}")
            sys.exit(1)

    def create_collection(self, collection_name):
        # Create the Milvus collection if it does not exist, otherwise bind to it.
        try:
            if not self.has_collection(collection_name):
                # NOTE: the keyword was previously misspelled "descrition" and
                # silently ignored by FieldSchema; "description" is correct.
                field1 = FieldSchema(
                    name="id",
                    dtype=DataType.INT64,
                    description="int64",
                    is_primary=True,
                    auto_id=True)
                field2 = FieldSchema(
                    name="embedding",
                    dtype=DataType.FLOAT_VECTOR,
                    description="speaker embeddings",
                    dim=VECTOR_DIMENSION,
                    is_primary=False)
                schema = CollectionSchema(
                    fields=[field1, field2], description="embeddings info")
                self.collection = Collection(
                    name=collection_name, schema=schema)
                LOGGER.debug(f"Create Milvus collection: {collection_name}")
            else:
                self.set_collection(collection_name)
            return "OK"
        except Exception as e:
            LOGGER.error(f"Failed to create collection in Milvus: {e}")
            sys.exit(1)

    def insert(self, collection_name, vectors):
        # Batch insert vectors into the collection; returns the generated primary keys.
        try:
            self.create_collection(collection_name)
            data = [vectors]
            self.set_collection(collection_name)
            mr = self.collection.insert(data)
            ids = mr.primary_keys
            self.collection.load()
            LOGGER.debug(
                f"Insert vectors to Milvus in collection: {collection_name} with {len(vectors)} rows"
            )
            return ids
        except Exception as e:
            LOGGER.error(f"Failed to insert data to Milvus: {e}")
            sys.exit(1)

    def create_index(self, collection_name):
        # Create an IVF_SQ8 index on the "embedding" field of the collection.
        # (The previous comment claimed IVF_FLAT; the params below use IVF_SQ8.)
        try:
            self.set_collection(collection_name)
            default_index = {
                "index_type": "IVF_SQ8",
                "metric_type": METRIC_TYPE,
                "params": {
                    "nlist": 16384
                }
            }
            status = self.collection.create_index(
                field_name="embedding", index_params=default_index)
            if not status.code:
                LOGGER.debug(
                    f"Successfully create index in collection:{collection_name} with param:{default_index}"
                )
                return status
            else:
                raise Exception(status.message)
        except Exception as e:
            LOGGER.error(f"Failed to create index: {e}")
            sys.exit(1)

    def delete_collection(self, collection_name):
        # Drop the Milvus collection entirely.
        try:
            self.set_collection(collection_name)
            self.collection.drop()
            LOGGER.debug("Successfully drop collection!")
            return "ok"
        except Exception as e:
            LOGGER.error(f"Failed to drop collection: {e}")
            sys.exit(1)

    def search_vectors(self, collection_name, vectors, top_k):
        # ANN search over the "embedding" field; returns the raw pymilvus results.
        try:
            self.set_collection(collection_name)
            search_params = {
                "metric_type": METRIC_TYPE,
                "params": {
                    "nprobe": 16
                }
            }
            res = self.collection.search(
                vectors,
                anns_field="embedding",
                param=search_params,
                limit=top_k)
            LOGGER.debug(f"Successfully search in collection: {res}")
            return res
        except Exception as e:
            LOGGER.error(f"Failed to search vectors in Milvus: {e}")
            sys.exit(1)

    def count(self, collection_name):
        # Return the number of entities stored in the collection.
        try:
            self.set_collection(collection_name)
            num = self.collection.num_entities
            LOGGER.debug(
                f"Successfully get the num:{num} of the collection:{collection_name}"
            )
            return num
        except Exception as e:
            LOGGER.error(f"Failed to count vectors in Milvus: {e}")
            sys.exit(1)
| 3,186 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-2cph-fjvp-h5ww",
"modified": "2022-05-01T07:07:56Z",
"published": "2022-05-01T07:07:56Z",
"aliases": [
"CVE-2006-3315"
],
"details": "PHP remote file inclusion vulnerability in page.php in an unspecified RahnemaCo.com product, possibly eShop, allows remote attackers to execute arbitrary PHP code via a URL in the osCsid parameter.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2006-3315"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/27365"
},
{
"type": "WEB",
"url": "http://archives.neohapsis.com/archives/bugtraq/2006-06/0387.html"
},
{
"type": "WEB",
"url": "http://securityreason.com/securityalert/1176"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/18435"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 491 |
14,668 | <gh_stars>1000+
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "device/fido/cros/discovery.h"
#include "base/bind.h"
#include "base/logging.h"
#include "chromeos/dbus/u2f/u2f_client.h"
#include "components/device_event_log/device_event_log.h"
namespace device {
// Discovery for the ChromeOS platform authenticator, reached via
// chromeos::U2FClient.  |get_assertion_request| is kept so the legacy-U2F
// credential check can run before authenticators are reported.
FidoChromeOSDiscovery::FidoChromeOSDiscovery(
    base::RepeatingCallback<uint32_t()> generate_request_id_callback,
    absl::optional<CtapGetAssertionRequest> get_assertion_request)
    : FidoDiscoveryBase(FidoTransportProtocol::kInternal),
      generate_request_id_callback_(generate_request_id_callback),
      get_assertion_request_(std::move(get_assertion_request)),
      weak_factory_(this) {}

FidoChromeOSDiscovery::~FidoChromeOSDiscovery() {}
// When set, UV (user verification) is disabled in MaybeAddAuthenticator and
// only the power-button mode can be reported.
void FidoChromeOSDiscovery::set_require_power_button_mode(bool require) {
  require_power_button_mode_ = require;
}
// Kicks off asynchronous discovery; without an observer there is nobody to
// report results to, so bail out early.
void FidoChromeOSDiscovery::Start() {
  DCHECK(!authenticator_);
  if (!observer()) {
    return;
  }
  chromeos::U2FClient::IsU2FServiceAvailable(
      base::BindOnce(&FidoChromeOSDiscovery::OnU2FServiceAvailable,
                     weak_factory_.GetWeakPtr()));
}
// Continuation of Start(): reports failure when the u2f service is missing,
// otherwise either checks for a legacy U2F credential (get-assertion flow)
// or proceeds directly to querying authenticator capabilities.
void FidoChromeOSDiscovery::OnU2FServiceAvailable(bool u2f_service_available) {
  if (!u2f_service_available) {
    FIDO_LOG(DEBUG) << "Device does not support ChromeOSAuthenticator";
    observer()->DiscoveryStarted(this, /*success=*/false);
    return;
  }

  if (get_assertion_request_) {
    ChromeOSAuthenticator::HasLegacyU2fCredentialForGetAssertionRequest(
        *get_assertion_request_,
        base::BindOnce(&FidoChromeOSDiscovery::OnHasLegacyU2fCredential,
                       weak_factory_.GetWeakPtr()));
    return;
  }
  CheckAuthenticators();
}
// First step of the capability chain: query power-button mode, then UV
// availability (see CheckUVPlatformAuthenticatorAvailable).
void FidoChromeOSDiscovery::CheckAuthenticators() {
  ChromeOSAuthenticator::IsPowerButtonModeEnabled(base::BindOnce(
      &FidoChromeOSDiscovery::CheckUVPlatformAuthenticatorAvailable,
      weak_factory_.GetWeakPtr()));
}
// Second step of the capability chain: carries the power-button result
// forward and queries UV platform-authenticator availability.
void FidoChromeOSDiscovery::CheckUVPlatformAuthenticatorAvailable(
    bool is_enabled) {
  ChromeOSAuthenticator::IsUVPlatformAuthenticatorAvailable(base::BindOnce(
      &FidoChromeOSDiscovery::MaybeAddAuthenticator, weak_factory_.GetWeakPtr(),
      /*power_button_enabled=*/is_enabled));
}
// Final step: creates the authenticator when at least one of UV or
// power-button mode is usable, and reports discovery success/failure.
void FidoChromeOSDiscovery::MaybeAddAuthenticator(bool power_button_enabled,
                                                  bool uv_available) {
  // Caller opted into power-button-only operation; ignore UV support.
  if (require_power_button_mode_) {
    uv_available = false;
  }
  if (!uv_available && !power_button_enabled) {
    observer()->DiscoveryStarted(this, /*success=*/false);
    return;
  }
  authenticator_ = std::make_unique<ChromeOSAuthenticator>(
      generate_request_id_callback_,
      ChromeOSAuthenticator::Config{
          .uv_available = uv_available,
          .power_button_enabled = power_button_enabled});
  observer()->DiscoveryStarted(this, /*success=*/true, {authenticator_.get()});
}
// Get-assertion flow: a legacy U2F credential enables the power-button path;
// skips the IsPowerButtonModeEnabled query and goes straight to the UV check.
void FidoChromeOSDiscovery::OnHasLegacyU2fCredential(bool has_credential) {
  DCHECK(!authenticator_);
  ChromeOSAuthenticator::IsUVPlatformAuthenticatorAvailable(base::BindOnce(
      &FidoChromeOSDiscovery::MaybeAddAuthenticator, weak_factory_.GetWeakPtr(),
      /*power_button_enabled=*/has_credential));
}
} // namespace device
| 1,252 |
364 | package com.linkedin.dagli.xgboost;
import com.linkedin.dagli.dag.DAG;
import com.linkedin.dagli.dag.DAG1x1;
import com.linkedin.dagli.dag.DAG2x1;
import com.linkedin.dagli.generator.Constant;
import com.linkedin.dagli.math.distribution.DiscreteDistribution;
import com.linkedin.dagli.math.vector.DenseFloatArrayVector;
import com.linkedin.dagli.placeholder.Placeholder;
import com.linkedin.dagli.util.collection.LinkedStack;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
public class XGBoostToyTest {
private static final int FEATURE_COUNT = 5;
private static List<DenseFloatArrayVector> getData(int count, long seed) {
ArrayList<DenseFloatArrayVector> result = new ArrayList<>(count);
Random r = new Random(seed);
for (int i = 0; i < count; i++) {
float[] vec = new float[FEATURE_COUNT];
for (int j = 0; j < vec.length; j++) {
vec[j] = r.nextBoolean() ? 1 : 0;
}
result.add(DenseFloatArrayVector.wrap(vec));
}
return result;
}
private static boolean getLabel(DenseFloatArrayVector vector) {
return vector.norm(1) % 2 == 0;
}
private static List<String> getLabels(List<DenseFloatArrayVector> vectors) {
return vectors.stream().map(vec -> getLabel(vec) ? "YES" : "NO").collect(Collectors.toList());
}
//public static void main(String[] args) {
@Test
public void test() {
Placeholder<DenseFloatArrayVector> vectorPlaceholder = new Placeholder<>();
Placeholder<String> labelPlaceholder = new Placeholder<>();
XGBoostClassification<String> xgBoostClassification =
new XGBoostClassification<String>().withFeaturesInput(vectorPlaceholder).withLabelInput(labelPlaceholder)
.withThreadCount(1);
DAG2x1<String, DenseFloatArrayVector, DiscreteDistribution<String>> dag =
DAG.withPlaceholders(labelPlaceholder, vectorPlaceholder).withOutput(xgBoostClassification);
List<DenseFloatArrayVector> data = getData(100000, 0);
List<String> labels = getLabels(data);
DAG1x1.Prepared<DenseFloatArrayVector, DiscreteDistribution<String>> preparedDAG =
dag.prepare(labels, data).withGeneratorAsInput1(Constant.nullValue());
List<DenseFloatArrayVector> evalData = getData(100000, 1);
for (DenseFloatArrayVector vec : evalData) {
preparedDAG.apply(vec);
}
}
@Test
public void testEarlyStopping() {
Placeholder<DenseFloatArrayVector> vectorPlaceholder = new Placeholder<>();
Placeholder<String> labelPlaceholder = new Placeholder<>();
XGBoostClassification<String> xgBoostClassification =
new XGBoostClassification<String>().withFeaturesInput(vectorPlaceholder).withLabelInput(labelPlaceholder)
.withEarlyStopping(true)
.withRounds(100)
.withThreadCount(1);
DAG2x1<String, DenseFloatArrayVector, DiscreteDistribution<String>> dag =
DAG.withPlaceholders(labelPlaceholder, vectorPlaceholder).withOutput(xgBoostClassification);
List<DenseFloatArrayVector> data = getData(100000, 0);
List<String> labels = getLabels(data);
DAG2x1.Prepared<String, DenseFloatArrayVector, DiscreteDistribution<String>> preparedDAG =
dag.prepare(labels, data);
// Take advantage of the fact that we expect training to end after two rounds, leaving the "version"
// less than 100 (which is actually ~50 rounds).
assertTrue(preparedDAG.producers(XGBoostClassification.Prepared.class)
.map(LinkedStack::peek)
.findFirst()
.get()
.getBooster()
.getVersion() < 100);
}
} | 1,353 |
518 | <reponame>e-ntro-py/desktop-app
{
"name": "KanbanFlow",
"category": "Task & Project Management",
"start_url": "https://kanbanflow.com/login",
"icons": [
{
"src": "https://cdn.filestackcontent.com/Nycf9amQw6cwBfl3eT3M"
},
{
"src": "https://cdn.filestackcontent.com/2YYMJoHeTQGFW4M7QJZj",
"platform": "browserx"
}
],
"theme_color": "#ff7c03",
"scope": "https://kanbanflow.com"
}
| 206 |
1,014 | <reponame>MobileAir/FinanceDatabase
[
"Basic Materials",
"Communication Services",
"Consumer Cyclical",
"Energy",
"Financial",
"Financial Services",
"Healthcare",
"Industrials",
"Real Estate",
"Technology",
"Utilities"
] | 115 |
931 | <reponame>siddhi-244/CompetitiveProgrammingQuestionBank
// C++ program to demonstrate Hierarchical Inheritance
// Hierarchical Inheritance - It is defined as the inheritance in which more that one derived classes
// are inherited from a single base class.
#include<bits/stdc++.h>
using namespace std;
// base class
// Base class: behaviors shared by all animals in this demo.
class Animal{
    public:
    void eat(){
        cout<<"I can eat!"<<endl;
    }
    void sleep(){
        cout<<"I can sleep!"<<endl;
    }
};
// derived class 1
// Derived class 1: inherits eat()/sleep() from Animal and adds meow().
class Cat:public Animal{
    public:
    void meow(){
        cout<<"I can meow! meow! meow!"<<endl;
    }
};
// derived class 2
// Derived class 2: inherits eat()/sleep() from Animal and adds moo().
class Cow:public Animal{
    public:
    void moo(){
        cout<<"I can moo! moo! moo!"<<endl;
    }
};
int main(){
// Create object of Cat class
Cat cat1;
cout<<"Cat Class:"<<endl;
//Calling members of the base class
cat1.eat();
cat1.sleep();
//Calling members of the derived class
cat1.meow();
// Create object of Cow class
Cow cow1;
cout<<"\nCow Class:"<<endl;
//Calling members of the base class
cow1.eat();
cow1.sleep();
//Calling members of the derived class
cow1.moo();
return 0;
} | 465 |
2,039 | /*-
*
* * Copyright 2015 Skymind,Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use
* this file except in compliance with the License. * You may obtain a copy of the License at * *
* http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. * See the License for the specific language governing permissions and * limitations under
* the License.
*
*
*/
package org.nd4j.context;
import lombok.extern.slf4j.Slf4j;
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.util.Properties;
/**
* Holds properties for nd4j to be used across different modules
*
* @author <NAME>
*/
@Slf4j
public class Nd4jContext implements Serializable {

    // Backing store: system properties captured at class load, optionally
    // augmented via updateProperties().
    private Properties conf;
    // Eagerly-initialized singleton (thread-safe by class initialization).
    private static Nd4jContext INSTANCE = new Nd4jContext();

    private Nd4jContext() {
        conf = new Properties();
        conf.putAll(System.getProperties());
    }

    public static Nd4jContext getInstance() {
        return INSTANCE;
    }

    /**
     * Load the additional properties from an input stream and load all system properties.
     * System properties are re-applied after the stream, so they win on key conflicts.
     * Failures are logged and swallowed (best-effort).
     *
     * @param inputStream stream in java.util.Properties format; not closed by this method
     */
    public void updateProperties(InputStream inputStream) {
        try {
            conf.load(inputStream);
            conf.putAll(System.getProperties());
        } catch (IOException e) {
            log.warn("Error loading system properties from input stream", e);
        }
    }

    /**
     * Get the configuration for nd4j.
     *
     * @return the live (mutable) Properties instance backing this context
     */
    public Properties getConf() {
        return conf;
    }
}
| 620 |
458 | <reponame>niranjanchintu/hyscale
/**
* Copyright 2019 <NAME>, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.hyscale.commons.utils;
import java.util.Base64;
import org.apache.commons.lang3.StringUtils;
import io.hyscale.commons.constants.ToolConstants;
import io.hyscale.commons.models.Credentials;
/**
 * Base64 helpers for encoding/decoding {@code username<separator>password}
 * credential strings.
 */
public class EncodeDecodeUtil {

    private static final char DEFAULT_SEPARATOR = ':';

    private EncodeDecodeUtil() {
        // utility class: no instances
    }

    /**
     * Encodes {@code userName:password} as Base64; returns null when either
     * part is blank.
     */
    public static String getEncodedCredentials(String userName, String password) {
        if (StringUtils.isBlank(userName) || StringUtils.isBlank(password)) {
            return null;
        }
        String tokenString = userName + ToolConstants.COLON + password;
        return Base64.getEncoder().encodeToString(tokenString.getBytes());
    }

    /**
     * Decodes Base64-encoded {@code username<separator>password} credentials.
     * When the separator is absent (or at index 0) the returned Credentials
     * is left unpopulated; a blank input yields null.
     *
     * @param encodedCredentials base64 encoding of username + separator + password
     * @param separator          character splitting username from password
     * @return Credentials
     */
    public static Credentials getDecodedCredentials(String encodedCredentials, char separator) {
        if (StringUtils.isBlank(encodedCredentials)) {
            return null;
        }
        Credentials credentials = new Credentials();
        String decodedAuth = decode(encodedCredentials);
        int splitAt = decodedAuth.indexOf(separator);
        if (splitAt > 0) {
            credentials.setUsername(decodedAuth.substring(0, splitAt));
            credentials.setPassword(decodedAuth.substring(splitAt + 1));
        }
        return credentials;
    }

    /** Variant of {@link #getDecodedCredentials(String, char)} using {@code ':'}. */
    public static Credentials getDecodedCredentials(String encodedCredentials) {
        return getDecodedCredentials(encodedCredentials, DEFAULT_SEPARATOR);
    }

    /** Base64-decodes the given string; blank input is returned unchanged. */
    public static String decode(String encodedString) {
        if (StringUtils.isBlank(encodedString)) {
            return encodedString;
        }
        byte[] raw = Base64.getDecoder().decode(encodedString);
        return new String(raw);
    }
}
| 908 |
307 | <reponame>davoustp/classloader-leak-prevention<gh_stars>100-1000
package se.jiderhamn.classloader.leak.prevention.cleanup;
import java.lang.management.ManagementFactory;
import javax.management.Notification;
import javax.management.NotificationEmitter;
import javax.management.NotificationListener;
/**
* Test case for {@link MXBeanNotificationListenersCleanUp}
* @author <NAME>
*/
public class MXBeanNotificationListenersCleanUpTest extends ClassLoaderPreMortemCleanUpTestBase<MXBeanNotificationListenersCleanUp> {
  /**
   * Registers a listener (whose class is loaded by the classloader under test)
   * on the memory MXBean. The platform MBean infrastructure then references the
   * listener — and through it the classloader — until the cleanup under test
   * removes the registration.
   */
  @Override
  protected void triggerLeak() throws Exception {
    ((NotificationEmitter) ManagementFactory.getMemoryMXBean()).addNotificationListener(
        new CustomNotificationListener(), null, null);
  }

  /** Intentionally empty listener; only its class identity matters for the leak. */
  static class CustomNotificationListener implements NotificationListener {
    @Override
    public void handleNotification(Notification notification, Object handback) {
    }
  }
}
700 | /* $OpenBSD: msg.c,v 1.20 2020/10/18 11:32:01 djm Exp $ */
/*
* Copyright (c) 2002 <NAME>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "includes.h"
#include <sys/types.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include "sshbuf.h"
#include "ssherr.h"
#include "log.h"
#include "atomicio.h"
#include "msg.h"
#include "misc.h"
/*
 * Writes one framed message to fd: a 4-byte big-endian length field covering
 * the type byte plus the payload, then the type byte, then the contents of m.
 * Returns 0 on success, -1 on any short or failed write.
 */
int
ssh_msg_send(int fd, u_char type, struct sshbuf *m)
{
	u_char buf[5];
	u_int mlen = sshbuf_len(m);
	debug3_f("type %u", (unsigned int)type & 0xff);
	/* length prefix counts the type byte in addition to the payload */
	put_u32(buf, mlen + 1);
	buf[4] = type;		/* 1st byte of payload is mesg-type */
	/* atomicio retries partial writes; anything short of sizeof(buf) is fatal */
	if (atomicio(vwrite, fd, buf, sizeof(buf)) != sizeof(buf)) {
		error_f("write: %s", strerror(errno));
		return (-1);
	}
	if (atomicio(vwrite, fd, sshbuf_mutable_ptr(m), mlen) != mlen) {
		error_f("write: %s", strerror(errno));
		return (-1);
	}
	return (0);
}
/*
 * Reads one framed message from fd into m (counterpart of ssh_msg_send):
 * a 4-byte big-endian length followed by that many payload bytes, the first
 * of which is the message type. Returns 0 on success, -1 on error.
 */
int
ssh_msg_recv(int fd, struct sshbuf *m)
{
	u_char buf[4], *p;
	u_int msg_len;
	int r;
	debug3("ssh_msg_recv entering");
	/* read the 4-byte length header; EPIPE (peer gone) is reported silently */
	if (atomicio(read, fd, buf, sizeof(buf)) != sizeof(buf)) {
		if (errno != EPIPE)
			error_f("read header: %s", strerror(errno));
		return (-1);
	}
	msg_len = get_u32(buf);
	/* reject lengths beyond the buffer's capacity before allocating */
	if (msg_len > sshbuf_max_size(m)) {
		error_f("read: bad msg_len %u", msg_len);
		return (-1);
	}
	sshbuf_reset(m);
	/* reserve msg_len bytes in m and read the payload directly into them */
	if ((r = sshbuf_reserve(m, msg_len, &p)) != 0) {
		error_fr(r, "reserve");
		return -1;
	}
	if (atomicio(read, fd, p, msg_len) != msg_len) {
		error_f("read: %s", strerror(errno));
		return (-1);
	}
	return (0);
}
| 1,063 |
363 | /* Copyright 2013 predic8 GmbH, www.predic8.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package com.predic8.membrane.core.interceptor.oauth2client;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.predic8.membrane.annot.MCAttribute;
import com.predic8.membrane.annot.MCChildElement;
import com.predic8.membrane.annot.MCElement;
import com.predic8.membrane.core.Constants;
import com.predic8.membrane.core.Router;
import com.predic8.membrane.core.exchange.AbstractExchange;
import com.predic8.membrane.core.exchange.Exchange;
import com.predic8.membrane.core.exchange.snapshots.AbstractExchangeSnapshot;
import com.predic8.membrane.core.http.Header;
import com.predic8.membrane.core.http.Request;
import com.predic8.membrane.core.http.Response;
import com.predic8.membrane.core.interceptor.AbstractInterceptorWithSession;
import com.predic8.membrane.core.interceptor.LogInterceptor;
import com.predic8.membrane.core.interceptor.Outcome;
import com.predic8.membrane.core.interceptor.oauth2.OAuth2AnswerParameters;
import com.predic8.membrane.core.interceptor.oauth2.OAuth2Statistics;
import com.predic8.membrane.core.interceptor.oauth2.ParamNames;
import com.predic8.membrane.core.interceptor.oauth2.authorizationservice.AuthorizationService;
import com.predic8.membrane.core.interceptor.oauth2.tokengenerators.JwtGenerator;
import com.predic8.membrane.core.interceptor.server.WebServerInterceptor;
import com.predic8.membrane.core.interceptor.session.Session;
import com.predic8.membrane.core.interceptor.session.SessionManager;
import com.predic8.membrane.core.rules.RuleKey;
import com.predic8.membrane.core.util.URIFactory;
import com.predic8.membrane.core.util.URLParamUtil;
import com.predic8.membrane.core.util.Util;
import org.apache.commons.codec.binary.Base64;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Required;
import javax.annotation.concurrent.GuardedBy;
import java.io.IOException;
import java.math.BigInteger;
import java.security.SecureRandom;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
* @description Allows only authorized HTTP requests to pass through. Unauthorized requests get a redirect to the
* authorization server as response.
* @topic 6. Security
*/
@MCElement(name = "oauth2Resource2")
public class OAuth2Resource2Interceptor extends AbstractInterceptorWithSession {
public static final String OAUTH2_ANSWER = "oauth2Answer";
public static final String OA2REDIRECT = "oa2redirect";
public static final String OA2REDIRECT_PREFIX = "_redirect_for_oa2redirect_";
private static Logger log = LoggerFactory.getLogger(OAuth2Resource2Interceptor.class.getName());
private final Cache<String, Object> synchronizers = CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.MINUTES).build();
@GuardedBy("publicURLs")
private List<String> publicURLs = new ArrayList<>();
private AuthorizationService auth;
private OAuth2Statistics statistics;
private Cache<String,Boolean> validTokens = CacheBuilder.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES).build();
private int revalidateTokenAfter = -1;
private WebServerInterceptor wsi;
private URIFactory uriFactory;
private boolean firstInitWhenDynamicAuthorizationService;
private boolean initPublicURLsOnTheFly = false;
private OriginalExchangeStore originalExchangeStore;
    /**
     * Applies defaults: when no store for the original exchange is configured,
     * fall back to keeping it in a cookie.
     */
    @Override
    public void init() throws Exception {
        super.init();
        if (originalExchangeStore == null)
            originalExchangeStore = new CookieOriginialExchangeStore();
    }
    /** Returns all configured public URLs joined into one space-separated string. */
    public String getPublicURL() {
        synchronized (publicURLs) {
            return String.join(" ", publicURLs);
        }
    }
@MCAttribute
public void setPublicURL(String publicURL) {
synchronized (publicURLs) {
publicURLs.clear();
for (String url : publicURL.split("[ \t]+"))
publicURLs.add(url);
}
}
    /** Returns the configured authorization server connection. */
    public AuthorizationService getAuthService() {
        return auth;
    }
    /** Sets the authorization server connection (required). */
    @Required
    @MCChildElement(order = 10)
    public void setAuthService(AuthorizationService auth) {
        this.auth = auth;
    }
    public int getRevalidateTokenAfter() {
        return revalidateTokenAfter;
    }
    /**
     * @description time in seconds until an OAuth2 access token is revalidated with the authorization server. This is disabled for values < 0
     * @default -1
     */
    @MCAttribute
    public void setRevalidateTokenAfter(int revalidateTokenAfter) {
        this.revalidateTokenAfter = revalidateTokenAfter;
    }
@Override
public void init(Router router) throws Exception {
name = "OAuth 2 Client";
setFlow(Flow.Set.REQUEST_RESPONSE);
super.init(router);
auth.init(router);
statistics = new OAuth2Statistics();
uriFactory = router.getUriFactory();
synchronized (publicURLs) {
if (publicURLs.size() == 0)
initPublicURLsOnTheFly = true;
else for (int i = 0; i < publicURLs.size(); i++)
publicURLs.set(i, normalizePublicURL(publicURLs.get(i)));
}
firstInitWhenDynamicAuthorizationService = getAuthService().supportsDynamicRegistration();
if(!getAuthService().supportsDynamicRegistration())
firstInitWhenDynamicAuthorizationService = false;
}
    /** Request entry point; all logic lives in {@link #handleRequestInternal2}. */
    @Override
    public final Outcome handleRequestInternal(Exchange exc) throws Exception {
        return handleRequestInternal2(exc);
    }
private Outcome handleRequestInternal2(Exchange exc) throws Exception {
if(isFaviconRequest(exc)){
exc.setResponse(Response.badRequest().build());
return Outcome.RETURN;
}
Session session = getSessionManager().getSession(exc);
simplifyMultipleOAuth2Answers(session);
if(isOAuth2RedirectRequest(exc))
handleOriginalRequest(exc);
if (session == null || !session.isVerified()) {
String auth = exc.getRequest().getHeader().getFirstValue(Header.AUTHORIZATION);
if (auth != null && auth.substring(0, 7).equalsIgnoreCase("Bearer ")) {
session = getSessionManager().getSession(exc);
session.put(ParamNames.ACCESS_TOKEN, auth.substring(7));
OAuth2AnswerParameters oauth2Answer = new OAuth2AnswerParameters();
oauth2Answer.setAccessToken(auth.substring(7));
oauth2Answer.setTokenType("Bearer");
HashMap<String, String> userinfo = revalidateToken(oauth2Answer);
if (userinfo == null) {
log.debug("userinfo is null, redirecting.");
return respondWithRedirect(exc);
}
oauth2Answer.setUserinfo(userinfo);
session.put(OAUTH2_ANSWER,oauth2Answer.serialize());
processUserInfo(userinfo, session);
}
}
if (session == null) {
log.debug("session is null, redirecting.");
return respondWithRedirect(exc);
}
if (session.get(OAUTH2_ANSWER) != null && tokenNeedsRevalidation(session.get(ParamNames.ACCESS_TOKEN))) {
if (revalidateToken(OAuth2AnswerParameters.deserialize(session.get(OAUTH2_ANSWER))) == null)
session.clear();
}
if(session.get(OAUTH2_ANSWER) != null)
exc.setProperty(Exchange.OAUTH2, OAuth2AnswerParameters.deserialize(session.get(OAUTH2_ANSWER)));
if (refreshingOfAccessTokenIsNeeded(session)) {
synchronized (getTokenSynchronizer(session)) {
try {
refreshAccessToken(session);
exc.setProperty(Exchange.OAUTH2, OAuth2AnswerParameters.deserialize(session.get(OAUTH2_ANSWER)));
} catch (Exception e) {
log.warn("Failed to refresh access token, clearing session and restarting OAuth2 flow.", e);
session.clearAuthentication();
}
}
}
if (session.isVerified()) {
applyBackendAuthorization(exc, session);
statistics.successfulRequest();
return Outcome.CONTINUE;
}
if (handleRequest(exc, getPublicURL(exc), session)) {
if(exc.getResponse() == null && exc.getRequest() != null && session.isVerified() && session.get().containsKey(OAUTH2_ANSWER)) {
exc.setProperty(Exchange.OAUTH2, OAuth2AnswerParameters.deserialize(session.get(OAUTH2_ANSWER)));
return Outcome.CONTINUE;
}
if (exc.getResponse().getStatusCode() >= 400)
session.clear();
return Outcome.RETURN;
}
log.debug("session present, but not verified, redirecting.");
return respondWithRedirect(exc);
}
/**
* Tries to avoid very long cookies by dropping all OAUTH2_ANSWERS except the first one.
*
* (The SessionManager.mergeCookies produces a value with "{...answer1...},{...answer2...}".
* We locate the ',' in between the JSON objects and split the string.)
*/
    private void simplifyMultipleOAuth2Answers(Session session) {
        if (session == null)
            return;
        String answer = session.get(OAUTH2_ANSWER);
        if (answer == null)
            return;
        // Find the ',' separating "{...answer1...},{...answer2...}".
        int indexOfTopLevelComma = getIndexOfTopLevelComma(answer);
        if (indexOfTopLevelComma == -1)
            return;
        // Keep only the first answer; drop everything after the top-level comma.
        answer = answer.substring(0, indexOfTopLevelComma);
        session.put(OAUTH2_ANSWER, answer);
    }
private int getIndexOfTopLevelComma(String answer) {
int curlyBraceLevel = 0;
boolean inString = false;
boolean escapeNext = false;
for (int i = 0; i < answer.length(); i++) {
if (escapeNext) {
escapeNext = false;
continue;
}
char c = answer.charAt(i);
if (inString) {
switch (c) {
case '\"':
inString = false;
break;
case '\\':
escapeNext = true;
break;
}
} else {
switch (c) {
case '{':
curlyBraceLevel++;
break;
case '}':
curlyBraceLevel--;
break;
case ',':
if (curlyBraceLevel == 0)
return i;
break;
case '"':
inString = true;
break;
}
}
}
return -1;
}
private Object getTokenSynchronizer(Session session) {
OAuth2AnswerParameters oauth2Params = null;
try {
oauth2Params = OAuth2AnswerParameters.deserialize(session.get(OAUTH2_ANSWER));
} catch (IOException e) {
throw new RuntimeException(e);
}
String rt = oauth2Params.getRefreshToken();
if (rt == null)
return new Object();
try {
return synchronizers.get(rt, () -> new Object());
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
}
private void handleOriginalRequest(Exchange exc) throws Exception {
Map<String, String> params = URLParamUtil.getParams(uriFactory, exc);
String oa2redirect = params.get(OA2REDIRECT);
Session session = getSessionManager().getSession(exc);
AbstractExchange originalExchange = new ObjectMapper().readValue(session.get(oa2redictKeyNameInSession(oa2redirect)).toString(),AbstractExchangeSnapshot.class).toAbstractExchange();
session.remove(oa2redictKeyNameInSession(oa2redirect));
doOriginalRequest(exc,originalExchange);
}
    /** True when the request URI carries the {@code oa2redirect} marker parameter. */
    private boolean isOAuth2RedirectRequest(Exchange exc) {
        return exc.getOriginalRequestUri().contains(OA2REDIRECT);
    }
    /**
     * Exchanges the stored refresh token for a fresh access token at the token
     * endpoint and updates the serialized OAuth2 answer in the session.
     * No-op when no refresh is currently needed.
     *
     * @throws RuntimeException on a non-2xx response or a response missing
     *         access_token/refresh_token
     */
    private void refreshAccessToken(Session session) throws Exception {
        if(!refreshingOfAccessTokenIsNeeded(session))
            return;
        OAuth2AnswerParameters oauth2Params = OAuth2AnswerParameters.deserialize(session.get(OAUTH2_ANSWER));
        // Client authenticates via HTTP Basic (clientId:clientSecret), grant_type=refresh_token.
        Exchange refreshTokenExchange = new Request.Builder()
                .post(auth.getTokenEndpoint())
                .header(Header.CONTENT_TYPE, "application/x-www-form-urlencoded")
                .header(Header.ACCEPT, "application/json")
                .header(Header.USER_AGENT, Constants.USERAGENT)
                .header(Header.AUTHORIZATION, "Basic " + new String(Base64.encodeBase64((auth.getClientId() + ":" + auth.getClientSecret()).getBytes())))
                .body("&grant_type=refresh_token"
                        + "&refresh_token=" + oauth2Params.getRefreshToken())
                .buildExchange();
        Response refreshTokenResponse = auth.doRequest(refreshTokenExchange);
        if (!refreshTokenResponse.isOk()) {
            refreshTokenResponse.getBody().read();
            throw new RuntimeException("Statuscode from authorization server for refresh token request: " + refreshTokenResponse.getStatusCode());
        }
        HashMap<String, String> json = Util.parseSimpleJSONResponse(refreshTokenResponse);
        if (json.get("access_token") == null || json.get("refresh_token") == null) {
            refreshTokenResponse.getBody().read();
            throw new RuntimeException("Statuscode was ok but no access_token and refresh_token was received: " + refreshTokenResponse.getStatusCode());
        }
        oauth2Params.setAccessToken(json.get("access_token"));
        oauth2Params.setRefreshToken(json.get("refresh_token"));
        oauth2Params.setExpiration(json.get("expires_in"));
        LocalDateTime now = LocalDateTime.now();
        // Timestamp is rounded down to 30s granularity; presumably to keep the
        // serialized session value stable across quick successive refreshes — TODO confirm.
        oauth2Params.setReceivedAt(now.withSecond(now.getSecond() / 30 * 30).withNano(0));
        if (json.containsKey("id_token")) {
            // Invalid id_tokens are marked rather than dropped.
            if (idTokenIsValid(json.get("id_token")))
                oauth2Params.setIdToken(json.get("id_token"));
            else
                oauth2Params.setIdToken("INVALID");
        }
        session.put(OAUTH2_ANSWER, oauth2Params.serialize());
    }
private boolean refreshingOfAccessTokenIsNeeded(Session session) throws IOException {
if(session.get(OAUTH2_ANSWER) == null)
return false;
OAuth2AnswerParameters oauth2Params = OAuth2AnswerParameters.deserialize(session.get(OAUTH2_ANSWER));
if(oauth2Params.getRefreshToken() == null || oauth2Params.getRefreshToken().isEmpty() || oauth2Params.getExpiration() == null || oauth2Params.getExpiration().isEmpty())
return false;
return LocalDateTime.now().isAfter(oauth2Params.getReceivedAt().plusSeconds(Long.parseLong(oauth2Params.getExpiration())).minusSeconds(5)); // refresh token 5 seconds before expiration
}
    /**
     * Checks the access token by calling the userinfo endpoint with it.
     *
     * @return the parsed userinfo map when the token is accepted (HTTP 200),
     *         null when the authorization server rejects it
     */
    private HashMap<String, String> revalidateToken(OAuth2AnswerParameters params) throws Exception {
        Exchange e2 = new Request.Builder()
                .get(auth.getUserInfoEndpoint())
                .header("Authorization", params.getTokenType() + " " + params.getAccessToken())
                .header("User-Agent", Constants.USERAGENT)
                .header(Header.ACCEPT, "application/json")
                .buildExchange();
        Response response2 = auth.doRequest(e2);
        if (response2.getStatusCode() != 200) {
            statistics.accessTokenInvalid();
            return null;
        } else {
            statistics.accessTokenValid();
            return Util.parseSimpleJSONResponse(response2);
        }
    }
private boolean tokenNeedsRevalidation(String token) {
if(revalidateTokenAfter < 0)
return false;
return validTokens.getIfPresent(token) == null;
}
    /** Responses pass through unchanged; this interceptor only guards requests. */
    @Override
    protected Outcome handleResponseInternal(Exchange exc) throws Exception {
        return Outcome.CONTINUE;
    }
    /**
     * Determines the public base URL for this exchange from X-Forwarded-Proto /
     * the inbound TLS context and the original Host header (plus the rule path,
     * if fixed). New URLs are registered on the fly when that mode is enabled,
     * and trigger dynamic client registration when the auth service supports it.
     */
    private String getPublicURL(Exchange exc) throws Exception {
        String xForwardedProto = exc.getRequest().getHeader().getFirstValue(Header.X_FORWARDED_PROTO);
        boolean isHTTPS = xForwardedProto != null ? "https".equals(xForwardedProto) : exc.getRule().getSslInboundContext() != null;
        String publicURL = (isHTTPS ? "https://" : "http://") + exc.getOriginalHostHeader();
        RuleKey key = exc.getRule().getKey();
        if (!key.isPathRegExp() && key.getPath() != null)
            publicURL += key.getPath();
        publicURL = normalizePublicURL(publicURL);
        synchronized (publicURLs) {
            if (publicURLs.contains(publicURL))
                return publicURL;
            // Fixed configuration: fall back to the first configured URL.
            if (!initPublicURLsOnTheFly)
                return publicURLs.get(0);
        }
        String newURL = null;
        if(initPublicURLsOnTheFly)
            newURL = addPublicURL(publicURL);
        // Only re-register with the authorization server for genuinely new URLs.
        if(firstInitWhenDynamicAuthorizationService && newURL != null)
            getAuthService().dynamicRegistration(exc, getPublicURLs());
        return publicURL;
    }
    /**
     * Registers {@code publicURL} if it is not yet known.
     *
     * @return the new public URL, if a new one was added. null if the URL is not new.
     */
    private String addPublicURL(String publicURL) {
        synchronized (publicURLs) {
            if (publicURLs.contains(publicURL))
                return null;
            publicURLs.add(publicURL);
        }
        return publicURL;
    }
    /** Returns a defensive copy of the known public URLs. */
    private List<String> getPublicURLs() {
        synchronized(publicURLs) {
            return new ArrayList<>(publicURLs);
        }
    }
private String normalizePublicURL(String url) {
if(!url.endsWith("/"))
url += "/";
return url;
}
    /** True for browser favicon probes, which are rejected early. */
    private boolean isFaviconRequest(Exchange exc) {
        return exc.getRequestURI().startsWith("/favicon.ico");
    }
    /**
     * Copies every session entry whose key starts with "header" onto the request
     * as an HTTP header (key suffix = header name), replacing existing values.
     */
    private void applyBackendAuthorization(Exchange exc, Session s) {
        Header h = exc.getRequest().getHeader();
        for (Map.Entry<String, Object> e : s.get().entrySet())
            if (e.getKey().startsWith("header")) {
                String headerName = e.getKey().substring(6);
                h.removeFields(headerName);
                h.add(headerName, e.getValue().toString());
            }
    }
    /**
     * Redirects the client to the authorization server's login URL, storing the
     * current exchange (keyed by a fresh CSRF state token) so it can be replayed
     * after the callback. Multiple pending states are merged in the session.
     */
    private Outcome respondWithRedirect(Exchange exc) throws Exception {
        // 130-bit random state doubles as CSRF token and exchange-store key.
        String state = new BigInteger(130, new SecureRandom()).toString(32);
        exc.setResponse(Response.redirect(auth.getLoginURL(state, getPublicURL(exc), exc.getRequestURI()),false).build());
        readBodyFromStreamIntoMemory(exc);
        Session session = getSessionManager().getSession(exc);
        originalExchangeStore.store(exc, session, state, exc);
        // Append to any states from parallel pending logins instead of overwriting.
        if(session.get().containsKey(ParamNames.STATE))
            state = session.get(ParamNames.STATE) + SessionManager.SESSION_VALUE_SEPARATOR + state;
        session.put(ParamNames.STATE,state);
        return Outcome.RETURN;
    }
    /** Session key under which the original exchange for an oa2redirect id is stored. */
    private String oa2redictKeyNameInSession(String oa2redirect) {
        return OA2REDIRECT_PREFIX + oa2redirect;
    }
    /** Forces the request body into memory so it survives sending the redirect. */
    private void readBodyFromStreamIntoMemory(Exchange exc) {
        exc.getRequest().getBodyAsStringDecoded();
    }
public boolean handleRequest(Exchange exc, String publicURL, Session session) throws Exception {
String path = uriFactory.create(exc.getDestinations().get(0)).getPath();
if(path == null)
return false;
if(path.endsWith("/oauth2callback")) {
try {
Map<String, String> params = URLParamUtil.getParams(uriFactory, exc);
String state2 = params.get("state");
String stateFromUri = getSecurityTokenFromState(state2);
if(!csrfTokenMatches(session, stateFromUri))
throw new RuntimeException("CSRF token mismatch.");
// state in session can be "merged" -> save the selected state in session overwriting the possibly merged value
session.put(ParamNames.STATE,stateFromUri);
AbstractExchangeSnapshot originalRequest = originalExchangeStore.reconstruct(exc, session, stateFromUri);
String url = originalRequest.getRequest().getUri();
if (url == null)
url = "/";
originalExchangeStore.remove(exc, session, stateFromUri);
if (log.isDebugEnabled())
log.debug("CSRF token match.");
String code = params.get("code");
if (code == null)
throw new RuntimeException("No code received.");
Exchange e = new Request.Builder()
.post(auth.getTokenEndpoint())
.header(Header.CONTENT_TYPE, "application/x-www-form-urlencoded")
.header(Header.ACCEPT, "application/json")
.header(Header.USER_AGENT, Constants.USERAGENT)
.header(Header.AUTHORIZATION, "Basic " + new String(Base64.encodeBase64((auth.getClientId() + ":" + auth.getClientSecret()).getBytes())))
.body("code=" + code
+ "&redirect_uri=" + publicURL + "oauth2callback"
+ "&grant_type=authorization_code")
.buildExchange();
LogInterceptor logi = null;
if (log.isDebugEnabled()) {
logi = new LogInterceptor();
logi.setHeaderOnly(false);
logi.handleRequest(e);
}
Response response = auth.doRequest(e);
if (response.getStatusCode() != 200) {
response.getBody().read();
throw new RuntimeException("Authorization server returned " + response.getStatusCode() + ".");
}
if (log.isDebugEnabled())
logi.handleResponse(e);
HashMap<String, String> json = Util.parseSimpleJSONResponse(response);
if (!json.containsKey("access_token"))
throw new RuntimeException("No access_token received.");
String token = (String) json.get("access_token"); // and also "scope": "", "token_type": "bearer"
OAuth2AnswerParameters oauth2Answer = new OAuth2AnswerParameters();
session.put("access_token",token); // saving for logout
oauth2Answer.setAccessToken(token);
oauth2Answer.setTokenType(json.get("token_type"));
oauth2Answer.setExpiration(json.get("expires_in"));
oauth2Answer.setRefreshToken(json.get("refresh_token"));
LocalDateTime now = LocalDateTime.now();
oauth2Answer.setReceivedAt(now.withSecond(now.getSecond() / 30 * 30).withNano(0));
if(json.containsKey("id_token")) {
if (idTokenIsValid(json.get("id_token")))
oauth2Answer.setIdToken(json.get("id_token"));
else
oauth2Answer.setIdToken("INVALID");
}
validTokens.put(token,true);
Exchange e2 = new Request.Builder()
.get(auth.getUserInfoEndpoint())
.header("Authorization", json.get("token_type") + " " + token)
.header("User-Agent", Constants.USERAGENT)
.header(Header.ACCEPT, "application/json")
.buildExchange();
if (log.isDebugEnabled()) {
logi.setHeaderOnly(false);
logi.handleRequest(e2);
}
Response response2 = auth.doRequest(e2);
if (log.isDebugEnabled())
logi.handleResponse(e2);
if (response2.getStatusCode() != 200) {
statistics.accessTokenInvalid();
throw new RuntimeException("User data could not be retrieved.");
}
statistics.accessTokenValid();
HashMap<String, String> json2 = Util.parseSimpleJSONResponse(response2);
oauth2Answer.setUserinfo(json2);
session.put(OAUTH2_ANSWER,oauth2Answer.serialize());
processUserInfo(json2, session);
doRedirect(exc,originalRequest);
originalExchangeStore.postProcess(exc);
return true;
} catch (Exception e) {
e.printStackTrace();
exc.setResponse(Response.badRequest().body(e.getMessage()).build());
originalExchangeStore.postProcess(exc);
return true;
}
}
return false;
}
private String getSecurityTokenFromState(String state2) {
if (state2 == null)
throw new RuntimeException("No CSRF token.");
Map<String, String> param = URLParamUtil.parseQueryString(state2);
if (param == null || !param.containsKey("security_token"))
throw new RuntimeException("No CSRF token.");
return param.get("security_token");
}
private boolean csrfTokenMatches(Session session, String state2) {
Optional<Object> sessionRaw = Optional.ofNullable(session.get(ParamNames.STATE));
if(!sessionRaw.isPresent())
return false;
return Arrays
.asList(sessionRaw.get().toString().split(SessionManager.SESSION_VALUE_SEPARATOR))
.stream()
.filter(s -> s.equals(state2))
.count() == 1;
}
    /**
     * Redirects back to the originally requested URI. GET requests redirect
     * directly; other methods store the snapshot in the session under a random
     * oa2redirect id so the follow-up request can replay the original exchange.
     */
    private void doRedirect(Exchange exc, AbstractExchangeSnapshot originalRequest) throws JsonProcessingException {
        if(originalRequest.getRequest().getMethod().equals("GET")){
            exc.setResponse(Response.redirect(originalRequest.getOriginalRequestUri(),false).build());
        }else {
            String oa2redirect = new BigInteger(130, new SecureRandom()).toString(32);
            Session session = getSessionManager().getSession(exc);
            session.put(oa2redictKeyNameInSession(oa2redirect),new ObjectMapper().writeValueAsString(originalRequest));
            // Append oa2redirect as query parameter, respecting an existing '?'.
            String delimiter = originalRequest.getOriginalRequestUri().contains("?") ? "&" : "?";
            exc.setResponse(Response.redirect(originalRequest.getOriginalRequestUri() + delimiter + OA2REDIRECT + "=" + oa2redirect, false).build());
        }
    }
    /**
     * Replays the stored original request on the current exchange: carries over
     * the current Cookie header, and rebuilds the destination from the stored
     * X-Forwarded-Proto / X-Forwarded-Host headers and original URI.
     */
    private void doOriginalRequest(Exchange exc, AbstractExchange originalRequest) throws Exception {
        // Keep the caller's current session cookie on the replayed request.
        originalRequest.getRequest().getHeader().add("Cookie",exc.getRequest().getHeader().getFirstValue("Cookie"));
        exc.setRequest(originalRequest.getRequest());
        exc.getDestinations().clear();
        String xForwardedProto = originalRequest.getRequest().getHeader().getFirstValue(Header.X_FORWARDED_PROTO);
        String xForwardedHost = originalRequest.getRequest().getHeader().getFirstValue(Header.X_FORWARDED_HOST);
        String originalRequestUri = originalRequest.getOriginalRequestUri();
        exc.getDestinations().add(xForwardedProto + "://" + xForwardedHost + originalRequestUri);
        exc.setOriginalRequestUri(originalRequestUri);
        exc.setOriginalHostHeader(xForwardedHost);
    }
private void processUserInfo(Map<String, String> userInfo, Session session) {
if (!userInfo.containsKey(auth.getSubject()))
throw new RuntimeException("User object does not contain " + auth.getSubject() + " key.");
Map<String, Object> userAttributes = session.get();
String userIdPropertyFixed = auth.getSubject().substring(0, 1).toUpperCase() + auth.getSubject().substring(1);
String username = userInfo.get(auth.getSubject());
userAttributes.put("headerX-Authenticated-" + userIdPropertyFixed, username);
session.authorize(username);
}
    /**
     * Verifies the id_token's signature and claims against the configured
     * issuer, client id and JWKS endpoint.
     *
     * @return true when verification succeeds; false on any verification error
     *         (the broad catch is deliberate — validity, not the cause, matters here)
     */
    private boolean idTokenIsValid(String idToken) throws Exception {
        //TODO maybe change this to return claims and also save them in the oauth2AnswerParameters
        try {
            JwtGenerator.getClaimsFromSignedIdToken(idToken, getAuthService().getIssuer(), getAuthService().getClientId(), getAuthService().getJwksEndpoint(), auth);
            return true;
        }catch(Exception e){
            return false;
        }
    }
    /** Admin-console summary including the interceptor's OAuth2 statistics. */
    @Override
    public String getShortDescription() {
        return "Client of the oauth2 authentication process.\n" + statistics.toString();
    }
    /** Returns the store used to park the original exchange during the redirect dance. */
    public OriginalExchangeStore getOriginalExchangeStore() {
        return originalExchangeStore;
    }
    /** Configures the original-exchange store (defaults to a cookie-based store). */
    @MCChildElement(order=20)
    public void setOriginalExchangeStore(OriginalExchangeStore originalExchangeStore) {
        this.originalExchangeStore = originalExchangeStore;
    }
}
| 12,769 |
10,225 | package org.acme;
import io.quarkus.test.junit.QuarkusTest;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import static io.restassured.RestAssured.given;
import static org.hamcrest.CoreMatchers.is;
@Tag("foo")
@QuarkusTest
public class HelloResourceFooTest {

    /**
     * Hits /hello five times, pausing 1s between calls, and expects the same
     * body each time. The sleep is intentional (see inline comment): it widens
     * the window for concurrent build/augmentation conflicts under test.
     */
    @Test
    void testHelloEndpoint() throws Exception {
        for (int i = 0; i < 5; i++) {
            if (i > 0) {
                Thread.sleep(1_000L); // Make it wait to cause build conflicts
            }
            given()
                    .when().get("/hello")
                    .then()
                    .statusCode(200)
                    .body(is("Hello foo 2"));
        }
    }
}
| 334 |
1,069 | <reponame>Leopere/django-th
from .feedsservice import Feeds
| 24 |
14,668 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "services/shape_detection/text_detection_impl_win.h"
#include <windows.foundation.collections.h>
#include <windows.globalization.h>
#include <memory>
#include <string>
#include "base/bind.h"
#include "base/logging.h"
#include "base/win/core_winrt_util.h"
#include "base/win/post_async_results.h"
#include "base/win/scoped_hstring.h"
#include "base/win/windows_version.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/bindings/self_owned_receiver.h"
#include "services/shape_detection/detection_utils_win.h"
#include "services/shape_detection/text_detection_impl.h"
#include "ui/gfx/geometry/rect_f.h"
namespace shape_detection {
using ABI::Windows::Foundation::IAsyncOperation;
using ABI::Windows::Foundation::Collections::IVectorView;
using ABI::Windows::Globalization::ILanguageFactory;
using ABI::Windows::Graphics::Imaging::ISoftwareBitmap;
using ABI::Windows::Graphics::Imaging::ISoftwareBitmapStatics;
using ABI::Windows::Media::Ocr::IOcrEngine;
using ABI::Windows::Media::Ocr::IOcrEngineStatics;
using ABI::Windows::Media::Ocr::IOcrLine;
using ABI::Windows::Media::Ocr::IOcrResult;
using ABI::Windows::Media::Ocr::IOcrWord;
using ABI::Windows::Media::Ocr::OcrLine;
using ABI::Windows::Media::Ocr::OcrResult;
using ABI::Windows::Media::Ocr::OcrWord;
using base::win::GetActivationFactory;
using base::win::ScopedHString;
using Microsoft::WRL::ComPtr;
// static
// Creates a Windows OCR-backed TextDetection implementation and binds it to
// |receiver|. On any failure (pre-Win10, WinRT delayload failure, missing
// English OCR support, factory errors) it simply returns, dropping the
// receiver so the caller observes a disconnected pipe.
void TextDetectionImpl::Create(
    mojo::PendingReceiver<mojom::TextDetection> receiver) {
  // OcrEngine class is only available in Win 10 onwards (v10.0.10240.0) that
  // documents in
  // https://docs.microsoft.com/en-us/uwp/api/windows.media.ocr.ocrengine.
  if (base::win::GetVersion() < base::win::Version::WIN10) {
    DVLOG(1) << "Optical character recognition not supported before Windows 10";
    return;
  }
  DCHECK_GE(base::win::OSInfo::GetInstance()->version_number().build, 10240u);

  // Loads functions dynamically at runtime to prevent library dependencies.
  if (!(base::win::ResolveCoreWinRTDelayload() &&
        ScopedHString::ResolveCoreWinRTStringDelayload())) {
    DLOG(ERROR) << "Failed loading functions from combase.dll";
    return;
  }

  // Text Detection specification only supports Latin-1 text as documented in
  // https://wicg.github.io/shape-detection-api/text.html#text-detection-api.
  // TODO(junwei.fu): https://crbug.com/794097 consider supporting other Latin
  // script language.
  ScopedHString language_hstring = ScopedHString::Create("en");
  if (!language_hstring.is_valid())
    return;

  // Build a Windows.Globalization.Language object for "en".
  ComPtr<ILanguageFactory> language_factory;
  HRESULT hr =
      GetActivationFactory<ILanguageFactory,
                           RuntimeClass_Windows_Globalization_Language>(
          &language_factory);
  if (FAILED(hr)) {
    DLOG(ERROR) << "ILanguage factory failed: "
                << logging::SystemErrorCodeToString(hr);
    return;
  }
  ComPtr<ABI::Windows::Globalization::ILanguage> language;
  hr = language_factory->CreateLanguage(language_hstring.get(), &language);
  if (FAILED(hr)) {
    DLOG(ERROR) << "Create language failed: "
                << logging::SystemErrorCodeToString(hr);
    return;
  }

  // Create the OCR engine for that language, bailing out when the installed
  // OS language packs do not support it.
  ComPtr<IOcrEngineStatics> engine_factory;
  hr = GetActivationFactory<IOcrEngineStatics,
                            RuntimeClass_Windows_Media_Ocr_OcrEngine>(
      &engine_factory);
  if (FAILED(hr)) {
    DLOG(ERROR) << "IOcrEngineStatics factory failed: "
                << logging::SystemErrorCodeToString(hr);
    return;
  }
  boolean is_supported = false;
  hr = engine_factory->IsLanguageSupported(language.Get(), &is_supported);
  if (FAILED(hr) || !is_supported)
    return;
  ComPtr<IOcrEngine> ocr_engine;
  hr = engine_factory->TryCreateFromLanguage(language.Get(), &ocr_engine);
  if (FAILED(hr)) {
    DLOG(ERROR) << "Create engine failed from language: "
                << logging::SystemErrorCodeToString(hr);
    return;
  }

  // SoftwareBitmap statics are needed later to convert SkBitmaps for the engine.
  ComPtr<ISoftwareBitmapStatics> bitmap_factory;
  hr = GetActivationFactory<
      ISoftwareBitmapStatics,
      RuntimeClass_Windows_Graphics_Imaging_SoftwareBitmap>(&bitmap_factory);
  if (FAILED(hr)) {
    DLOG(ERROR) << "ISoftwareBitmapStatics factory failed: "
                << logging::SystemErrorCodeToString(hr);
    return;
  }

  auto impl = std::make_unique<TextDetectionImplWin>(std::move(ocr_engine),
                                                     std::move(bitmap_factory));
  auto* impl_ptr = impl.get();
  // The impl owns itself via the self-owned receiver; keep a raw pointer to
  // hand the receiver handle back to it.
  impl_ptr->SetReceiver(
      mojo::MakeSelfOwnedReceiver(std::move(impl), std::move(receiver)));
}
// Takes ownership of the pre-created OCR engine and bitmap factory; both are
// created in the static factory above and must be non-null.
TextDetectionImplWin::TextDetectionImplWin(
    ComPtr<IOcrEngine> ocr_engine,
    ComPtr<ISoftwareBitmapStatics> bitmap_factory)
    : ocr_engine_(std::move(ocr_engine)),
      bitmap_factory_(std::move(bitmap_factory)) {
  DCHECK(ocr_engine_);
  DCHECK(bitmap_factory_);
}
TextDetectionImplWin::~TextDetectionImplWin() = default;
// Starts an asynchronous OCR pass over |bitmap|. On immediate failure the
// callback is run right away with an empty result list; otherwise the callback
// is held until OnTextDetected() fires, and further Detect() calls are paused
// so only one recognition runs at a time.
void TextDetectionImplWin::Detect(const SkBitmap& bitmap,
                                  DetectCallback callback) {
  if (FAILED(BeginDetect(bitmap))) {
    // No detection taking place; run |callback| with an empty array of results.
    std::move(callback).Run(std::vector<mojom::TextDetectionResultPtr>());
    return;
  }
  // Hold on the callback until AsyncOperation completes.
  recognize_text_callback_ = std::move(callback);
  // This prevents the Detect function from being called before the
  // AsyncOperation completes.
  receiver_->PauseIncomingMethodCallProcessing();
}
// Converts |bitmap| to a WinRT SoftwareBitmap and kicks off RecognizeAsync.
// Returns a failure HRESULT if conversion or the async start fails; on success
// OnTextDetected() will eventually be invoked with the OCR result.
HRESULT TextDetectionImplWin::BeginDetect(const SkBitmap& bitmap) {
  ComPtr<ISoftwareBitmap> win_bitmap =
      CreateWinBitmapFromSkBitmap(bitmap, bitmap_factory_.Get());
  if (!win_bitmap)
    return E_FAIL;
  // Recognize text asynchronously.
  ComPtr<IAsyncOperation<OcrResult*>> async_op;
  const HRESULT hr = ocr_engine_->RecognizeAsync(win_bitmap.Get(), &async_op);
  if (FAILED(hr)) {
    DLOG(ERROR) << "Recognize text asynchronously failed: "
                << logging::SystemErrorCodeToString(hr);
    return hr;
  }
  // Use WeakPtr to bind the callback so that the once callback will not be run
  // if this object has been already destroyed. |win_bitmap| needs to be kept
  // alive until OnTextDetected().
  return base::win::PostAsyncResults(
      std::move(async_op),
      base::BindOnce(&TextDetectionImplWin::OnTextDetected,
                     weak_factory_.GetWeakPtr(), std::move(win_bitmap)));
}
// Converts a WinRT OcrResult into a list of mojom results: one entry per OCR
// line, carrying the line's text and the union of its words' bounding boxes.
// Returns an empty vector if |ocr_result| is null or any COM call fails.
std::vector<mojom::TextDetectionResultPtr>
TextDetectionImplWin::BuildTextDetectionResult(ComPtr<IOcrResult> ocr_result) {
  std::vector<mojom::TextDetectionResultPtr> results;
  if (!ocr_result)
    return results;
  ComPtr<IVectorView<OcrLine*>> ocr_lines;
  HRESULT hr = ocr_result->get_Lines(&ocr_lines);
  if (FAILED(hr)) {
    DLOG(ERROR) << "Get Lines failed: " << logging::SystemErrorCodeToString(hr);
    return results;
  }
  uint32_t count;
  hr = ocr_lines->get_Size(&count);
  if (FAILED(hr)) {
    DLOG(ERROR) << "get_Size failed: " << logging::SystemErrorCodeToString(hr);
    return results;
  }
  results.reserve(count);
  for (uint32_t i = 0; i < count; ++i) {
    ComPtr<IOcrLine> line;
    hr = ocr_lines->GetAt(i, &line);
    if (FAILED(hr))
      break;
    HSTRING text;
    hr = line->get_Text(&text);
    if (FAILED(hr))
      break;
    // Take ownership of |text| immediately. The previous code only wrapped it
    // in a ScopedHString at the bottom of the loop body, which leaked the
    // HSTRING whenever get_Words() or get_Size() below failed and broke out
    // of the loop early.
    ScopedHString text_hstring(text);
    // Gets bounding box with the words detected in the current line of Text.
    ComPtr<IVectorView<OcrWord*>> ocr_words;
    hr = line->get_Words(&ocr_words);
    if (FAILED(hr))
      break;
    uint32_t words_count;
    hr = ocr_words->get_Size(&words_count);
    if (FAILED(hr))
      break;
    auto result = shape_detection::mojom::TextDetectionResult::New();
    for (uint32_t word_num = 0; word_num < words_count; ++word_num) {
      ComPtr<IOcrWord> word;
      hr = ocr_words->GetAt(word_num, &word);
      if (FAILED(hr))
        break;
      ABI::Windows::Foundation::Rect bounds;
      hr = word->get_BoundingRect(&bounds);
      if (FAILED(hr))
        break;
      // Grow the line's box to cover every word seen so far.
      result->bounding_box = gfx::UnionRects(
          result->bounding_box,
          gfx::RectF(bounds.X, bounds.Y, bounds.Width, bounds.Height));
    }
    result->raw_value = text_hstring.GetAsUTF8();
    results.push_back(std::move(result));
  }
  return results;
}
// |win_bitmap| is passed here so that it is kept alive until the AsyncOperation
// completes because RecognizeAsync does not hold a reference.
// Runs the stashed Detect() callback with the converted results and resumes
// mojo message processing that Detect() paused.
void TextDetectionImplWin::OnTextDetected(
    ComPtr<ISoftwareBitmap> /* win_bitmap */,
    ComPtr<IOcrResult> ocr_result) {
  std::move(recognize_text_callback_)
      .Run(BuildTextDetectionResult(std::move(ocr_result)));
  receiver_->ResumeIncomingMethodCallProcessing();
}
} // namespace shape_detection
| 3,383 |
575 | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_APP_LIST_SEARCH_MIXER_H_
#define CHROME_BROWSER_UI_APP_LIST_SEARCH_MIXER_H_
#include <stddef.h>
#include <memory>
#include <string>
#include <vector>
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "chrome/browser/ui/app_list/search/search_result_ranker/app_launch_data.h"
class AppListModelUpdater;
class ChromeSearchResult;
class Profile;
namespace app_list {
namespace test {
FORWARD_DECLARE_TEST(MixerTest, Publish);
}
class ChipRanker;
class SearchController;
class SearchProvider;
class SearchResultRanker;
enum class RankingItemType;
// Mixer collects results from providers, sorts them and publishes them to the
// SearchResults UI model. The targeted results have 6 slots to hold the
// result.
class Mixer {
 public:
  explicit Mixer(AppListModelUpdater* model_updater);
  ~Mixer();

  // Adds a new mixer group. A "soft" maximum of |max_results| results will be
  // chosen from this group (if 0, will allow unlimited results from this
  // group). If there aren't enough results from all groups, more than
  // |max_results| may be chosen from this group. Returns the group's group_id.
  size_t AddGroup(size_t max_results);

  // Associates a provider with a mixer group.
  void AddProviderToGroup(size_t group_id, SearchProvider* provider);

  // Collects the results, sorts and publishes them.
  void MixAndPublish(size_t num_max_results, const std::u16string& query);

  // Sets a SearchResultRanker to re-rank non-app search results before they are
  // published.
  void SetNonAppSearchResultRanker(std::unique_ptr<SearchResultRanker> ranker);

  void InitializeRankers(Profile* profile, SearchController* search_controller);

  // Returns the ranker, or nullptr if none has been set.
  SearchResultRanker* search_result_ranker() {
    // unique_ptr::get() already yields nullptr for an empty pointer, so the
    // previous explicit null check was redundant.
    return search_result_ranker_.get();
  }

  // Sets a ChipRanker to re-rank chip results before they are published.
  void SetChipRanker(std::unique_ptr<ChipRanker> ranker);

  // Handle a training signal.
  void Train(const AppLaunchData& app_launch_data);

  // Used for sorting and mixing results.
  struct SortData {
    SortData();
    SortData(ChromeSearchResult* result, double score);
    bool operator<(const SortData& other) const;
    ChromeSearchResult* result;  // Not owned.
    double score;
  };
  typedef std::vector<Mixer::SortData> SortedResults;

 private:
  FRIEND_TEST_ALL_PREFIXES(test::MixerTest, Publish);
  class Group;
  typedef std::vector<std::unique_ptr<Group>> Groups;

  void FetchResults(const std::u16string& query);

  AppListModelUpdater* const model_updater_;  // Not owned.
  Groups groups_;

  // Adaptive models used for re-ranking search results.
  std::unique_ptr<SearchResultRanker> search_result_ranker_;
  std::unique_ptr<ChipRanker> chip_ranker_;

  DISALLOW_COPY_AND_ASSIGN(Mixer);
};
} // namespace app_list
#endif // CHROME_BROWSER_UI_APP_LIST_SEARCH_MIXER_H_
| 996 |
3,603 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.jdbc;
import io.trino.spi.connector.ConnectorSession;
import java.sql.Connection;
import java.sql.SQLException;
import static java.util.Objects.requireNonNull;
/**
 * A {@link ConnectionFactory} decorator that runs a {@link Configurator}
 * hook on every connection immediately after it is opened. If the hook
 * fails, the connection is closed before the failure is propagated, with
 * any close failure attached as a suppressed exception.
 */
public final class ConfiguringConnectionFactory
        implements ConnectionFactory
{
    private final ConnectionFactory delegate;
    private final Configurator configurator;

    public ConfiguringConnectionFactory(ConnectionFactory delegate, Configurator configurator)
    {
        this.delegate = requireNonNull(delegate, "delegate is null");
        this.configurator = requireNonNull(configurator, "configurator is null");
    }

    @Override
    public Connection openConnection(ConnectorSession session)
            throws SQLException
    {
        Connection connection = delegate.openConnection(session);
        try {
            configurator.configure(connection);
            return connection;
        }
        catch (SQLException | RuntimeException e) {
            // Do not leak the connection when configuration fails; mirror
            // try-with-resources semantics by suppressing any close failure.
            try {
                connection.close();
            }
            catch (Throwable closeFailure) {
                e.addSuppressed(closeFailure);
            }
            throw e;
        }
    }

    @Override
    public void close()
            throws SQLException
    {
        delegate.close();
    }

    /** Callback applied to each freshly opened {@link Connection}. */
    @FunctionalInterface
    public interface Configurator
    {
        void configure(Connection connection)
                throws SQLException;
    }
}
| 653 |
348 | {"nom":"Tellières-le-Plessis","dpt":"Orne","inscrits":64,"abs":14,"votants":50,"blancs":5,"nuls":2,"exp":43,"res":[{"panneau":"1","voix":23},{"panneau":"2","voix":20}]} | 74 |
422 | //
// Getdown - application installer, patcher and launcher
// Copyright (C) 2004-2018 Getdown authors
// https://github.com/threerings/getdown/blob/master/LICENSE
package com.threerings.getdown.launcher;
import java.awt.Dimension;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.util.MissingResourceException;
import java.util.ResourceBundle;
import javax.swing.BorderFactory;
import javax.swing.JButton;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import com.samskivert.swing.GroupLayout;
import com.samskivert.swing.Spacer;
import com.samskivert.swing.VGroupLayout;
import com.threerings.getdown.util.MessageUtil;
import static com.threerings.getdown.Log.log;
/**
 * Displays a confirmation that the user wants to abort installation.
 */
public final class AbortPanel extends JFrame
    implements ActionListener
{
    public AbortPanel (Getdown getdown, ResourceBundle msgs)
    {
        _getdown = getdown;
        _msgs = msgs;

        setLayout(new VGroupLayout());
        setResizable(false);
        setTitle(get("m.abort_title"));

        JLabel confirmation = new JLabel(get("m.abort_confirm"));
        confirmation.setBorder(BorderFactory.createEmptyBorder(5, 5, 5, 5));
        add(confirmation);
        add(new Spacer(5, 5));

        JPanel buttonRow = GroupLayout.makeButtonBox(GroupLayout.CENTER);
        JButton btn = new JButton(get("m.abort_ok"));
        buttonRow.add(btn);
        btn.setActionCommand("ok");
        btn.addActionListener(this);
        btn = new JButton(get("m.abort_cancel"));
        buttonRow.add(btn);
        btn.setActionCommand("cancel");
        btn.addActionListener(this);
        // Cancel is the safer choice, so it doubles as the default button.
        getRootPane().setDefaultButton(btn);
        add(buttonRow);
    }

    @Override
    public Dimension getPreferredSize ()
    {
        // Hardcoded on purpose: forcing only the width would let the JLabel
        // claim a bogus single-line height and ruin the preferred size of
        // the whole UI.
        return new Dimension(300, 200);
    }

    // from interface ActionListener
    public void actionPerformed (ActionEvent e)
    {
        if ("ok".equals(e.getActionCommand())) {
            System.exit(0);
        } else {
            setVisible(false);
        }
    }

    /** Looks up the localized message for {@code key}, untainting instead of
     * translating when the key is marked as tainted. */
    protected String get (String key)
    {
        // Tainted strings are never translated; just strip the taint marker.
        if (MessageUtil.isTainted(key)) {
            return MessageUtil.untaint(key);
        }
        try {
            return _msgs.getString(key);
        } catch (MissingResourceException mre) {
            log.warning("Missing translation message '" + key + "'.");
            return key;
        }
    }

    /** The Getdown instance whose installation may be aborted. */
    protected Getdown _getdown;

    /** Used to look up localized messages. */
    protected ResourceBundle _msgs;
}
| 1,197 |
1,145 | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PRODUCTION_CONTAINERS_NSCON_CONFIGURATOR_NS_CONFIGURATOR_FACTORY_MOCK_H_
#define PRODUCTION_CONTAINERS_NSCON_CONFIGURATOR_NS_CONFIGURATOR_FACTORY_MOCK_H_
#include "nscon/configurator/ns_configurator_factory.h"
#include "base/macros.h"
#include "gmock/gmock.h"
namespace containers {
namespace nscon {
// GMock implementation of NsConfiguratorFactory for unit tests.
// Constructed with a null NsUtil because the mocked methods never touch it.
class MockNsConfiguratorFactory : public NsConfiguratorFactory {
 public:
  ~MockNsConfiguratorFactory() override {}
  MockNsConfiguratorFactory() : NsConfiguratorFactory(nullptr /* ns_util */) {}
  MOCK_CONST_METHOD1(Get, ::util::StatusOr<NsConfigurator *>(int ns));
  MOCK_CONST_METHOD0(GetFilesystemConfigurator,
                     ::util::StatusOr<NsConfigurator *>());
  MOCK_CONST_METHOD0(GetMachineConfigurator,
                     ::util::StatusOr<NsConfigurator *>());
 private:
  DISALLOW_COPY_AND_ASSIGN(MockNsConfiguratorFactory);
};
} // namespace nscon
} // namespace containers
#endif // PRODUCTION_CONTAINERS_NSCON_CONFIGURATOR_NS_CONFIGURATOR_FACTORY_MOCK_H_
| 556 |
1,751 | <reponame>rapidoid/rapidoid<filename>rapidoid-commons/src/main/java/org/rapidoid/var/impl/AbstractVar.java<gh_stars>1000+
/*-
* #%L
* rapidoid-commons
* %%
* Copyright (C) 2014 - 2020 <NAME> and contributors
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package org.rapidoid.var.impl;
import org.rapidoid.RapidoidThing;
import org.rapidoid.annotation.Authors;
import org.rapidoid.annotation.Since;
import org.rapidoid.collection.Coll;
import org.rapidoid.u.U;
import org.rapidoid.var.Var;
import java.util.Set;
@Authors("<NAME>")
@Since("2.0.0")
public abstract class AbstractVar<T> extends RapidoidThing implements Var<T> {

	private static final long serialVersionUID = 6006051524799076017L;

	private final String name;

	private volatile Object rawValue;

	private final Set<String> errors = Coll.synchronizedSet();

	public AbstractVar(String name) {
		this.name = name;
	}

	@Override
	public String toString() {
		return U.str(get());
	}

	@Override
	public String name() {
		return name;
	}

	@Override
	public Set<String> errors() {
		return errors;
	}

	/**
	 * Remembers the raw value and delegates to {@link #doSet(Object)}.
	 * Validation errors are cleared only after a successful set; a failing
	 * set records an error message instead.
	 */
	@Override
	public void set(T value) {
		this.rawValue = value;
		try {
			doSet(value);
		} catch (Exception e) {
			error(e);
			return;
		}
		errors().clear();
	}

	/** Subclass hook that actually applies the new value. */
	protected abstract void doSet(T value);

	/** Records a human-readable error message for the failed set. */
	@Override
	public void error(Exception e) {
		String msg = (e instanceof NumberFormatException)
				? "Invalid number!"
				: U.or(e.getMessage(), "Invalid value!");
		errors().add(msg);
	}

	@Override
	public Object getRawValue() {
		return rawValue;
	}

}
| 847 |
344 | /*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
#include "modules/audio_coding/neteq/buffer_level_filter.h"
#include "test/gmock.h"
namespace webrtc {
// GMock implementation of BufferLevelFilter for NetEq unit tests.
class MockBufferLevelFilter : public BufferLevelFilter {
 public:
  MOCK_METHOD(void,
              Update,
              (size_t buffer_size_samples, int time_stretched_samples));
  MOCK_METHOD(int, filtered_current_level, (), (const));
};
} // namespace webrtc
#endif // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
| 363 |
1,515 | package com.google.android.play.core.splitinstall;
import android.os.RemoteException;
import com.google.android.play.core.remote.RemoteTask;
import com.google.android.play.core.tasks.TaskWrapper;
import java.util.List;
/**
 * RemoteTask that forwards a "deferred install" request for the given modules
 * to the split install service; the platform installs them at a later point
 * of its own choosing. Completion is reported via DeferredInstallCallback.
 */
final class DeferredInstallTask extends RemoteTask {
    private final SplitInstallService mSplitInstallService;
    private final TaskWrapper<Void> mTask;
    private final List<String> moduleNames;
    // NOTE(review): |task| and |taskSame| appear to be the same wrapper passed
    // twice (raw vs. parameterized) -- confirm against callers.
    DeferredInstallTask(SplitInstallService installService, TaskWrapper task, List<String> moduleNames, TaskWrapper<Void> taskSame) {
        super(task);
        this.mSplitInstallService = installService;
        this.moduleNames = moduleNames;
        this.mTask = taskSame;
    }
    @Override
    protected void execute() {
        try {
            // Hand the request to the remote service; the callback completes
            // |mTask| when the service responds.
            mSplitInstallService.mSplitRemoteManager.getIInterface().deferredInstall(
                this.mSplitInstallService.mPackageName,
                SplitInstallService.wrapModuleNames(moduleNames),
                SplitInstallService.wrapVersionCode(),
                new DeferredInstallCallback(mSplitInstallService, mTask)
            );
        } catch (RemoteException e) {
            // Surface the remote failure to the pending task.
            SplitInstallService.playCore.error(e, "deferredInstall(%s)", this.moduleNames);
            this.mTask.setException(new RuntimeException(e));
        }
    }
}
| 517 |
1,057 | package slowy;
class App {
    /** Returns true iff {@code n} is evenly divisible by {@code d}. */
    private static boolean isDivisible(int n, int d) {
        return n % d == 0;
    }

    /**
     * Trial-division primality test. Deliberately not bounded by sqrt(n):
     * this app exists to give a profiler something slow to measure.
     *
     * Fixes over the previous version:
     * - n < 2 is no longer reported as prime (the old isSimplePrime helper
     *   returned true for every n <= 2, including 0, 1 and negatives);
     * - even numbers > 2 are now rejected (the old loop only tried odd
     *   divisors, so 4, 8, 10, ... were miscounted as prime).
     *
     * Package-private so it can be exercised from tests.
     */
    static boolean isPrime(int n) {
        if (n < 2) return false;
        if (n == 2) return true;
        if (isDivisible(n, 2)) return false;
        for (int d = 3; d < n; d += 2) {
            if (isDivisible(n, d)) return false;
        }
        return true;
    }

    /** Waits for ENTER, counts primes below 200000, prints the count. */
    public static void main(String[] args) throws java.io.IOException {
        System.out.println("Press ENTER to start.");
        System.in.read();
        int count = 0;
        for (int n = 2; n < 200000; ++n) {
            if (isPrime(n)) ++count;
        }
        System.out.println(String.format("Primes found: %d", count));
        System.out.println("Press ENTER to exit.");
        System.in.read();
    }
}
| 387 |
1,382 | <reponame>cropinghigh/pipewire<gh_stars>1000+
/* PipeWire
*
* Copyright © 2021 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <string.h>

#include "pwtest.h"

#include <pipewire/conf.h>
/* Exercises pw_conf_load_conf() with every prefix/path combination in which
 * an absolute component is involved: the config must load regardless of the
 * prefix whenever the path is absolute, and from prefix+relative otherwise. */
PWTEST(config_load_abspath)
{
	char path[PATH_MAX];
	int r;
	FILE *fp;
	struct pw_properties *props;
	char *basename;

	pwtest_mkstemp(path);
	fp = fopen(path, "w");
	/* fopen() was previously unchecked; fail the test loudly instead of
	 * crashing in fputs() if the temp file cannot be opened. */
	pwtest_ptr_notnull(fp);
	fputs("data = x", fp);
	fclose(fp);

	/* Load with NULL prefix and abs path */
	props = pw_properties_new("ignore", "me", NULL);
	r = pw_conf_load_conf(NULL, path, props);
	pwtest_neg_errno_ok(r);
	pwtest_str_eq(pw_properties_get(props, "data"), "x");
	pw_properties_free(props);

	/* Load with non-NULL abs prefix and abs path */
	props = pw_properties_new("ignore", "me", NULL);
	r = pw_conf_load_conf("/dummy", path, props);
	pwtest_neg_errno_ok(r);
	pwtest_str_eq(pw_properties_get(props, "data"), "x");
	pw_properties_free(props);

	/* Load with non-NULL relative prefix and abs path */
	props = pw_properties_new("ignore", "me", NULL);
	r = pw_conf_load_conf("dummy", path, props);
	pwtest_neg_errno_ok(r);
	pwtest_str_eq(pw_properties_get(props, "data"), "x");
	pw_properties_free(props);

	/* Load with non-NULL abs prefix and relative path. Split the path by
	 * hand (basename(3) and dirname(3) are terrible); strrchr() replaces
	 * the legacy rindex(), which POSIX.1-2008 removed. */
	basename = strrchr(path, '/');
	pwtest_ptr_notnull(basename);
	*basename = '\0';
	basename++;
	props = pw_properties_new("ignore", "me", NULL);
	r = pw_conf_load_conf(path, basename, props);
	pwtest_neg_errno_ok(r);
	pwtest_str_eq(pw_properties_get(props, "data"), "x");
	pw_properties_free(props);

	return PWTEST_PASS;
}
/* A NULL config name must be rejected with EINVAL, whatever the prefix is. */
PWTEST(config_load_nullname)
{
	int r;
	struct pw_properties *props;

	props = pw_properties_new("ignore", "me", NULL);

	r = pw_conf_load_conf(NULL, NULL, props);
	pwtest_neg_errno(r, -EINVAL);

	r = pw_conf_load_conf("/dummy", NULL, props);
	pwtest_neg_errno(r, -EINVAL);

	pw_properties_free(props);

	return PWTEST_PASS;
}
/* Registers the configuration-loading tests with the pwtest runner. */
PWTEST_SUITE(context)
{
	pwtest_add(config_load_abspath, PWTEST_NOARG);
	pwtest_add(config_load_nullname, PWTEST_NOARG);
	return PWTEST_PASS;
}
| 1,146 |
1,056 | <reponame>timfel/netbeans<filename>java/form/src/org/netbeans/modules/form/PropertyPicker.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.form;
import java.beans.*;
import java.util.*;
import org.openide.awt.Mnemonics;
/** The PropertyPicker is a form which allows user to choose from property set
 * of specified required class.
 *
 * @author <NAME>
 */
public class PropertyPicker extends javax.swing.JPanel {
    // Return codes for the dialog hosting this panel.
    public static final int CANCEL = 0;
    public static final int OK = 1;
    static final long serialVersionUID =5689122601606238081L;
    /**
     * Initializes the Form.
     *
     * @param formModel form model.
     * @param componentToSelect component whose property should be selected.
     * @param requiredType required type of the property.
     */
    public PropertyPicker(FormModel formModel, RADComponent componentToSelect, Class requiredType) {
        this.requiredType = requiredType;
        initComponents();
        // Fill the combo with every component of the form, sorted by name;
        // the top container gets a localized display name instead.
        java.util.List<RADComponent> componentsList = formModel.getComponentList();
        Collections.sort(componentsList, new ParametersPicker.ComponentComparator());
        components = new RADComponent[componentsList.size()];
        componentsList.toArray(components);
        int selIndex = -1;
        for (Iterator<RADComponent> it = componentsList.iterator(); it.hasNext(); ) {
            RADComponent radComp = it.next();
            if (componentToSelect != null && componentToSelect == radComp)
                selIndex = componentsCombo.getItemCount();
            if (radComp == formModel.getTopRADComponent())
                componentsCombo.addItem(
                    FormUtils.getBundleString("CTL_FormTopContainerName")); // NOI18N
            else
                componentsCombo.addItem(radComp.getName());
        }
        if (selIndex >= 0)
            componentsCombo.setSelectedIndex(selIndex);
        propertyList.setSelectionMode(javax.swing.ListSelectionModel.SINGLE_SELECTION);
        updatePropertyList();
        // localize components
        Mnemonics.setLocalizedText(componentLabel, FormUtils.getBundleString("CTL_CW_Component")); // NOI18N
        Mnemonics.setLocalizedText(listLabel, FormUtils.getBundleString("CTL_CW_PropertyList")); // NOI18N
        componentsCombo.getAccessibleContext().setAccessibleDescription(
            FormUtils.getBundleString("ACSD_CTL_CW_Component")); // NOI18N
        propertyList.getAccessibleContext().setAccessibleDescription(
            FormUtils.getBundleString("ACSD_CTL_CW_PropertyList")); // NOI18N
        getAccessibleContext().setAccessibleDescription(
            FormUtils.getBundleString("ACSD_PropertyPicker")); // NOI18N
//        HelpCtx.setHelpIDString(this, "gui.connecting.code"); // NOI18N
    }
    // True when both a component and one of its properties are selected.
    public boolean isPickerValid() {
        return pickerValid;
    }
    private void setPickerValid(boolean v) {
        boolean old = pickerValid;
        pickerValid = v;
        // Dialog buttons listen to this bound property to enable/disable OK.
        firePropertyChange("pickerValid", old, pickerValid); // NOI18N
    }
    RADComponent getSelectedComponent() {
        return selectedComponent;
    }
    void setSelectedComponent(RADComponent selectedComponent) {
        if (selectedComponent != null)
            componentsCombo.setSelectedItem(selectedComponent.getName());
    }
    PropertyPickerItem getSelectedProperty() {
        if ((selectedComponent == null) ||(propertyList.getSelectedIndex() == -1))
            return null;
        return items [propertyList.getSelectedIndex()];
    }
    void setSelectedProperty(PropertyDescriptor selectedProperty) {
        if (selectedProperty == null) {
            propertyList.setSelectedIndex(-1);
        } else {
            propertyList.setSelectedValue(selectedProperty.getName(), true);
        }
    }
    // ----------------------------------------------------------------------------
    // private methods
    // Rebuilds the property list for the currently selected component,
    // keeping only readable properties assignable to requiredType.
    private void updatePropertyList() {
        RADComponent sel = getSelectedComponent();
        if (sel == null) {
            propertyList.setListData(new Object [0]);
            propertyList.revalidate();
            propertyList.repaint();
        } else {
            PropertyDescriptor[] descs = sel.getBeanInfo().getPropertyDescriptors();
            Map<String,PropertyPickerItem> filtered = new HashMap<String,PropertyPickerItem>();
            for (int i = 0; i < descs.length; i ++) {
                if ((descs[i].getReadMethod() != null) && // filter out non-readable properties
                    (descs[i].getPropertyType() != null) && // indexed properties return null from getPropertyType
                    requiredType.isAssignableFrom(descs[i].getPropertyType())) {
                    PropertyPickerItem item = createItem(descs[i]);
                    filtered.put(item.getPropertyName(), item);
                }
            }
            // The top container may additionally expose getters found in the
            // form's java source that BeanInfo does not report.
            if(sel == sel.getFormModel().getTopRADComponent() ) {
                String[] names = FormEditor.getFormJavaSource(sel.getFormModel()).getPropertyReadMethodNames(requiredType);
                for (int i = 0; i < names.length; i++) {
                    PropertyPickerItem item = createItem(names[i]);
                    if(!filtered.keySet().contains(item.getPropertyName())){
                        filtered.put(item.getPropertyName(), item);
                    }
                }
            }
            items = new PropertyPickerItem[filtered.size()];
            filtered.values().toArray(items);
            // sort the properties by name
            Arrays.sort(items, new Comparator<PropertyPickerItem>() {
                @Override
                public int compare(PropertyPickerItem o1, PropertyPickerItem o2) {
                    return o1.getPropertyName().compareTo(o2.getPropertyName());
                }
            });
            String[] listItems = new String [items.length];
            for (int i = 0; i < listItems.length; i++)
                listItems[i] = items[i].getPropertyName();
            propertyList.setListData(listItems);
            propertyList.revalidate();
            propertyList.repaint();
        }
    }
    // Wraps a BeanInfo property descriptor as a picker item.
    private PropertyPickerItem createItem(final PropertyDescriptor desc) {
        return new PropertyPickerItem() {
            @Override
            public String getPropertyName() {
                return desc.getName();
            }
            @Override
            public String getReadMethodName() {
                return desc.getReadMethod().getName();
            }
            @Override
            public PropertyDescriptor getPropertyDescriptor() {
                return desc;
            }
        };
    }
    // Wraps a read-method name (discovered in the form's java source) as a
    // picker item; no PropertyDescriptor is available in this case.
    private PropertyPickerItem createItem(final String name) {
        return new PropertyPickerItem() {
            @Override
            public String getPropertyName() {
                return FormJavaSource.extractPropertyName(name);
            }
            @Override
            public String getReadMethodName() {
                return FormUtils.getMethodName(name, NO_PARAMETERS);
            }
            @Override
            public PropertyDescriptor getPropertyDescriptor() {
                return null;
            }
        };
    }
    // Recomputes picker validity after any selection change.
    private void updateState() {
        setPickerValid((getSelectedComponent() != null) &&(getSelectedProperty() != null));
    }
    /** This method is called from within the constructor to
     * initialize the form.
     * WARNING: Do NOT modify this code. The content of this method is
     * always regenerated by the FormEditor.
     */
    // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
    private void initComponents() {
        java.awt.GridBagConstraints gridBagConstraints;
        componentsCombo = new javax.swing.JComboBox();
        propertiesScrollPane = new javax.swing.JScrollPane();
        propertyList = new javax.swing.JList();
        componentLabel = new javax.swing.JLabel();
        listLabel = new javax.swing.JLabel();
        setBorder(javax.swing.BorderFactory.createEmptyBorder(12, 12, 0, 11));
        setLayout(new java.awt.GridBagLayout());
        componentsCombo.addItemListener(new java.awt.event.ItemListener() {
            public void itemStateChanged(java.awt.event.ItemEvent evt) {
                componentsComboItemStateChanged(evt);
            }
        });
        gridBagConstraints = new java.awt.GridBagConstraints();
        gridBagConstraints.gridx = 1;
        gridBagConstraints.gridy = 0;
        gridBagConstraints.fill = java.awt.GridBagConstraints.HORIZONTAL;
        gridBagConstraints.ipadx = 128;
        gridBagConstraints.anchor = java.awt.GridBagConstraints.WEST;
        gridBagConstraints.weightx = 1.0;
        gridBagConstraints.insets = new java.awt.Insets(0, 0, 5, 0);
        add(componentsCombo, gridBagConstraints);
        propertyList.addListSelectionListener(new javax.swing.event.ListSelectionListener() {
            public void valueChanged(javax.swing.event.ListSelectionEvent evt) {
                propertyListValueChanged(evt);
            }
        });
        propertiesScrollPane.setViewportView(propertyList);
        gridBagConstraints = new java.awt.GridBagConstraints();
        gridBagConstraints.gridx = 0;
        gridBagConstraints.gridy = 2;
        gridBagConstraints.gridwidth = 2;
        gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
        gridBagConstraints.weightx = 1.0;
        gridBagConstraints.weighty = 1.0;
        add(propertiesScrollPane, gridBagConstraints);
        componentLabel.setLabelFor(componentsCombo);
        componentLabel.setText("Component:");
        gridBagConstraints = new java.awt.GridBagConstraints();
        gridBagConstraints.gridx = 0;
        gridBagConstraints.gridy = 0;
        gridBagConstraints.anchor = java.awt.GridBagConstraints.WEST;
        gridBagConstraints.insets = new java.awt.Insets(0, 0, 5, 6);
        add(componentLabel, gridBagConstraints);
        listLabel.setLabelFor(propertyList);
        listLabel.setText("Properties");
        gridBagConstraints = new java.awt.GridBagConstraints();
        gridBagConstraints.gridx = 0;
        gridBagConstraints.gridy = 1;
        gridBagConstraints.anchor = java.awt.GridBagConstraints.WEST;
        gridBagConstraints.insets = new java.awt.Insets(0, 0, 2, 0);
        add(listLabel, gridBagConstraints);
    }// </editor-fold>//GEN-END:initComponents
    private void propertyListValueChanged(javax.swing.event.ListSelectionEvent evt) {//GEN-FIRST:event_propertyListValueChanged
        updateState();
    }//GEN-LAST:event_propertyListValueChanged
    private void componentsComboItemStateChanged(java.awt.event.ItemEvent evt) {//GEN-FIRST:event_componentsComboItemStateChanged
        if (componentsCombo.getSelectedIndex() == -1)
            selectedComponent = null;
        else
            selectedComponent = components[componentsCombo.getSelectedIndex()];
        updatePropertyList();
    }//GEN-LAST:event_componentsComboItemStateChanged
    // Variables declaration - do not modify//GEN-BEGIN:variables
    private javax.swing.JLabel componentLabel;
    private javax.swing.JComboBox componentsCombo;
    private javax.swing.JLabel listLabel;
    private javax.swing.JScrollPane propertiesScrollPane;
    private javax.swing.JList propertyList;
    // End of variables declaration//GEN-END:variables
    // Whether the current selection identifies a usable property.
    private boolean pickerValid = false;
    // All form components, in the same order as the combo entries.
    private RADComponent[] components;
    // Only properties assignable to this type are offered.
    private Class<?> requiredType;
    // Items currently shown in the list, parallel to the list model.
    private PropertyPickerItem[] items;
    // Component whose properties are listed; null when nothing is selected.
    private RADComponent selectedComponent;
    private static Class[] NO_PARAMETERS = new Class[0];
    // Abstraction over BeanInfo- and source-discovered properties.
    interface PropertyPickerItem {
        public String getPropertyName();
        public String getReadMethodName();
        public PropertyDescriptor getPropertyDescriptor();
    }
}
| 4,899 |
2,151 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/extensions/api/identity/identity_get_auth_token_function.h"
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/extensions/api/identity/identity_api.h"
#include "chrome/browser/extensions/api/identity/identity_constants.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/signin/account_tracker_service_factory.h"
#include "chrome/browser/signin/chrome_signin_client_factory.h"
#include "chrome/browser/signin/profile_oauth2_token_service_factory.h"
#include "chrome/browser/ui/webui/signin/login_ui_service.h"
#include "chrome/browser/ui/webui/signin/login_ui_service_factory.h"
#include "chrome/common/extensions/api/identity.h"
#include "components/signin/core/browser/account_tracker_service.h"
#include "components/signin/core/browser/profile_management_switches.h"
#include "components/signin/core/browser/profile_oauth2_token_service.h"
#include "content/public/common/service_manager_connection.h"
#include "extensions/common/extension_l10n_util.h"
#include "google_apis/gaia/gaia_urls.h"
#include "services/identity/public/cpp/scope_set.h"
#include "services/identity/public/mojom/constants.mojom.h"
#include "services/service_manager/public/cpp/connector.h"
#if defined(OS_CHROMEOS)
#include "chrome/browser/app_mode/app_mode_utils.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chromeos/login/session/user_session_manager.h"
#include "chrome/browser/chromeos/policy/browser_policy_connector_chromeos.h"
#include "chrome/browser/chromeos/settings/device_oauth2_token_service.h"
#include "chrome/browser/chromeos/settings/device_oauth2_token_service_factory.h"
#include "components/user_manager/user_manager.h"
#include "google_apis/gaia/gaia_constants.h"
#endif
namespace extensions {
namespace {
#if defined(OS_CHROMEOS)
// The list of apps that are allowed to use the Identity API to retrieve the
// token from the device robot account in a public session.
// Checked by IsOriginWhitelistedInPublicSession() against the calling
// extension's security origin.
const char* const kPublicSessionAllowedOrigins[] = {
    // Chrome Remote Desktop - Chromium branding.
    "chrome-extension://ljacajndfccfgnfohlgkdphmbnpkjflk/",
    // Chrome Remote Desktop - Official branding.
    "chrome-extension://gbchcmhmhahfdphkhkmpfmihenigjmpp/"};
#endif
}  // namespace
namespace identity = api::identity;
// All prompting flags start false; they are enabled in RunAsync() only when
// the caller passed {interactive: true}.
IdentityGetAuthTokenFunction::IdentityGetAuthTokenFunction()
    :
#if defined(OS_CHROMEOS)
      // On Chrome OS this class consumes OAuth2TokenService directly (see
      // StartDeviceLoginAccessTokenRequest()), so register as a consumer.
      OAuth2TokenService::Consumer("extensions_identity_api"),
#endif
      interactive_(false),
      should_prompt_for_scopes_(false),
      should_prompt_for_signin_(false) {
}
// Closes the async trace span opened in RunAsync().
IdentityGetAuthTokenFunction::~IdentityGetAuthTokenFunction() {
  TRACE_EVENT_ASYNC_END0("identity", "IdentityGetAuthTokenFunction", this);
}
// Entry point. Performs synchronous validation (profile, manifest client id,
// scopes); on success starts the asynchronous flow and returns true, with the
// response delivered later through CompleteAsyncRun().
bool IdentityGetAuthTokenFunction::RunAsync() {
  TRACE_EVENT_ASYNC_BEGIN1("identity",
                           "IdentityGetAuthTokenFunction",
                           this,
                           "extension",
                           extension()->id());
  // The identity API is unavailable in incognito profiles.
  if (GetProfile()->IsOffTheRecord()) {
    error_ = identity_constants::kOffTheRecord;
    return false;
  }
  std::unique_ptr<identity::GetAuthToken::Params> params(
      identity::GetAuthToken::Params::Create(*args_));
  EXTENSION_FUNCTION_VALIDATE(params.get());
  // UI (consent or sign-in prompts) is only permitted when the caller passed
  // {interactive: true} in the details.
  interactive_ = params->details.get() &&
      params->details->interactive.get() &&
      *params->details->interactive;
  should_prompt_for_scopes_ = interactive_;
  should_prompt_for_signin_ = interactive_;
  const OAuth2Info& oauth2_info = OAuth2Info::GetOAuth2Info(extension());
  // Check that the necessary information is present in the manifest.
  oauth2_client_id_ = GetOAuth2ClientId();
  if (oauth2_client_id_.empty()) {
    error_ = identity_constants::kInvalidClientId;
    return false;
  }
  // Scopes default to the manifest's list but may be overridden per call.
  std::set<std::string> scopes(oauth2_info.scopes.begin(),
                               oauth2_info.scopes.end());
  std::string gaia_id;
  if (params->details.get()) {
    if (params->details->account.get())
      gaia_id = params->details->account->id;
    if (params->details->scopes.get()) {
      scopes = std::set<std::string>(params->details->scopes->begin(),
                                     params->details->scopes->end());
    }
  }
  if (scopes.size() == 0) {
    error_ = identity_constants::kInvalidScopes;
    return false;
  }
  // From here on out, results must be returned asynchronously.
  StartAsyncRun();
  GetIdentityManager()->GetPrimaryAccountInfo(base::BindOnce(
      &IdentityGetAuthTokenFunction::OnReceivedPrimaryAccountInfo, this, scopes,
      gaia_id));
  return true;
}
// Callback with the signed-in primary account. Decides whether the extension
// is targeting the primary account or (when multi-account is enabled) a
// secondary one, and routes to OnReceivedExtensionAccountInfo accordingly.
void IdentityGetAuthTokenFunction::OnReceivedPrimaryAccountInfo(
    const std::set<std::string>& scopes,
    const std::string& extension_gaia_id,
    const base::Optional<AccountInfo>& account_info,
    const ::identity::AccountState& account_state) {
  std::string primary_gaia_id;
  if (account_info)
    primary_gaia_id = account_info->gaia;
  // Detect and handle the case where the extension is using an account other
  // than the primary account.
  if (!extension_gaia_id.empty() && extension_gaia_id != primary_gaia_id) {
    if (!signin::IsExtensionsMultiAccount()) {
      // TODO(courage): should this be a different error?
      CompleteFunctionWithError(identity_constants::kUserNotSignedIn);
      return;
    }
    // Get the AccountInfo for the account that the extension wishes to use.
    identity_manager_->GetAccountInfoFromGaiaId(
        extension_gaia_id,
        base::BindOnce(
            &IdentityGetAuthTokenFunction::OnReceivedExtensionAccountInfo, this,
            false /* not primary account */, scopes));
    return;
  }
  // The extension is using the primary account.
  OnReceivedExtensionAccountInfo(true /* primary account */, scopes,
                                 account_info, account_state);
}
// Callback with the account the extension will use. Builds the token cache
// key, then either starts the (non-interactive) mint flow or, if the account
// has no refresh token, starts a sign-in flow where allowed.
void IdentityGetAuthTokenFunction::OnReceivedExtensionAccountInfo(
    bool is_primary_account,
    const std::set<std::string>& scopes,
    const base::Optional<AccountInfo>& account_info,
    const ::identity::AccountState& account_state) {
  std::string account_id;
  if (account_info)
    account_id = account_info->account_id;
  if (!is_primary_account && account_id.empty()) {
    // It is not possible to sign in the user to an account other than the
    // primary account, so just error out here.
    CompleteFunctionWithError(identity_constants::kUserNotSignedIn);
    return;
  }
  token_key_.reset(
      new ExtensionTokenKey(extension()->id(), account_id, scopes));
#if defined(OS_CHROMEOS)
  // Enterprise-managed kiosk apps and allow-listed public-session apps use
  // the device robot account and skip the refresh-token check below.
  policy::BrowserPolicyConnectorChromeOS* connector =
      g_browser_process->platform_part()->browser_policy_connector_chromeos();
  bool is_kiosk = user_manager::UserManager::Get()->IsLoggedInAsKioskApp();
  bool is_public_session =
      user_manager::UserManager::Get()->IsLoggedInAsPublicAccount();
  if (connector->IsEnterpriseManaged() && (is_kiosk || is_public_session)) {
    if (is_public_session && !IsOriginWhitelistedInPublicSession()) {
      CompleteFunctionWithError(identity_constants::kUserNotSignedIn);
      return;
    }
    StartMintTokenFlow(IdentityMintRequestQueue::MINT_TYPE_NONINTERACTIVE);
    return;
  }
#endif
  if (!account_state.has_refresh_token) {
    if (!ShouldStartSigninFlow()) {
      CompleteFunctionWithError(identity_constants::kUserNotSignedIn);
      return;
    }
    // Display a login prompt.
    StartSigninFlow();
  } else {
    StartMintTokenFlow(IdentityMintRequestQueue::MINT_TYPE_NONINTERACTIVE);
  }
}
// Keeps |this| alive for the duration of the async flow and registers for
// IdentityAPI shutdown so an in-flight request can be cancelled cleanly.
void IdentityGetAuthTokenFunction::StartAsyncRun() {
  // Balanced in CompleteAsyncRun
  AddRef();
  identity_api_shutdown_subscription_ =
      extensions::IdentityAPI::GetFactoryInstance()
          ->Get(GetProfile())
          ->RegisterOnShutdownCallback(base::Bind(
              &IdentityGetAuthTokenFunction::OnIdentityAPIShutdown, this));
}
// Sends the response and drops the self-reference taken in StartAsyncRun().
void IdentityGetAuthTokenFunction::CompleteAsyncRun(bool success) {
  identity_api_shutdown_subscription_.reset();
  SendResponse(success);
  Release(); // Balanced in StartAsyncRun
}
// Success path: returns the access token to the extension.
void IdentityGetAuthTokenFunction::CompleteFunctionWithResult(
    const std::string& access_token) {
  SetResult(std::make_unique<base::Value>(access_token));
  CompleteAsyncRun(true);
}
// Failure path: records |error| as the function's error string.
void IdentityGetAuthTokenFunction::CompleteFunctionWithError(
    const std::string& error) {
  TRACE_EVENT_ASYNC_STEP_PAST1("identity",
                               "IdentityGetAuthTokenFunction",
                               this,
                               "CompleteFunctionWithError",
                               "error",
                               error);
  error_ = error;
  CompleteAsyncRun(false);
}
bool IdentityGetAuthTokenFunction::ShouldStartSigninFlow() {
if (!should_prompt_for_signin_)
return false;
ProfileOAuth2TokenService* token_service =
ProfileOAuth2TokenServiceFactory::GetForProfile(GetProfile());
bool account_needs_reauth =
!token_service->RefreshTokenIsAvailable(token_key_->account_id) ||
token_service->RefreshTokenHasError(token_key_->account_id);
return account_needs_reauth;
}
// Invalidates all cached tokens and shows the sign-in UI. May only be called
// when ShouldStartSigninFlow() is true, and at most once per function run.
void IdentityGetAuthTokenFunction::StartSigninFlow() {
  DCHECK(ShouldStartSigninFlow());
  // All cached tokens are invalid because the user is not signed in.
  IdentityAPI* id_api =
      extensions::IdentityAPI::GetFactoryInstance()->Get(GetProfile());
  id_api->EraseAllCachedTokens();
  // If the signin flow fails, don't display the login prompt again.
  should_prompt_for_signin_ = false;
#if defined(OS_CHROMEOS)
  // In normal mode (i.e. non-kiosk mode), the user has to log out to
  // re-establish credentials. Let the global error popup handle everything.
  // In kiosk mode, interactive sign-in is not supported.
  SigninFailed();
  return;
  // NOTE: on Chrome OS the code below still compiles but is unreachable.
#endif
  if (g_browser_process->IsShuttingDown()) {
    // The login prompt cannot be displayed when the browser process is shutting
    // down.
    SigninFailed();
    return;
  }
  // Start listening for the primary account being available and display a
  // login prompt.
  GetIdentityManager()->GetPrimaryAccountWhenAvailable(
      base::BindOnce(&IdentityGetAuthTokenFunction::OnPrimaryAccountAvailable,
                     base::Unretained(this)));
  ShowLoginPopup();
}
// Enqueues this request on the per-token-key mint queue. Flows are serialized
// so GAIA is not hammered and consent UIs are consolidated; the queue calls
// back into StartMintToken() when this request reaches the front.
void IdentityGetAuthTokenFunction::StartMintTokenFlow(
    IdentityMintRequestQueue::MintType type) {
  mint_token_flow_type_ = type;
  // Flows are serialized to prevent excessive traffic to GAIA, and
  // to consolidate UI pop-ups.
  IdentityAPI* id_api =
      extensions::IdentityAPI::GetFactoryInstance()->Get(GetProfile());
  if (!should_prompt_for_scopes_) {
    // Caller requested no interaction.
    if (type == IdentityMintRequestQueue::MINT_TYPE_INTERACTIVE) {
      // GAIA told us to do a consent UI.
      CompleteFunctionWithError(identity_constants::kNoGrant);
      return;
    }
    if (!id_api->mint_queue()->empty(
            IdentityMintRequestQueue::MINT_TYPE_INTERACTIVE, *token_key_)) {
      // Another call is going through a consent UI.
      CompleteFunctionWithError(identity_constants::kNoGrant);
      return;
    }
  }
  id_api->mint_queue()->RequestStart(type, *token_key_, this);
}
// Notifies the mint queue that this request finished (success or failure) so
// the next queued request for the same key can proceed.
void IdentityGetAuthTokenFunction::CompleteMintTokenFlow() {
  IdentityMintRequestQueue::MintType type = mint_token_flow_type_;
  extensions::IdentityAPI::GetFactoryInstance()
      ->Get(GetProfile())
      ->mint_queue()
      ->RequestComplete(type, *token_key_, this);
}
// Called by the mint queue when this request reaches the front. Consults the
// token cache first; on a miss, either mints via GAIA (non-interactive) or
// shows the OAuth consent dialog (interactive).
void IdentityGetAuthTokenFunction::StartMintToken(
    IdentityMintRequestQueue::MintType type) {
  TRACE_EVENT_ASYNC_STEP_PAST1("identity",
                               "IdentityGetAuthTokenFunction",
                               this,
                               "StartMintToken",
                               "type",
                               type);
  const OAuth2Info& oauth2_info = OAuth2Info::GetOAuth2Info(extension());
  IdentityAPI* id_api = IdentityAPI::GetFactoryInstance()->Get(GetProfile());
  IdentityTokenCacheValue cache_entry = id_api->GetCachedToken(*token_key_);
  IdentityTokenCacheValue::CacheValueStatus cache_status =
      cache_entry.status();
  if (type == IdentityMintRequestQueue::MINT_TYPE_NONINTERACTIVE) {
    switch (cache_status) {
      case IdentityTokenCacheValue::CACHE_STATUS_NOTFOUND:
#if defined(OS_CHROMEOS)
        // Always force minting token for ChromeOS kiosk app and public session.
        if (user_manager::UserManager::Get()->IsLoggedInAsPublicAccount() &&
            !IsOriginWhitelistedInPublicSession()) {
          CompleteFunctionWithError(identity_constants::kUserNotSignedIn);
          return;
        }
        if (user_manager::UserManager::Get()->IsLoggedInAsKioskApp() ||
            user_manager::UserManager::Get()->IsLoggedInAsPublicAccount()) {
          gaia_mint_token_mode_ = OAuth2MintTokenFlow::MODE_MINT_TOKEN_FORCE;
          policy::BrowserPolicyConnectorChromeOS* connector =
              g_browser_process->platform_part()
                  ->browser_policy_connector_chromeos();
          // Enterprise devices mint against the device robot account.
          if (connector->IsEnterpriseManaged()) {
            StartDeviceLoginAccessTokenRequest();
          } else {
            StartLoginAccessTokenRequest();
          }
          return;
        }
#endif
        if (oauth2_info.auto_approve)
          // oauth2_info.auto_approve is protected by a whitelist in
          // _manifest_features.json hence only selected extensions take
          // advantage of forcefully minting the token.
          gaia_mint_token_mode_ = OAuth2MintTokenFlow::MODE_MINT_TOKEN_FORCE;
        else
          gaia_mint_token_mode_ = OAuth2MintTokenFlow::MODE_MINT_TOKEN_NO_FORCE;
        StartLoginAccessTokenRequest();
        break;
      case IdentityTokenCacheValue::CACHE_STATUS_TOKEN:
        // Cache hit: return the token without touching GAIA.
        CompleteMintTokenFlow();
        CompleteFunctionWithResult(cache_entry.token());
        break;
      case IdentityTokenCacheValue::CACHE_STATUS_ADVICE:
        // GAIA previously asked for consent; re-enter the queue interactively.
        CompleteMintTokenFlow();
        should_prompt_for_signin_ = false;
        issue_advice_ = cache_entry.issue_advice();
        StartMintTokenFlow(IdentityMintRequestQueue::MINT_TYPE_INTERACTIVE);
        break;
    }
  } else {
    DCHECK(type == IdentityMintRequestQueue::MINT_TYPE_INTERACTIVE);
    // A token may have been cached by a parallel request while we waited.
    if (cache_status == IdentityTokenCacheValue::CACHE_STATUS_TOKEN) {
      CompleteMintTokenFlow();
      CompleteFunctionWithResult(cache_entry.token());
    } else {
      ShowOAuthApprovalDialog(issue_advice_);
    }
  }
}
// OAuth2MintTokenFlow success: cache the token with its TTL and finish.
void IdentityGetAuthTokenFunction::OnMintTokenSuccess(
    const std::string& access_token, int time_to_live) {
  TRACE_EVENT_ASYNC_STEP_PAST0("identity",
                               "IdentityGetAuthTokenFunction",
                               this,
                               "OnMintTokenSuccess");
  IdentityTokenCacheValue token(access_token,
                                base::TimeDelta::FromSeconds(time_to_live));
  IdentityAPI::GetFactoryInstance()->Get(GetProfile())->SetCachedToken(
      *token_key_, token);
  CompleteMintTokenFlow();
  CompleteFunctionWithResult(access_token);
}
// OAuth2MintTokenFlow failure: auth-related errors may trigger a sign-in
// flow (when interactive); everything else is reported to the caller.
void IdentityGetAuthTokenFunction::OnMintTokenFailure(
    const GoogleServiceAuthError& error) {
  TRACE_EVENT_ASYNC_STEP_PAST1("identity",
                               "IdentityGetAuthTokenFunction",
                               this,
                               "OnMintTokenFailure",
                               "error",
                               error.ToString());
  CompleteMintTokenFlow();
  switch (error.state()) {
    case GoogleServiceAuthError::SERVICE_ERROR:
      if (ShouldStartSigninFlow()) {
        StartSigninFlow();
        return;
      }
      break;
    case GoogleServiceAuthError::INVALID_GAIA_CREDENTIALS:
    case GoogleServiceAuthError::ACCOUNT_DELETED:
    case GoogleServiceAuthError::ACCOUNT_DISABLED:
      // TODO(courage): flush ticket and retry once
      if (ShouldStartSigninFlow()) {
        StartSigninFlow();
        return;
      }
      break;
    default:
      // Return error to caller.
      break;
  }
  CompleteFunctionWithError(
      std::string(identity_constants::kAuthFailure) + error.ToString());
}
// GAIA returned consent advice instead of a token (NO_FORCE mint with a
// revoked grant): cache the advice and restart interactively.
void IdentityGetAuthTokenFunction::OnIssueAdviceSuccess(
    const IssueAdviceInfo& issue_advice) {
  TRACE_EVENT_ASYNC_STEP_PAST0("identity",
                               "IdentityGetAuthTokenFunction",
                               this,
                               "OnIssueAdviceSuccess");
  IdentityAPI::GetFactoryInstance()->Get(GetProfile())->SetCachedToken(
      *token_key_, IdentityTokenCacheValue(issue_advice));
  CompleteMintTokenFlow();
  should_prompt_for_signin_ = false;
  // Existing grant was revoked and we used NO_FORCE, so we got info back
  // instead. Start a consent UI if we can.
  issue_advice_ = issue_advice;
  StartMintTokenFlow(IdentityMintRequestQueue::MINT_TYPE_INTERACTIVE);
}
// Fired after the sign-in flow when a primary account becomes available;
// fills in the token key's account id if needed and resumes minting.
void IdentityGetAuthTokenFunction::OnPrimaryAccountAvailable(
    const AccountInfo& account_info,
    const ::identity::AccountState& account_state) {
  TRACE_EVENT_ASYNC_STEP_PAST0("identity", "IdentityGetAuthTokenFunction", this,
                               "OnPrimaryAccountAvailable");
  // If there was no account associated this profile before the
  // sign-in, we may not have an account_id in the token_key yet.
  if (token_key_->account_id.empty()) {
    token_key_->account_id = account_info.account_id;
  }
  StartMintTokenFlow(IdentityMintRequestQueue::MINT_TYPE_NONINTERACTIVE);
}
// Sign-in flow failed or was unavailable: report "user not signed in".
void IdentityGetAuthTokenFunction::SigninFailed() {
  TRACE_EVENT_ASYNC_STEP_PAST0("identity",
                               "IdentityGetAuthTokenFunction",
                               this,
                               "SigninFailed");
  CompleteFunctionWithError(identity_constants::kUserNotSignedIn);
}
// GaiaWebAuthFlow failure: map each failure mode to an identity API error.
// Genuine auth errors (not transient network problems) may fall back to a
// sign-in prompt when interactive.
void IdentityGetAuthTokenFunction::OnGaiaFlowFailure(
    GaiaWebAuthFlow::Failure failure,
    GoogleServiceAuthError service_error,
    const std::string& oauth_error) {
  CompleteMintTokenFlow();
  std::string error;
  switch (failure) {
    case GaiaWebAuthFlow::WINDOW_CLOSED:
      error = identity_constants::kUserRejected;
      break;
    case GaiaWebAuthFlow::INVALID_REDIRECT:
      error = identity_constants::kInvalidRedirect;
      break;
    case GaiaWebAuthFlow::SERVICE_AUTH_ERROR:
      // If this is really an authentication error and not just a transient
      // network error, then we show signin UI if appropriate.
      if (service_error.state() != GoogleServiceAuthError::CONNECTION_FAILED &&
          service_error.state() !=
              GoogleServiceAuthError::SERVICE_UNAVAILABLE) {
        if (ShouldStartSigninFlow()) {
          StartSigninFlow();
          return;
        }
      }
      error = std::string(identity_constants::kAuthFailure) +
          service_error.ToString();
      break;
    case GaiaWebAuthFlow::OAUTH_ERROR:
      error = MapOAuth2ErrorToDescription(oauth_error);
      break;
    case GaiaWebAuthFlow::LOAD_FAILED:
      error = identity_constants::kPageLoadFailure;
      break;
    default:
      NOTREACHED() << "Unexpected error from gaia web auth flow: " << failure;
      error = identity_constants::kInvalidRedirect;
      break;
  }
  CompleteFunctionWithError(error);
}
// GaiaWebAuthFlow success: cache the token (only when a parseable expiration
// was supplied) and return it to the caller.
void IdentityGetAuthTokenFunction::OnGaiaFlowCompleted(
    const std::string& access_token,
    const std::string& expiration) {
  TRACE_EVENT_ASYNC_STEP_PAST0("identity",
                               "IdentityGetAuthTokenFunction",
                               this,
                               "OnGaiaFlowCompleted");
  int time_to_live;
  if (!expiration.empty() && base::StringToInt(expiration, &time_to_live)) {
    IdentityTokenCacheValue token_value(
        access_token, base::TimeDelta::FromSeconds(time_to_live));
    IdentityAPI::GetFactoryInstance()->Get(GetProfile())->SetCachedToken(
        *token_key_, token_value);
  }
  CompleteMintTokenFlow();
  CompleteFunctionWithResult(access_token);
}
// Login-scoped access token arrived (or failed): either start the GAIA mint
// request with it, or surface the failure through the Gaia-flow error path.
void IdentityGetAuthTokenFunction::OnGetAccessTokenComplete(
    const base::Optional<std::string>& access_token,
    base::Time expiration_time,
    const GoogleServiceAuthError& error) {
  // By the time we get here we should no longer have an outstanding O2TS
  // request (either because we never made a request to O2TS directly or because
  // the request was already fulfilled).
  DCHECK(!login_token_request_);
  if (access_token) {
    TRACE_EVENT_ASYNC_STEP_PAST1("identity", "IdentityGetAuthTokenFunction",
                                 this, "OnGetAccessTokenComplete", "account",
                                 token_key_->account_id);
    StartGaiaRequest(access_token.value());
  } else {
    TRACE_EVENT_ASYNC_STEP_PAST1("identity", "IdentityGetAuthTokenFunction",
                                 this, "OnGetAccessTokenComplete", "error",
                                 error.ToString());
    OnGaiaFlowFailure(GaiaWebAuthFlow::SERVICE_AUTH_ERROR, error,
                      std::string());
  }
}
#if defined(OS_CHROMEOS)
// Chrome OS OAuth2TokenService::Consumer callbacks: adapt the legacy
// success/failure pair onto the common OnGetAccessTokenComplete() path.
void IdentityGetAuthTokenFunction::OnGetTokenSuccess(
    const OAuth2TokenService::Request* request,
    const std::string& access_token,
    const base::Time& expiration_time) {
  login_token_request_.reset();
  OnGetAccessTokenComplete(access_token, expiration_time,
                           GoogleServiceAuthError::AuthErrorNone());
}
void IdentityGetAuthTokenFunction::OnGetTokenFailure(
    const OAuth2TokenService::Request* request,
    const GoogleServiceAuthError& error) {
  login_token_request_.reset();
  OnGetAccessTokenComplete(base::nullopt, base::Time(), error);
}
#endif
// IdentityAPI is shutting down: abandon all in-flight work, dequeue any
// pending mint request, and fail the function with kCanceled.
void IdentityGetAuthTokenFunction::OnIdentityAPIShutdown() {
  gaia_web_auth_flow_.reset();
  login_token_request_.reset();
  identity_manager_.reset();
  // Note that if |token_key_| hasn't yet been populated then this instance has
  // definitely not made a request with the MintQueue.
  if (token_key_) {
    extensions::IdentityAPI::GetFactoryInstance()
        ->Get(GetProfile())
        ->mint_queue()
        ->RequestCancel(*token_key_, this);
  }
  CompleteFunctionWithError(identity_constants::kCanceled);
}
#if defined(OS_CHROMEOS)
// Chrome OS enterprise path: request an access token for the device robot
// account; the result arrives via OnGetTokenSuccess/OnGetTokenFailure.
void IdentityGetAuthTokenFunction::StartDeviceLoginAccessTokenRequest() {
  chromeos::DeviceOAuth2TokenService* service =
      chromeos::DeviceOAuth2TokenServiceFactory::Get();
  // Since robot account refresh tokens are scoped down to [any-api] only,
  // request access token for [any-api] instead of login.
  OAuth2TokenService::ScopeSet scopes;
  scopes.insert(GaiaConstants::kAnyApiOAuth2Scope);
  login_token_request_ =
      service->StartRequest(service->GetRobotAccountId(),
                            scopes,
                            this);
}
bool IdentityGetAuthTokenFunction::IsOriginWhitelistedInPublicSession() {
DCHECK(extension());
GURL extension_url = extension()->url();
for (size_t i = 0; i < arraysize(kPublicSessionAllowedOrigins); i++) {
URLPattern allowed_origin(URLPattern::SCHEME_ALL,
kPublicSessionAllowedOrigins[i]);
if (allowed_origin.MatchesSecurityOrigin(extension_url)) {
return true;
}
}
return false;
}
#endif
// Requests a login-scoped access token for the target account. In Chrome OS
// forced app mode the app's own OAuth client is used when available;
// otherwise the token is fetched through the Identity service.
void IdentityGetAuthTokenFunction::StartLoginAccessTokenRequest() {
#if defined(OS_CHROMEOS)
  if (chrome::IsRunningInForcedAppMode()) {
    std::string app_client_id;
    std::string app_client_secret;
    if (chromeos::UserSessionManager::GetInstance()->
            GetAppModeChromeClientOAuthInfo(&app_client_id,
                                            &app_client_secret)) {
      ProfileOAuth2TokenService* service =
          ProfileOAuth2TokenServiceFactory::GetForProfile(GetProfile());
      login_token_request_ =
          service->StartRequestForClient(token_key_->account_id,
                                         app_client_id,
                                         app_client_secret,
                                         OAuth2TokenService::ScopeSet(),
                                         this);
      return;
    }
  }
#endif
  GetIdentityManager()->GetAccessToken(
      token_key_->account_id, ::identity::ScopeSet(), "extensions_identity_api",
      base::BindOnce(&IdentityGetAuthTokenFunction::OnGetAccessTokenComplete,
                     base::Unretained(this)));
}
// Starts the GAIA mint-token flow using a freshly obtained login token.
void IdentityGetAuthTokenFunction::StartGaiaRequest(
    const std::string& login_access_token) {
  DCHECK(!login_access_token.empty());
  mint_token_flow_.reset(CreateMintTokenFlow());
  mint_token_flow_->Start(GetProfile()->GetRequestContext(),
                          login_access_token);
}
// Shows the browser sign-in UI; virtual so tests can stub it out.
// NOTE(review): virtual-ness is declared in the header, not visible here.
void IdentityGetAuthTokenFunction::ShowLoginPopup() {
  LoginUIService* login_ui_service =
      LoginUIServiceFactory::GetForProfile(GetProfile());
  login_ui_service->ShowLoginPopup();
}
// Shows the GAIA OAuth consent dialog for the advised scopes.
void IdentityGetAuthTokenFunction::ShowOAuthApprovalDialog(
    const IssueAdviceInfo& issue_advice) {
  const std::string locale = extension_l10n_util::CurrentLocaleOrDefault();
  gaia_web_auth_flow_.reset(new GaiaWebAuthFlow(
      this, GetProfile(), token_key_.get(), oauth2_client_id_, locale));
  gaia_web_auth_flow_->Start();
}
// Builds the OAuth2MintTokenFlow for the current extension/client/scopes.
// Caller takes ownership of the returned flow.
OAuth2MintTokenFlow* IdentityGetAuthTokenFunction::CreateMintTokenFlow() {
  SigninClient* signin_client =
      ChromeSigninClientFactory::GetForProfile(GetProfile());
  std::string signin_scoped_device_id =
      signin_client->GetSigninScopedDeviceId();
  OAuth2MintTokenFlow* mint_token_flow = new OAuth2MintTokenFlow(
      this,
      OAuth2MintTokenFlow::Parameters(
          extension()->id(),
          oauth2_client_id_,
          std::vector<std::string>(token_key_->scopes.begin(),
                                   token_key_->scopes.end()),
          signin_scoped_device_id,
          gaia_mint_token_mode_));
  return mint_token_flow;
}
// True when a refresh token exists for the requested account.
bool IdentityGetAuthTokenFunction::HasLoginToken() const {
  ProfileOAuth2TokenService* token_service =
      ProfileOAuth2TokenServiceFactory::GetForProfile(GetProfile());
  return token_service->RefreshTokenIsAvailable(token_key_->account_id);
}
// Translates an OAuth2 error string returned by the Gaia web-auth flow into
// one of the identity API's error constants; unknown errors are surfaced
// verbatim, tagged as auth failures.
std::string IdentityGetAuthTokenFunction::MapOAuth2ErrorToDescription(
    const std::string& error) {
  static const char kOAuth2ErrorAccessDenied[] = "access_denied";
  static const char kOAuth2ErrorInvalidScope[] = "invalid_scope";
  if (error == kOAuth2ErrorAccessDenied)
    return std::string(identity_constants::kUserRejected);
  if (error == kOAuth2ErrorInvalidScope)
    return std::string(identity_constants::kInvalidScopes);
  return std::string(identity_constants::kAuthFailure) + error;
}
// Resolves the OAuth2 client id for this extension. Component apps with
// auto_approve may omit the manifest field and fall back to Chrome's own
// client id; everyone else must declare their own.
std::string IdentityGetAuthTokenFunction::GetOAuth2ClientId() const {
  const OAuth2Info& oauth2_info = OAuth2Info::GetOAuth2Info(extension());
  const bool may_use_chrome_client_id =
      oauth2_info.client_id.empty() &&
      extension()->location() == Manifest::COMPONENT &&
      oauth2_info.auto_approve;
  return may_use_chrome_client_id
             ? GaiaUrls::GetInstance()->oauth2_chrome_client_id()
             : oauth2_info.client_id;
}
// Lazily binds the mojo connection to the Identity service and returns it.
::identity::mojom::IdentityManager*
IdentityGetAuthTokenFunction::GetIdentityManager() {
  if (!identity_manager_.is_bound()) {
    content::BrowserContext::GetConnectorFor(GetProfile())
        ->BindInterface(::identity::mojom::kServiceName,
                        mojo::MakeRequest(&identity_manager_));
  }
  return identity_manager_.get();
}
} // namespace extensions
| 10,822 |
421 |
// System.Web.Services.Description.MimeXmlBinding
// System.Web.Services.Description.MimeXmlBinding.MimeXmlBinding()
// System.Web.Services.Description.MimeXmlBinding.Part
/* The following program demonstrates the constructor and the 'Part' property
of the 'MimeXmlBinding' class. It takes 'MimeXmlBinding_Part_3_Input_CPP.wsdl'
as input, which does not contain a 'Binding' object that supports 'HttpPost'.
It sets the message part property to 'Body', on which 'MimeXmlBinding' is
applied, and finally writes the result to 'MimeXmlBinding_Part_3_Output_CPP.wsdl'.
*/
// <Snippet1>
#using <System.Xml.dll>
#using <System.Web.Services.dll>
#using <System.dll>
using namespace System;
using namespace System::Web::Services::Description;
using namespace System::Collections;
using namespace System::Xml;
// Builds an HttpPost binding (with a MimeXmlBinding on its output) and adds
// it to the WSDL read from disk, then writes the augmented WSDL back out.
int main()
{
   try
   {
      ServiceDescription^ myDescription = ServiceDescription::Read( "MimeXmlBinding_Part_3_Input_CPP.wsdl" );
      // Create the 'Binding' object.
      Binding^ myBinding = gcnew Binding;
      // Initialize 'Name' property of 'Binding' class.
      myBinding->Name = "MimeXmlBinding_Part_3_ServiceHttpPost";
      XmlQualifiedName^ myXmlQualifiedName = gcnew XmlQualifiedName( "s0:MimeXmlBinding_Part_3_ServiceHttpPost" );
      myBinding->Type = myXmlQualifiedName;
      // Create the 'HttpBinding' object.
      HttpBinding^ myHttpBinding = gcnew HttpBinding;
      myHttpBinding->Verb = "POST";
      // Add the 'HttpBinding' to the 'Binding'.
      myBinding->Extensions->Add( myHttpBinding );
      // Create the 'OperationBinding' object.
      OperationBinding^ myOperationBinding = gcnew OperationBinding;
      myOperationBinding->Name = "AddNumbers";
      HttpOperationBinding^ myHttpOperationBinding = gcnew HttpOperationBinding;
      myHttpOperationBinding->Location = "/AddNumbers";
      // Add the 'HttpOperationBinding' to 'OperationBinding'.
      myOperationBinding->Extensions->Add( myHttpOperationBinding );
      // Create the 'InputBinding' object.
      InputBinding^ myInputBinding = gcnew InputBinding;
      MimeContentBinding^ myMimeContentBinding = gcnew MimeContentBinding;
      myMimeContentBinding->Type = "application/x-www-form-urlencoded";
      myInputBinding->Extensions->Add( myMimeContentBinding );
      // Add the 'InputBinding' to 'OperationBinding'.
      myOperationBinding->Input = myInputBinding;
      // <Snippet2>
      // <Snippet3>
      // Create an OutputBinding.
      OutputBinding^ myOutputBinding = gcnew OutputBinding;
      MimeXmlBinding^ myMimeXmlBinding = gcnew MimeXmlBinding;
      // Initialize the Part property of the MimeXmlBinding.
      myMimeXmlBinding->Part = "Body";
      // Add the MimeXmlBinding to the OutputBinding.
      myOutputBinding->Extensions->Add( myMimeXmlBinding );
      // </Snippet3>
      // </Snippet2>
      // Add the 'OutPutBinding' to 'OperationBinding'.
      myOperationBinding->Output = myOutputBinding;
      // Add the 'OperationBinding' to 'Binding'.
      myBinding->Operations->Add( myOperationBinding );
      // Add the 'Binding' to 'BindingCollection' of 'ServiceDescription'.
      myDescription->Bindings->Add( myBinding );
      // Write the 'ServiceDescription' as a WSDL file.
      myDescription->Write( "MimeXmlBinding_Part_3_Output_CPP.wsdl" );
      Console::WriteLine( "WSDL file with name 'MimeXmlBinding_Part_3_Output_CPP.wsdl' is"
      " created successfully." );
   }
   catch ( Exception^ e )
   {
      // Report read/write or binding-construction failures to the console.
      Console::WriteLine( "Exception: {0}", e->Message );
   }
}
// </Snippet1>
| 1,430 |
17,703 | <reponame>dcillera/envoy
#pragma once
#include "envoy/compression/compressor/factory.h"
#include "envoy/extensions/compression/brotli/compressor/v3/brotli.pb.h"
#include "envoy/extensions/compression/brotli/compressor/v3/brotli.pb.validate.h"
#include "source/common/http/headers.h"
#include "source/extensions/compression/brotli/compressor/brotli_compressor_impl.h"
#include "source/extensions/compression/common/compressor/factory_base.h"
namespace Envoy {
namespace Extensions {
namespace Compression {
namespace Brotli {
namespace Compressor {
namespace {
// Stats prefix shared by stats emitted for the brotli compressor.
const std::string& brotliStatsPrefix() { CONSTRUCT_ON_FIRST_USE(std::string, "brotli."); }
// Canonical extension name under which the compressor library is registered.
const std::string& brotliExtensionName() {
  CONSTRUCT_ON_FIRST_USE(std::string, "envoy.compression.brotli.compressor");
}
} // namespace
// Factory producing BrotliCompressorImpl instances configured from the
// envoy.extensions.compression.brotli.compressor.v3.Brotli proto.
class BrotliCompressorFactory : public Envoy::Compression::Compressor::CompressorFactory {
public:
  BrotliCompressorFactory(
      const envoy::extensions::compression::brotli::compressor::v3::Brotli& brotli);
  // Envoy::Compression::Compressor::CompressorFactory
  Envoy::Compression::Compressor::CompressorPtr createCompressor() override;
  const std::string& statsPrefix() const override { return brotliStatsPrefix(); }
  const std::string& contentEncoding() const override {
    return Http::CustomHeaders::get().ContentEncodingValues.Brotli;
  }

private:
  // Maps the proto EncoderMode enum onto the implementation's enum.
  static BrotliCompressorImpl::EncoderMode encoderModeEnum(
      envoy::extensions::compression::brotli::compressor::v3::Brotli::EncoderMode encoder_mode);

  // Encoder settings captured from the proto config at construction time.
  const uint32_t chunk_size_;
  const bool disable_literal_context_modeling_;
  const BrotliCompressorImpl::EncoderMode encoder_mode_;
  const uint32_t input_block_bits_;
  const uint32_t quality_;
  const uint32_t window_bits_;
};
// Registers the brotli compressor library under brotliExtensionName() and
// validates/translates its typed proto config into a BrotliCompressorFactory.
class BrotliCompressorLibraryFactory
    : public Compression::Common::Compressor::CompressorLibraryFactoryBase<
          envoy::extensions::compression::brotli::compressor::v3::Brotli> {
public:
  BrotliCompressorLibraryFactory() : CompressorLibraryFactoryBase(brotliExtensionName()) {}

private:
  Envoy::Compression::Compressor::CompressorFactoryPtr createCompressorFactoryFromProtoTyped(
      const envoy::extensions::compression::brotli::compressor::v3::Brotli& config) override;
};

DECLARE_FACTORY(BrotliCompressorLibraryFactory);
} // namespace Compressor
} // namespace Brotli
} // namespace Compression
} // namespace Extensions
} // namespace Envoy
| 808 |
1,204 | /*
* Copyright 2014 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gs.collections.impl.multimap.bag;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import com.gs.collections.api.bag.ImmutableBag;
import com.gs.collections.api.bag.MutableBag;
import com.gs.collections.api.block.function.Function2;
import com.gs.collections.api.block.procedure.Procedure2;
import com.gs.collections.api.map.MutableMap;
import com.gs.collections.api.multimap.bag.ImmutableBagMultimap;
import com.gs.collections.api.multimap.bag.MutableBagMultimap;
import com.gs.collections.api.tuple.Pair;
import com.gs.collections.impl.block.procedure.checked.CheckedObjectIntProcedure;
import com.gs.collections.impl.block.procedure.checked.CheckedProcedure2;
import com.gs.collections.impl.multimap.AbstractMutableMultimap;
public abstract class AbstractMutableBagMultimap<K, V> extends AbstractMutableMultimap<K, V, MutableBag<V>> implements MutableBagMultimap<K, V>
{
protected AbstractMutableBagMultimap()
{
}
protected AbstractMutableBagMultimap(Pair<K, V>... pairs)
{
super(pairs);
}
protected AbstractMutableBagMultimap(Iterable<Pair<K, V>> inputIterable)
{
super(inputIterable);
}
protected AbstractMutableBagMultimap(int size)
{
super(size);
}
public MutableBagMultimap<K, V> toMutable()
{
MutableBagMultimap<K, V> mutableBagMultimap = this.newEmpty();
mutableBagMultimap.putAll(this);
return mutableBagMultimap;
}
public ImmutableBagMultimap<K, V> toImmutable()
{
final MutableMap<K, ImmutableBag<V>> result = (MutableMap<K, ImmutableBag<V>>) (MutableMap<?, ?>) this.createMapWithKeyCount(this.map.size());
this.map.forEachKeyValue(new Procedure2<K, MutableBag<V>>()
{
public void value(K key, MutableBag<V> bag)
{
result.put(key, bag.toImmutable());
}
});
return new ImmutableBagMultimapImpl<K, V>(result);
}
public <K2, V2> HashBagMultimap<K2, V2> collectKeysValues(Function2<? super K, ? super V, Pair<K2, V2>> function)
{
return this.collectKeysValues(function, HashBagMultimap.<K2, V2>newMultimap());
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException
{
int keysCount = this.map.size();
out.writeInt(keysCount);
this.map.forEachKeyValue(new CheckedProcedure2<K, MutableBag<V>>()
{
public void safeValue(K key, MutableBag<V> bag) throws IOException
{
out.writeObject(key);
out.writeInt(bag.sizeDistinct());
bag.forEachWithOccurrences(new CheckedObjectIntProcedure<V>()
{
public void safeValue(V value, int count) throws IOException
{
out.writeObject(value);
out.writeInt(count);
}
});
}
});
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException
{
int keyCount = in.readInt();
this.map = this.createMapWithKeyCount(keyCount);
for (int i = 0; i < keyCount; i++)
{
K key = (K) in.readObject();
int valuesSize = in.readInt();
MutableBag<V> bag = this.createCollection();
for (int j = 0; j < valuesSize; j++)
{
V value = (V) in.readObject();
int count = in.readInt();
bag.addOccurrences(value, count);
}
this.putAll(key, bag);
}
}
/**
 * Adds {@code occurrences} copies of {@code value} under {@code key}.
 * Adding zero occurrences is a no-op; negative counts are rejected.
 *
 * @throws IllegalArgumentException if {@code occurrences} is negative
 */
public void putOccurrences(K key, V value, int occurrences)
{
    if (occurrences < 0)
    {
        throw new IllegalArgumentException("Cannot add a negative number of occurrences");
    }
    if (occurrences == 0)
    {
        return;
    }
    MutableBag<V> bag = this.map.getIfAbsentPutWith(key, this.createCollectionBlock(), this);
    bag.addOccurrences(value, occurrences);
    this.addToTotalSize(occurrences);
}
}
| 2,238 |
1,041 | package io.ebeaninternal.server.core.timezone;
import java.util.Calendar;
/**
* Implementation of DataTimeZone that clones the Calendar instance.
* <p>
* Used with Oracle JDBC driver as that wants to mutate the Calender.
* </p>
*/
public class OracleDataTimeZone extends SimpleDataTimeZone {

  public OracleDataTimeZone(String zoneId) {
    super(zoneId);
  }

  /**
   * Hand out a defensive copy of the calendar because the Oracle JDBC
   * driver mutates the instance it is given.
   */
  @Override
  public Calendar getTimeZone() {
    Calendar copy = (Calendar) zone.clone();
    return copy;
  }
}
| 156 |
348 | <filename>docs/data/leg-t1/035/03502177.json<gh_stars>100-1000
{"nom":"<NAME>","circ":"2ème circonscription","dpt":"Ille-et-Vilaine","inscrits":3574,"abs":1571,"votants":2003,"blancs":29,"nuls":6,"exp":1968,"res":[{"nuance":"MDM","nom":"Mme <NAME>","voix":1056},{"nuance":"FI","nom":"<NAME>","voix":243},{"nuance":"SOC","nom":"Mme <NAME>","voix":166},{"nuance":"LR","nom":"<NAME>","voix":142},{"nuance":"FN","nom":"M. <NAME>","voix":124},{"nuance":"ECO","nom":"Mme <NAME>","voix":88},{"nuance":"REG","nom":"<NAME>","voix":48},{"nuance":"EXG","nom":"Mme <NAME>","voix":17},{"nuance":"COM","nom":"Mme <NAME>","voix":16},{"nuance":"DIV","nom":"M. <NAME>","voix":16},{"nuance":"DIV","nom":"M. <NAME>","voix":16},{"nuance":"DVD","nom":"M. <NAME>","voix":15},{"nuance":"ECO","nom":"Mme <NAME>","voix":9},{"nuance":"DIV","nom":"M. <NAME>","voix":6},{"nuance":"ECO","nom":"M. <NAME>","voix":6},{"nuance":"DIV","nom":"M. <NAME>","voix":0},{"nuance":"DVD","nom":"Mme <NAME>","voix":0}]} | 399 |
1,120 | <filename>tools/k4aviewer/k4adepthimageconverterbase.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef K4ADEPTHIMAGECONVERTERBASE_H
#define K4ADEPTHIMAGECONVERTERBASE_H
// System headers
//
#include <memory>
#include <vector>
// Library headers
//
// Project headers
//
#include "ik4aimageconverter.h"
#include "k4adepthpixelcolorizer.h"
#include "k4astaticimageproperties.h"
#include "k4aviewerutil.h"
#include "perfcounter.h"
namespace k4aviewer
{
// Shared base for depth-style image converters: colorizes each 16-bit depth
// pixel into a BGRA pixel using the compile-time VisualizationFunction.
template<k4a_image_format_t ImageFormat, DepthPixelVisualizationFunction VisualizationFunction>
class K4ADepthImageConverterBase : public IK4AImageConverter<ImageFormat>
{
public:
    // expectedValueRange is the [min, max] depth range passed to the
    // visualization function for normalization.
    explicit K4ADepthImageConverterBase(const k4a_depth_mode_t depthMode,
                                        const std::pair<DepthPixel, DepthPixel> expectedValueRange) :
        m_dimensions(GetDepthDimensions(depthMode)),
        m_expectedValueRange(expectedValueRange),
        m_expectedBufferSize(static_cast<size_t>(m_dimensions.Width * m_dimensions.Height) * sizeof(BgraPixel))
    {
    }

    // Converts srcImage (one DepthPixel per pixel) into bgraImage.
    // Fails if the source buffer size or the destination dimensions do not
    // match the dimensions implied by the depth mode.
    ImageConversionResult ConvertImage(const k4a::image &srcImage, k4a::image *bgraImage) override
    {
        const size_t srcImageSize = static_cast<size_t>(m_dimensions.Width * m_dimensions.Height) * sizeof(DepthPixel);

        if (srcImage.get_size() != srcImageSize)
        {
            return ImageConversionResult::InvalidBufferSizeError;
        }
        if (bgraImage->get_width_pixels() != srcImage.get_width_pixels() ||
            bgraImage->get_height_pixels() != srcImage.get_height_pixels())
        {
            return ImageConversionResult::InvalidBufferSizeError;
        }

        const uint8_t *src = srcImage.get_buffer();

        // Perf counter is keyed on the image format so each instantiation
        // gets its own counter.
        static PerfCounter render(std::string("Depth sensor<T") + std::to_string(int(ImageFormat)) + "> render");
        PerfSample renderSample(&render);
        RenderImage(src,
                    static_cast<size_t>(m_dimensions.Width * m_dimensions.Height) * sizeof(DepthPixel),
                    bgraImage->get_buffer());
        renderSample.End();

        return ImageConversionResult::Success;
    }

    ImageDimensions GetImageDimensions() const override
    {
        return m_dimensions;
    }

    ~K4ADepthImageConverterBase() override = default;

    K4ADepthImageConverterBase(const K4ADepthImageConverterBase &) = delete;
    K4ADepthImageConverterBase(const K4ADepthImageConverterBase &&) = delete;
    K4ADepthImageConverterBase &operator=(const K4ADepthImageConverterBase &) = delete;
    K4ADepthImageConverterBase &operator=(const K4ADepthImageConverterBase &&) = delete;

private:
    // Walks the source buffer one DepthPixel at a time and writes one
    // colorized BgraPixel per input pixel into dst.
    void RenderImage(const uint8_t *src, const size_t srcSize, uint8_t *dst)
    {
        const uint8_t *currentSrc = src;
        const uint8_t *srcEnd = src + srcSize;
        while (currentSrc < srcEnd)
        {
            const DepthPixel pixelValue = *reinterpret_cast<const DepthPixel *>(currentSrc);
            BgraPixel *outputPixel = reinterpret_cast<BgraPixel *>(dst);
            *outputPixel = VisualizationFunction(pixelValue, m_expectedValueRange.first, m_expectedValueRange.second);
            dst += sizeof(BgraPixel);
            currentSrc += sizeof(DepthPixel);
        }
    }

    const ImageDimensions m_dimensions;
    const std::pair<DepthPixel, DepthPixel> m_expectedValueRange;
    // NOTE(review): m_expectedBufferSize is initialized but not read in this
    // class; presumably kept for subclasses — confirm before removing.
    const size_t m_expectedBufferSize;
};
} // namespace k4aviewer
#endif
| 1,396 |
1,444 | <reponame>GabrielSturtevant/mage
package mage.cards.s;
import mage.abilities.Ability;
import mage.abilities.common.BeginningOfUpkeepTriggeredAbility;
import mage.abilities.effects.OneShotEffect;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.*;
import mage.game.Game;
import mage.players.Player;
import java.util.UUID;
/**
* @author TheElk801
*/
/**
 * Card definition for Storm World, a World enchantment that damages each
 * player during their upkeep based on their hand size.
 */
public final class StormWorld extends CardImpl {

    public StormWorld(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.ENCHANTMENT}, "{R}");

        addSuperType(SuperType.WORLD);

        // At the beginning of each player's upkeep, Storm World deals X damage to that player, where X is 4 minus the number of cards in their hand.
        this.addAbility(new BeginningOfUpkeepTriggeredAbility(Zone.BATTLEFIELD, new StormWorldEffect(), TargetController.ANY, false, true));
    }

    private StormWorld(final StormWorld card) {
        super(card);
    }

    @Override
    public StormWorld copy() {
        return new StormWorld(this);
    }
}
/**
 * One-shot effect: deals (4 - hand size) damage to the targeted player,
 * dealing nothing when that quantity is zero or negative.
 */
class StormWorldEffect extends OneShotEffect {

    public StormWorldEffect() {
        super(Outcome.Benefit);
        this.staticText = "{this} deals X damage to that player, where X is 4 minus the number of cards in their hand";
    }

    public StormWorldEffect(final StormWorldEffect effect) {
        super(effect);
    }

    @Override
    public StormWorldEffect copy() {
        return new StormWorldEffect(this);
    }

    @Override
    public boolean apply(Game game, Ability source) {
        Player targetPlayer = game.getPlayer(targetPointer.getFirst(game, source));
        if (targetPlayer == null) {
            return false;
        }
        int amount = 4 - targetPlayer.getHand().size();
        if (amount > 0) {
            targetPlayer.damage(amount, source.getSourceId(), source, game);
        }
        return true;
    }
}
| 706 |
580 | <filename>include/mpark/patterns/optional.hpp
// MPark.Patterns
//
// Copyright <NAME>, 2017
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
#ifndef MPARK_PATTERNS_OPTIONAL_HPP
#define MPARK_PATTERNS_OPTIONAL_HPP
#include <utility>
namespace mpark::patterns {

  // Pattern matching an "empty" optional-like value, i.e. one that converts
  // to false in boolean context.
  inline constexpr struct None {} none{};

  template <typename Value, typename F>
  auto try_match(None, Value &&value, F &&f) {
    // An engaged value does NOT match `none`; only an empty one invokes f.
    // NOTE(review): keep this as a single ternary — splitting into two
    // `return` statements would give `auto` two different deduced types.
    return value ? no_match : match_invoke(std::forward<F>(f));
  }

  // Pattern matching an engaged optional-like value whose contents match the
  // nested pattern. Holds the sub-pattern by reference, so the pattern object
  // must outlive the match expression.
  template <typename Pattern>
  struct Some { const Pattern &pattern; };

  template <typename Pattern>
  auto some(const Pattern &pattern) { return Some<Pattern>{pattern}; }

  template <typename Pattern, typename Value, typename F>
  auto try_match(const Some<Pattern> &some, Value &&value, F &&f) {
    // Dereference only when engaged, then recurse into the nested pattern.
    return value ? try_match(some.pattern,
                             *std::forward<Value>(value),
                             std::forward<F>(f))
                 : no_match;
  }

}  // namespace mpark::patterns
#endif // MPARK_PATTERNS_OPTIONAL_HPP
| 437 |
3,049 | <filename>engine/src/test/java/io/seldon/engine/service/UriCacheTest.java
package io.seldon.engine.service;
import io.seldon.protos.DeploymentProtos.Endpoint;
import org.junit.Assert;
import org.junit.Test;
public class UriCacheTest {

  /**
   * URI cache keys must differ across hosts and across paths, and must be
   * identical for equal (but distinct) endpoint and path instances.
   */
  @Test
  public void testUri() {
    Endpoint hostA = Endpoint.newBuilder().setServiceHost("hostA").setServicePort(1000).build();
    Endpoint hostACopy =
        Endpoint.newBuilder().setServiceHost("hostA").setServicePort(1000).build();
    Endpoint hostB = Endpoint.newBuilder().setServiceHost("hostB").setServicePort(1000).build();

    final String predictPath = "/predict";
    final String predictPathCopy = "/predict";
    final String feedbackPath = "/feedback";

    final String predictKeyA = InternalPredictionService.getUriKey(hostA, predictPath);
    final String predictKeyB = InternalPredictionService.getUriKey(hostB, predictPath);
    final String feedbackKeyA = InternalPredictionService.getUriKey(hostA, feedbackPath);

    // Different host or different path -> different key.
    Assert.assertNotEquals(predictKeyA, predictKeyB);
    Assert.assertNotEquals(predictKeyA, feedbackKeyA);

    // Equal endpoint/path values -> same key, regardless of instance identity.
    final String keyFromEqualEndpoint = InternalPredictionService.getUriKey(hostACopy, predictPath);
    Assert.assertEquals(predictKeyA, keyFromEqualEndpoint);
    final String keyFromEqualPath = InternalPredictionService.getUriKey(hostA, predictPathCopy);
    Assert.assertEquals(predictKeyA, keyFromEqualPath);
  }
}
| 443 |
4,262 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.catalog;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;
import org.w3c.dom.Document;
import org.apache.camel.catalog.impl.AbstractCamelCatalog;
import org.apache.camel.catalog.impl.CatalogHelper;
import org.apache.camel.tooling.model.BaseModel;
import org.apache.camel.tooling.model.ComponentModel;
import org.apache.camel.tooling.model.DataFormatModel;
import org.apache.camel.tooling.model.EipModel;
import org.apache.camel.tooling.model.JsonMapper;
import org.apache.camel.tooling.model.LanguageModel;
import org.apache.camel.tooling.model.MainModel;
import org.apache.camel.tooling.model.OtherModel;
import org.apache.camel.util.json.JsonObject;
/**
* Default {@link CamelCatalog}.
*/
public class DefaultCamelCatalog extends AbstractCamelCatalog implements CamelCatalog {
private static final String MODELS_CATALOG = "org/apache/camel/catalog/models.properties";
private static final String ARCHETYPES_CATALOG = "org/apache/camel/catalog/archetypes/archetype-catalog.xml";
private static final String SCHEMAS_XML = "org/apache/camel/catalog/schemas";
private static final String MAIN_DIR = "org/apache/camel/catalog/main";
private final VersionHelper version = new VersionHelper();
// 3rd party components/data-formats
private final Map<String, String> extraComponents = new HashMap<>();
private final Map<String, String> extraComponentsJSonSchema = new HashMap<>();
private final Map<String, String> extraDataFormats = new HashMap<>();
private final Map<String, String> extraDataFormatsJSonSchema = new HashMap<>();
// cache of operation -> result
private final Map<String, Object> cache = new HashMap<>();
private boolean caching;
private VersionManager versionManager = new DefaultVersionManager(this);
private RuntimeProvider runtimeProvider = new DefaultRuntimeProvider(this);
/**
* Creates the {@link CamelCatalog} without caching enabled.
*/
public DefaultCamelCatalog() {
this(false);
}
/**
* Creates the {@link CamelCatalog}
*
* @param caching whether to use cache
*/
public DefaultCamelCatalog(boolean caching) {
this.caching = caching;
setJSonSchemaResolver(new CamelCatalogJSonSchemaResolver(
this, extraComponents, extraComponentsJSonSchema, extraDataFormats, extraDataFormatsJSonSchema));
}
@Override
public RuntimeProvider getRuntimeProvider() {
return runtimeProvider;
}
@Override
public void setRuntimeProvider(RuntimeProvider runtimeProvider) {
this.runtimeProvider = runtimeProvider;
// inject CamelCatalog to the provider
this.runtimeProvider.setCamelCatalog(this);
// invalidate the cache
cache.remove("findComponentNames");
cache.remove("listComponentsAsJson");
cache.remove("findDataFormatNames");
cache.remove("listDataFormatsAsJson");
cache.remove("findLanguageNames");
cache.remove("listLanguagesAsJson");
}
@Override
public void enableCache() {
caching = true;
}
@Override
public boolean isCaching() {
return caching;
}
@Override
public void setVersionManager(VersionManager versionManager) {
this.versionManager = versionManager;
}
@Override
public VersionManager getVersionManager() {
return versionManager;
}
@Override
public void addComponent(String name, String className) {
extraComponents.put(name, className);
// invalidate the cache
cache.remove("findComponentNames");
cache.remove("findComponentLabels");
cache.remove("listComponentsAsJson");
}
@Override
public void addComponent(String name, String className, String jsonSchema) {
addComponent(name, className);
if (jsonSchema != null) {
extraComponentsJSonSchema.put(name, jsonSchema);
}
}
@Override
public void addDataFormat(String name, String className) {
extraDataFormats.put(name, className);
// invalidate the cache
cache.remove("findDataFormatNames");
cache.remove("findDataFormatLabels");
cache.remove("listDataFormatsAsJson");
}
@Override
public void addDataFormat(String name, String className, String jsonSchema) {
addDataFormat(name, className);
if (jsonSchema != null) {
extraDataFormatsJSonSchema.put(name, jsonSchema);
}
}
@Override
public String getCatalogVersion() {
return version.getVersion();
}
@Override
public boolean loadVersion(String version) {
if (version.equals(versionManager.getLoadedVersion())) {
return true;
} else if (versionManager.loadVersion(version)) {
// invalidate existing cache if we loaded a new version
cache.clear();
return true;
}
return false;
}
@Override
public String getLoadedVersion() {
return versionManager.getLoadedVersion();
}
@Override
public String getRuntimeProviderLoadedVersion() {
return versionManager.getRuntimeProviderLoadedVersion();
}
@Override
public boolean loadRuntimeProviderVersion(String groupId, String artifactId, String version) {
return versionManager.loadRuntimeProviderVersion(groupId, artifactId, version);
}
@Override
public List<String> findComponentNames() {
return cache("findComponentNames", () -> Stream.of(runtimeProvider.findComponentNames(), extraComponents.keySet())
.flatMap(Collection::stream)
.sorted()
.collect(Collectors.toList()));
}
@Override
public List<String> findDataFormatNames() {
return cache("findDataFormatNames", () -> Stream.of(runtimeProvider.findDataFormatNames(), extraDataFormats.keySet())
.flatMap(Collection::stream)
.sorted()
.collect(Collectors.toList()));
}
@Override
public List<String> findLanguageNames() {
return cache("findLanguageNames", runtimeProvider::findLanguageNames);
}
@Override
public List<String> findModelNames() {
return cache("findModelNames", () -> {
try (InputStream is = versionManager.getResourceAsStream(MODELS_CATALOG)) {
return CatalogHelper.loadLines(is);
} catch (IOException e) {
return Collections.emptyList();
}
});
}
@Override
public List<String> findOtherNames() {
return cache("findOtherNames", runtimeProvider::findOtherNames);
}
@Override
public List<String> findModelNames(String filter) {
// should not cache when filter parameter can by any kind of value
return findNames(filter, this::findModelNames, this::eipModel);
}
@Override
public List<String> findComponentNames(String filter) {
// should not cache when filter parameter can by any kind of value
return findNames(filter, this::findComponentNames, this::componentModel);
}
@Override
public List<String> findDataFormatNames(String filter) {
// should not cache when filter parameter can by any kind of value
return findNames(filter, this::findDataFormatNames, this::dataFormatModel);
}
@Override
public List<String> findLanguageNames(String filter) {
// should not cache when filter parameter can by any kind of value
return findNames(filter, this::findLanguageNames, this::languageModel);
}
@Override
public List<String> findOtherNames(String filter) {
// should not cache when filter parameter can by any kind of value
return findNames(filter, this::findOtherNames, this::otherModel);
}
private List<String> findNames(
String filter, Supplier<List<String>> findNames, Function<String, ? extends BaseModel<?>> modelLoader) {
List<String> answer = new ArrayList<>();
List<String> names = findNames.get();
for (String name : names) {
BaseModel<?> model = modelLoader.apply(name);
if (model != null) {
String label = model.getLabel();
String[] parts = label.split(",");
for (String part : parts) {
try {
if (part.equalsIgnoreCase(filter) || CatalogHelper.matchWildcard(part, filter)
|| part.matches(filter)) {
answer.add(name);
}
} catch (PatternSyntaxException e) {
// ignore as filter is maybe not a pattern
}
}
}
}
return answer;
}
@Override
public String modelJSonSchema(String name) {
return cache("eip-" + name, name, super::modelJSonSchema);
}
@Override
public EipModel eipModel(String name) {
return cache("eip-model-" + name, name, super::eipModel);
}
@Override
public String componentJSonSchema(String name) {
return cache("component-" + name, name, super::componentJSonSchema);
}
@Override
public ComponentModel componentModel(String name) {
return cache("component-model-" + name, name, super::componentModel);
}
@Override
public String dataFormatJSonSchema(String name) {
return cache("dataformat-" + name, name, super::dataFormatJSonSchema);
}
@Override
public DataFormatModel dataFormatModel(String name) {
return cache("dataformat-model-" + name, name, super::dataFormatModel);
}
@Override
public String languageJSonSchema(String name) {
return cache("language-" + name, name, super::languageJSonSchema);
}
@Override
public LanguageModel languageModel(String name) {
return cache("language-model-" + name, name, super::languageModel);
}
@Override
public String otherJSonSchema(String name) {
return cache("other-" + name, name, super::otherJSonSchema);
}
@Override
public OtherModel otherModel(String name) {
return cache("other-model-" + name, name, super::otherModel);
}
public String mainJSonSchema() {
return cache("main", "main", k -> super.mainJSonSchema());
}
@Override
public MainModel mainModel() {
return cache("main-model", "main-model", k -> super.mainModel());
}
@Override
public Set<String> findModelLabels() {
return cache("findModelLabels", () -> findLabels(this::findModelNames, this::eipModel));
}
@Override
public Set<String> findComponentLabels() {
return cache("findComponentLabels", () -> findLabels(this::findComponentNames, this::componentModel));
}
@Override
public Set<String> findDataFormatLabels() {
return cache("findDataFormatLabels", () -> findLabels(this::findDataFormatNames, this::dataFormatModel));
}
@Override
public Set<String> findLanguageLabels() {
return cache("findLanguageLabels", () -> findLabels(this::findLanguageNames, this::languageModel));
}
@Override
public Set<String> findOtherLabels() {
return cache("findOtherLabels", () -> findLabels(this::findOtherNames, this::otherModel));
}
private SortedSet<String> findLabels(Supplier<List<String>> findNames, Function<String, ? extends BaseModel<?>> loadModel) {
TreeSet<String> answer = new TreeSet<>();
List<String> names = findNames.get();
for (String name : names) {
BaseModel<?> model = loadModel.apply(name);
if (model != null) {
String label = model.getLabel();
String[] parts = label.split(",");
Collections.addAll(answer, parts);
}
}
return answer;
}
@Override
public String archetypeCatalogAsXml() {
return cache(ARCHETYPES_CATALOG, this::loadResource);
}
@Override
public String springSchemaAsXml() {
return cache(SCHEMAS_XML + "/camel-spring.xsd", this::loadResource);
}
@Override
public String mainJsonSchema() {
return cache(MAIN_DIR + "/camel-main-configuration-metadata.json", this::loadResource);
}
@Override
public String listComponentsAsJson() {
return cache("listComponentsAsJson", () -> JsonMapper.serialize(findComponentNames().stream()
.map(this::componentJSonSchema)
.map(JsonMapper::deserialize)
.map(o -> o.get("component"))
.collect(Collectors.toList())));
}
@Override
public String listDataFormatsAsJson() {
return cache("listDataFormatsAsJson", () -> JsonMapper.serialize(findDataFormatNames().stream()
.map(this::dataFormatJSonSchema)
.map(JsonMapper::deserialize)
.map(o -> o.get("dataformat"))
.collect(Collectors.toList())));
}
@Override
public String listLanguagesAsJson() {
return cache("listLanguagesAsJson", () -> JsonMapper.serialize(findLanguageNames().stream()
.map(this::languageJSonSchema)
.map(JsonMapper::deserialize)
.map(o -> o.get("language"))
.collect(Collectors.toList())));
}
@Override
public String listModelsAsJson() {
return cache("listModelsAsJson", () -> JsonMapper.serialize(findModelNames().stream()
.map(this::modelJSonSchema)
.map(JsonMapper::deserialize)
.map(o -> o.get("model"))
.collect(Collectors.toList())));
}
@Override
public String listOthersAsJson() {
return cache("listOthersAsJson", () -> JsonMapper.serialize(findOtherNames().stream()
.map(this::otherJSonSchema)
.map(JsonMapper::deserialize)
.map(o -> o.get("other"))
.collect(Collectors.toList())));
}
@Override
public String summaryAsJson() {
return cache("summaryAsJson", () -> {
Map<String, Object> obj = new JsonObject();
obj.put("version", getCatalogVersion());
obj.put("eips", findModelNames().size());
obj.put("components", findComponentNames().size());
obj.put("dataformats", findDataFormatNames().size());
obj.put("languages", findLanguageNames().size());
obj.put("archetypes", getArchetypesCount());
return JsonMapper.serialize(obj);
});
}
private int getArchetypesCount() {
int archetypes = 0;
try {
String xml = archetypeCatalogAsXml();
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, Boolean.TRUE);
dbf.setFeature("http://apache.org/xml/features/disallow-doctype-decl", Boolean.TRUE);
Document dom = dbf.newDocumentBuilder().parse(new ByteArrayInputStream(xml.getBytes()));
Object val = XPathFactory.newInstance().newXPath().evaluate("count(/archetype-catalog/archetypes/archetype)", dom,
XPathConstants.NUMBER);
double num = (double) val;
archetypes = (int) num;
} catch (Exception e) {
// ignore
}
return archetypes;
}
@SuppressWarnings("unchecked")
private <T> T cache(String name, Supplier<T> loader) {
if (caching) {
T t = (T) cache.get(name);
if (t == null) {
t = loader.get();
if (t != null) {
cache.put(name, t);
}
}
return t;
} else {
return loader.get();
}
}
@SuppressWarnings("unchecked")
private <T> T cache(String key, String name, Function<String, T> loader) {
if (caching) {
T t = (T) cache.get(key);
if (t == null) {
t = loader.apply(name);
if (t != null) {
cache.put(key, t);
}
}
return t;
} else {
return loader.apply(name);
}
}
@SuppressWarnings("unchecked")
private <T> T cache(String name, Function<String, T> loader) {
if (caching) {
T t = (T) cache.get(name);
if (t == null) {
t = loader.apply(name);
if (t != null) {
cache.put(name, t);
}
}
return t;
} else {
return loader.apply(name);
}
}
private String loadResource(String file) {
try (InputStream is = versionManager.getResourceAsStream(file)) {
return is != null ? CatalogHelper.loadText(is) : null;
} catch (IOException e) {
return null;
}
}
// CHECKSTYLE:ON
}
| 7,496 |
587 | /* ISC license. */
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <skalibs/posixplz.h>
#include <skalibs/allreadwrite.h>
#include <skalibs/djbunix.h>
#include <s6/supervise.h>
/*
   Removes the service directory link <scdir>/<name> from the scan directory
   and optionally tells the running supervisors to exit.
   options bit 2 (0x4): first delete <scdir>/<name>/down
   options bit 0 (0x1): open the service and logger control FIFOs *before*
     unlinking, then send them "x" (exit when the process dies), with a
     second byte when bit 1 (0x2) is set: 'd' (bring the service down) for
     the service, 'o' (run once) for the logger.
   errno is saved on entry and restored on exit, so this call is
   errno-transparent for best-effort cleanup paths.
*/
void s6_supervise_unlink (char const *scdir, char const *name, uint32_t options)
{
  int e = errno ;
  int fd = -1, fdlog = -1 ;
  size_t scdirlen = strlen(scdir) ;
  size_t namelen = strlen(name) ;
  /* big enough for "<scdir>/<name>/log/<ctldir>/control" plus NUL */
  char fn[scdirlen + namelen + sizeof(S6_SUPERVISE_CTLDIR) + 14] ;
  memcpy(fn, scdir, scdirlen) ;
  fn[scdirlen] = '/' ;
  memcpy(fn + scdirlen + 1, name, namelen) ;
  if (options & 4)
  {
    /* fn = "<scdir>/<name>/down" */
    memcpy(fn + scdirlen + 1 + namelen, "/down", 6) ;
    unlink_void(fn) ;
  }
  if (options & 1)
  {
    /* Open both control FIFOs now; after the unlink below the paths are gone. */
    memcpy(fn + scdirlen + 1 + namelen, "/" S6_SUPERVISE_CTLDIR, sizeof(S6_SUPERVISE_CTLDIR)) ;
    memcpy(fn + scdirlen + 1 + namelen + sizeof(S6_SUPERVISE_CTLDIR), "/control", 9) ;
    fd = open_write(fn) ;
    memcpy(fn + scdirlen + 1 + namelen, "/log/" S6_SUPERVISE_CTLDIR, 4 + sizeof(S6_SUPERVISE_CTLDIR)) ;
    memcpy(fn + scdirlen + 5 + namelen + sizeof(S6_SUPERVISE_CTLDIR), "/control", 9) ;
    fdlog = open_write(fn) ;
  }
  /* fn = "<scdir>/<name>": remove the service entry itself */
  fn[scdirlen + 1 + namelen] = 0 ;
  unlink_void(fn) ;
  if (fd >= 0)
  {
    /* "x" = exit after process death; "xd" additionally brings it down now */
    fd_write(fd, "xd", 1 + !!(options & 2)) ;
    fd_close(fd) ;
  }
  if (fdlog >= 0)
  {
    /* logger gets "xo" (run once, then exit) instead of "xd" */
    fd_write(fdlog, "xo", 1 + !!(options & 2)) ;
    fd_close(fdlog) ;
  }
  errno = e ;
}
| 702 |
561 | //////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2011, <NAME>. All rights reserved.
// Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above
// copyright notice, this list of conditions and the following
// disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided with
// the distribution.
//
// * Neither the name of <NAME> nor the names of
// any other contributors to this software may be used to endorse or
// promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////
#include "boost/python.hpp"
#include "UndoScopeBinding.h"
#include "Gaffer/ScriptNode.h"
#include "Gaffer/UndoScope.h"
#include "IECorePython/ScopedGILRelease.h"
#include <memory>
using namespace boost::python;
using namespace Gaffer;
namespace
{
typedef std::shared_ptr<UndoScope> UndoScopePtr;
void deleter( UndoScope *undoScope )
{
// The destructor for the undo scope may trigger a dirty
// propagation, and observers of plugDirtiedSignal() may
// well invoke a compute. We need to release the GIL so that
// if that compute is multithreaded, those threads can acquire
// the GIL for python based nodes and expressions.
IECorePython::ScopedGILRelease gilRelease;
delete undoScope;
}
UndoScopePtr construct( ScriptNodePtr script, UndoScope::State state, const char *mergeGroup )
{
return UndoScopePtr( new UndoScope( script, state, mergeGroup ), deleter );
}
} // namespace
void GafferModule::bindUndoScope()
{
class_<UndoScope, UndoScopePtr, boost::noncopyable> cls( "_UndoScope", no_init );
// Must bind enum before constructor, because we need to
// use an enum value for a default value.
scope s( cls );
enum_<UndoScope::State>( "State" )
.value( "Invalid", UndoScope::Invalid )
.value( "Enabled", UndoScope::Enabled )
.value( "Disabled", UndoScope::Disabled )
;
cls.def(
"__init__",
make_constructor(
construct,
default_call_policies(),
(
boost::python::arg_( "script" ),
boost::python::arg_( "state" ) = UndoScope::Enabled,
boost::python::arg_( "mergeGroup" ) = ""
)
)
);
}
| 1,094 |
948 | <filename>arch/cpu/cc26x0-cc13x0/dev/cc26xx-aes.c
/*
* Copyright (c) 2016, University of Bristol - http://www.bristol.ac.uk
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/**
* \addtogroup cc26xx-aes
* @{
*
* \file
* Implementation of the AES driver for the CC26x0/CC13x0 SoC
* \author
* <NAME> <<EMAIL>>
*/
#include "contiki.h"
#include "dev/cc26xx-aes.h"
#include "ti-lib.h"
/*---------------------------------------------------------------------------*/
#include "sys/log.h"
#define LOG_MODULE "cc26xx-aes"
#define LOG_LEVEL LOG_LEVEL_MAIN
/*---------------------------------------------------------------------------*/
static uint32_t skey[AES_128_KEY_LENGTH / sizeof(uint32_t)];
/*---------------------------------------------------------------------------*/
/* Stores a copy of the 128-bit AES key for use by subsequent
 * encrypt/decrypt calls; the key is loaded into the crypto HW per call. */
void
cc26xx_aes_set_key(const uint8_t *key)
{
  memcpy(skey, key, AES_128_KEY_LENGTH);
}
/*---------------------------------------------------------------------------*/
/* Runs one AES-128 ECB operation in place on a single 16-byte block.
 * Powers up the PERIPH power domain and the CRYPTO peripheral, loads the
 * stored key, runs the operation, then powers the peripheral back down.
 * On any failure the first byte of the buffer is flipped so callers never
 * mistake the untouched plaintext for a valid result. */
static void
encrypt_decrypt(uint8_t *plaintext_and_result, bool do_encrypt)
{
  uint32_t result[AES_128_BLOCK_SIZE / sizeof(uint32_t)];
  unsigned status;
  int i;

  /* First, make sure the PERIPH PD is on */
  ti_lib_prcm_power_domain_on(PRCM_DOMAIN_PERIPH);
  while((ti_lib_prcm_power_domain_status(PRCM_DOMAIN_PERIPH)
        != PRCM_DOMAIN_POWER_ON));

  /* Enable CRYPTO peripheral */
  ti_lib_prcm_peripheral_run_enable(PRCM_PERIPH_CRYPTO);
  ti_lib_prcm_load_set();
  while(!ti_lib_prcm_load_get());

  status = ti_lib_crypto_aes_load_key(skey, CRYPTO_KEY_AREA_0);
  if(status != AES_SUCCESS) {
    LOG_WARN("load key failed: %u\n", status);
  } else {
    status = ti_lib_crypto_aes_ecb((uint32_t *)plaintext_and_result, result, CRYPTO_KEY_AREA_0, do_encrypt, false);
    if(status != AES_SUCCESS) {
      LOG_WARN("ecb failed: %u\n", status);
    } else {
      /* Poll (bounded) until the DMA transfer completes */
      for(i = 0; i < 100; ++i) {
        ti_lib_cpu_delay(10);
        status = ti_lib_crypto_aes_ecb_status();
        if(status != AES_DMA_BSY) {
          break;
        }
      }

      ti_lib_crypto_aes_ecb_finish();

      if(status != AES_SUCCESS) {
        LOG_WARN("ecb get result failed: %u\n", status);
      }
    }
  }

  /* Power the crypto peripheral back down before returning */
  ti_lib_prcm_peripheral_run_disable(PRCM_PERIPH_CRYPTO);
  ti_lib_prcm_load_set();
  while(!ti_lib_prcm_load_get());

  if(status == AES_SUCCESS) {
    memcpy(plaintext_and_result, result, AES_128_BLOCK_SIZE);
  } else {
    /* corrupt the result */
    plaintext_and_result[0] ^= 1;
  }
}
/*---------------------------------------------------------------------------*/
/* Encrypt one 16-byte block in place using the previously stored key. */
void
cc26xx_aes_encrypt(uint8_t *plaintext_and_result)
{
  const bool encrypt = true;
  encrypt_decrypt(plaintext_and_result, encrypt);
}
/*---------------------------------------------------------------------------*/
/* Decrypt one 16-byte block in place using the previously stored key. */
void
cc26xx_aes_decrypt(uint8_t *cyphertext_and_result)
{
  const bool encrypt = false;
  encrypt_decrypt(cyphertext_and_result, encrypt);
}
/*---------------------------------------------------------------------------*/
/*
 * Exported AES-128 driver: plugs this SoC's hardware AES engine into
 * Contiki's generic aes_128 driver interface.  Only the key-setting and
 * block-encryption entry points are exposed here; the decrypt helper
 * above is not part of this generic interface.
 */
const struct aes_128_driver cc26xx_aes_128_driver = {
  cc26xx_aes_set_key,
  cc26xx_aes_encrypt
};
/** @} */
| 1,579 |
10,882 | from .forest import RandomForestClassifier, RandomForestRegressor
| 14 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _XMLTEXTCOLUMNSCONTEXT_HXX
#define _XMLTEXTCOLUMNSCONTEXT_HXX
#include "XMLElementPropertyContext.hxx"
namespace rtl { class OUString; }
class XMLTextColumnsArray_Impl;
class XMLTextColumnSepContext_Impl;
class SvXMLTokenMap;
/** Import context for a text-columns style element: collects the column
    definitions (and an optional column separator line) from the XML
    attributes/children and stores the result as an XMLPropertyState. */
class XMLTextColumnsContext :public XMLElementPropertyContext
{
    // UNO property names the collected column data is written to
    const ::rtl::OUString sSeparatorLineIsOn;
    const ::rtl::OUString sSeparatorLineWidth;
    const ::rtl::OUString sSeparatorLineColor;
    const ::rtl::OUString sSeparatorLineRelativeHeight;
    const ::rtl::OUString sSeparatorLineVerticalAlignment;
    const ::rtl::OUString sIsAutomatic;
    const ::rtl::OUString sAutomaticDistance;
    // column definitions gathered from child elements
    XMLTextColumnsArray_Impl *pColumns;
    // optional separator line description (NULL if none was given)
    XMLTextColumnSepContext_Impl *pColumnSep;
    // token maps for the attributes of column/column-separator elements
    SvXMLTokenMap *pColumnAttrTokenMap;
    SvXMLTokenMap *pColumnSepAttrTokenMap;
    // number of columns declared on the element itself
    sal_Int16 nCount;
    // true when columns are distributed automatically
    sal_Bool bAutomatic;
    // gap between automatic columns
    sal_Int32 nAutomaticDistance;
public:
    TYPEINFO();
    XMLTextColumnsContext(
        SvXMLImport& rImport, sal_uInt16 nPrfx,
        const ::rtl::OUString& rLName,
        const ::com::sun::star::uno::Reference<
                ::com::sun::star::xml::sax::XAttributeList > & xAttrList,
        const XMLPropertyState& rProp,
        ::std::vector< XMLPropertyState > &rProps );
    virtual ~XMLTextColumnsContext();
    // creates contexts for the column / column-separator child elements
    virtual SvXMLImportContext *CreateChildContext( sal_uInt16 nPrefix,
        const ::rtl::OUString& rLocalName,
        const ::com::sun::star::uno::Reference< ::com::sun::star::xml::sax::XAttributeList > & xAttrList );
    // converts the collected data into the property value
    virtual void EndElement();
};
#endif
| 816 |
7,407 | /*
===========================================================================
Copyright (C) 1999-2005 Id Software, Inc.
This file is part of Quake III Arena source code.
Quake III Arena source code is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
Quake III Arena source code is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Quake III Arena source code; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
===========================================================================
*/
#include "qbsp.h"
#include "l_mem.h"
#include "botlib/aasfile.h"
#include "aas_store.h"
#include "aas_cfg.h"
#include "aas_file.h"
//
// creating tetrahedrons from a arbitrary world bounded by triangles
//
// a triangle has 3 corners and 3 edges
// a tetrahedron is build out of 4 triangles
// a tetrahedron has 6 edges
// we start with a world bounded by triangles, a side of a triangle facing
// towards the outside of the world is marked as part of tetrahedron -1
//
// a tetrahedron is defined by two non-coplanar triangles with a shared edge
//
// a tetrahedron is defined by one triangle and a vertex not in the triangle plane
//
// if all triangles using a specific vertex have tetrahedrons
// at both sides then this vertex will never be part of a new tetrahedron
//
// if all triangles using a specific edge have tetrahedrons
// at both sides then this vertex will never be part of a new tetrahedron
//
// each triangle can only be shared by two tetrahedrons
// when all triangles have tetrahedrons at both sides then we're done
//
// if we cannot create any new tetrahedrons and there is at least one triangle
// which has a tetrahedron only at one side then the world leaks
//
#define Sign(x) (x < 0 ? 1 : 0)
#define MAX_TH_VERTEXES 128000
#define MAX_TH_PLANES 128000
#define MAX_TH_EDGES 512000
#define MAX_TH_TRIANGLES 51200
#define MAX_TH_TETRAHEDRONS 12800
#define PLANEHASH_SIZE 1024
#define EDGEHASH_SIZE 1024
#define TRIANGLEHASH_SIZE 1024
#define VERTEXHASH_SHIFT 7
#define VERTEXHASH_SIZE ((MAX_MAP_BOUNDS>>(VERTEXHASH_SHIFT-1))+1) //was 64
#define NORMAL_EPSILON 0.0001
#define DIST_EPSILON 0.1
#define VERTEX_EPSILON 0.01
#define INTEGRAL_EPSILON 0.01
//plane
typedef struct th_plane_s
{
vec3_t normal;
float dist;
int type;
int signbits;
struct th_plane_s *hashnext; //next plane in hash
} th_plane_t;
//vertex
typedef struct th_vertex_s
{
vec3_t v;
int usercount; //2x the number of to be processed
//triangles using this vertex
struct th_vertex_s *hashnext; //next vertex in hash
} th_vertex_t;
//edge
typedef struct th_edge_s
{
int v[2]; //vertex indexes
int usercount; //number of to be processed
//triangles using this edge
struct th_edge_s *hashnext; //next edge in hash
} th_edge_t;
//triangle
typedef struct th_triangle_s
{
int edges[3]; //negative if edge is flipped
th_plane_t planes[3]; //triangle bounding planes
int planenum; //plane the triangle is in
int front; //tetrahedron at the front
int back; //tetrahedron at the back
vec3_t mins, maxs; //triangle bounding box
struct th_triangle_s *prev, *next; //links in linked triangle lists
struct th_triangle_s *hashnext; //next triangle in hash
} th_triangle_t;
//tetrahedron
typedef struct th_tetrahedron_s
{
int triangles[4]; //negative if at backside of triangle
float volume; //tetrahedron volume
} th_tetrahedron_t;
typedef struct th_s
{
//vertexes
int numvertexes;
th_vertex_t *vertexes;
th_vertex_t *vertexhash[VERTEXHASH_SIZE * VERTEXHASH_SIZE];
//planes
int numplanes;
th_plane_t *planes;
th_plane_t *planehash[PLANEHASH_SIZE];
//edges
int numedges;
th_edge_t *edges;
th_edge_t *edgehash[EDGEHASH_SIZE];
//triangles
int numtriangles;
th_triangle_t *triangles;
th_triangle_t *trianglehash[TRIANGLEHASH_SIZE];
//tetrahedrons
int numtetrahedrons;
th_tetrahedron_t *tetrahedrons;
} th_t;
th_t thworld;
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//allocate the worst-case tetrahedronisation buffers and clear all hashes
void TH_InitMaxTH(void)
{
	//get memory for the tetrahedron data
	thworld.vertexes = (th_vertex_t *) GetClearedMemory(MAX_TH_VERTEXES * sizeof(th_vertex_t));
	thworld.planes = (th_plane_t *) GetClearedMemory(MAX_TH_PLANES * sizeof(th_plane_t));
	thworld.edges = (th_edge_t *) GetClearedMemory(MAX_TH_EDGES * sizeof(th_edge_t));
	thworld.triangles = (th_triangle_t *) GetClearedMemory(MAX_TH_TRIANGLES * sizeof(th_triangle_t));
	thworld.tetrahedrons = (th_tetrahedron_t *) GetClearedMemory(MAX_TH_TETRAHEDRONS * sizeof(th_tetrahedron_t));
	//reset the hash tables
	//FIX: clear sizeof(array) bytes; the vertex hash holds
	//VERTEXHASH_SIZE * VERTEXHASH_SIZE pointers, so the old
	//"VERTEXHASH_SIZE * sizeof(th_vertex_t *)" only wiped a fraction of it
	memset(thworld.vertexhash, 0, sizeof(thworld.vertexhash));
	memset(thworld.planehash, 0, sizeof(thworld.planehash));
	memset(thworld.edgehash, 0, sizeof(thworld.edgehash));
	memset(thworld.trianglehash, 0, sizeof(thworld.trianglehash));
} //end of the function TH_InitMaxTH
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//release every buffer allocated by TH_InitMaxTH and zero the counters
void TH_FreeMaxTH(void)
{
	//vertexes
	if (thworld.vertexes)
	{
		FreeMemory(thworld.vertexes);
	} //end if
	thworld.vertexes = NULL;
	thworld.numvertexes = 0;
	//planes
	if (thworld.planes)
	{
		FreeMemory(thworld.planes);
	} //end if
	thworld.planes = NULL;
	thworld.numplanes = 0;
	//edges
	if (thworld.edges)
	{
		FreeMemory(thworld.edges);
	} //end if
	thworld.edges = NULL;
	thworld.numedges = 0;
	//triangles
	if (thworld.triangles)
	{
		FreeMemory(thworld.triangles);
	} //end if
	thworld.triangles = NULL;
	thworld.numtriangles = 0;
	//tetrahedrons
	if (thworld.tetrahedrons)
	{
		FreeMemory(thworld.tetrahedrons);
	} //end if
	thworld.tetrahedrons = NULL;
	thworld.numtetrahedrons = 0;
} //end of the function TH_FreeMaxTH
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
float TH_TriangleArea(th_triangle_t *tri)
{
	//NOTE: unimplemented stub - always reports zero area, which in turn
	//makes TH_TetrahedronVolume() return zero for every tetrahedron
	return 0;
} //end of the function TH_TriangleArea
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//volume of the tetrahedron = base triangle area * apex height / 3
//NOTE: depends on TH_TriangleArea which is currently a stub returning 0
float TH_TetrahedronVolume(th_tetrahedron_t *tetrahedron)
{
	int edgenum, verts[3], i, j, v2;
	float volume, d;
	th_triangle_t *tri, *tri2;
	th_plane_t *plane;
	//gather the three vertexes of the base triangle
	tri = &thworld.triangles[abs(tetrahedron->triangles[0])];
	for (i = 0; i < 3; i++)
	{
		edgenum = tri->edges[i];
		if (edgenum < 0) verts[i] = thworld.edges[abs(edgenum)].v[1];
		else verts[i] = thworld.edges[edgenum].v[0];
	} //end for
	//find the apex: the vertex of the second triangle not in the base
	tri2 = &thworld.triangles[abs(tetrahedron->triangles[1])];
	for (j = 0; j < 3; j++)
	{
		//FIX: was tri2->edges[i]; i equals 3 after the loop above so the
		//old code read past the edges array and tested garbage vertexes
		edgenum = tri2->edges[j];
		if (edgenum < 0) v2 = thworld.edges[abs(edgenum)].v[1];
		else v2 = thworld.edges[edgenum].v[0];
		if (v2 != verts[0] &&
				v2 != verts[1] &&
				v2 != verts[2]) break;
	} //end for
	//height = distance of the apex to the base triangle plane
	plane = &thworld.planes[tri->planenum];
	d = -(DotProduct (thworld.vertexes[v2].v, plane->normal) - plane->dist);
	volume = TH_TriangleArea(tri) * d / 3;
	return volume;
} //end of the function TH_TetrahedronVolume
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//pack the sign of each normal component into one bit, x in bit 0
int TH_PlaneSignBits(vec3_t normal)
{
	int axis, bits;

	bits = 0;
	for (axis = 2; axis >= 0; axis--)
	{
		bits = (bits << 1) | Sign(normal[axis]);
	} //end for
	return bits;
} //end of the function TH_PlaneSignBits
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//classify a plane normal: exactly axial planes get PLANE_X/Y/Z,
//all others are classified by their dominant axis
int TH_PlaneTypeForNormal(vec3_t normal)
{
	vec_t ax, ay, az;

	// NOTE: should these have an epsilon around 1.0?
	if (normal[0] == 1.0 || normal[0] == -1.0) return PLANE_X;
	if (normal[1] == 1.0 || normal[1] == -1.0) return PLANE_Y;
	if (normal[2] == 1.0 || normal[2] == -1.0) return PLANE_Z;
	//non-axial: pick the dominant axis
	ax = fabs(normal[0]);
	ay = fabs(normal[1]);
	az = fabs(normal[2]);
	if (ax >= ay && ax >= az) return PLANE_ANYX;
	else if (ay >= ax && ay >= az) return PLANE_ANYY;
	else return PLANE_ANYZ;
} //end of the function TH_PlaneTypeForNormal
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//planes are equal when normal and distance match within a small epsilon
qboolean TH_PlaneEqual(th_plane_t *p, vec3_t normal, vec_t dist)
{
	if (fabs(p->normal[0] - normal[0]) >= NORMAL_EPSILON) return false;
	if (fabs(p->normal[1] - normal[1]) >= NORMAL_EPSILON) return false;
	if (fabs(p->normal[2] - normal[2]) >= NORMAL_EPSILON) return false;
	if (fabs(p->dist - dist) >= DIST_EPSILON) return false;
	return true;
} //end of the function TH_PlaneEqual
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//link a plane into the front of its hash chain (hashed on distance)
void TH_AddPlaneToHash(th_plane_t *p)
{
	int hash;

	//8 distance units per hash bin
	hash = ((int)fabs(p->dist) / 8) & (PLANEHASH_SIZE-1);
	p->hashnext = thworld.planehash[hash];
	thworld.planehash[hash] = p;
} //end of the function TH_AddPlaneToHash
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//creates a new plane together with its opposite as an adjacent pair;
//returns the index of the plane that faces the requested normal
//(for axial planes the positive-facing plane always gets the even index)
int TH_CreateFloatPlane(vec3_t normal, vec_t dist)
{
	th_plane_t *p, temp;
	if (VectorLength(normal) < 0.5)
		Error ("FloatPlane: bad normal");
	// create a new plane
	if (thworld.numplanes+2 > MAX_TH_PLANES)
		Error ("MAX_TH_PLANES");
	p = &thworld.planes[thworld.numplanes];
	VectorCopy (normal, p->normal);
	p->dist = dist;
	p->type = (p+1)->type = TH_PlaneTypeForNormal (p->normal);
	p->signbits = TH_PlaneSignBits(p->normal);
	//the second plane of the pair is the same plane facing the other way
	VectorSubtract (vec3_origin, normal, (p+1)->normal);
	(p+1)->dist = -dist;
	(p+1)->signbits = TH_PlaneSignBits((p+1)->normal);
	thworld.numplanes += 2;
	// allways put axial planes facing positive first
	if (p->type < 3)
	{
		if (p->normal[0] < 0 || p->normal[1] < 0 || p->normal[2] < 0)
		{
			// flip order
			temp = *p;
			*p = *(p+1);
			*(p+1) = temp;
			TH_AddPlaneToHash(p);
			TH_AddPlaneToHash(p+1);
			//after the swap the requested plane is the second of the pair
			return thworld.numplanes - 1;
		} //end if
	} //end if
	TH_AddPlaneToHash(p);
	TH_AddPlaneToHash(p+1);
	return thworld.numplanes - 2;
} //end of the function TH_CreateFloatPlane
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//snap a normal that is within epsilon of +1/-1 on some axis to be
//exactly axial
void TH_SnapVector(vec3_t normal)
{
	int i;

	for (i = 0; i < 3; i++)
	{
		if (fabs(normal[i] - 1) < NORMAL_EPSILON)
		{
			VectorClear(normal);
			normal[i] = 1;
			return;
		} //end if
		else if (fabs(normal[i] - -1) < NORMAL_EPSILON)
		{
			VectorClear(normal);
			normal[i] = -1;
			return;
		} //end else if
	} //end for
} //end of the function TH_SnapVector
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//snap near-axial normals to axial and near-integral distances to integers
void TH_SnapPlane(vec3_t normal, vec_t *dist)
{
	TH_SnapVector(normal);
	if (fabs(*dist - Q_rint(*dist)) < DIST_EPSILON)
	{
		*dist = Q_rint(*dist);
	} //end if
} //end of the function TH_SnapPlane
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//return the number of an existing plane equal (within epsilon) to the
//given one, creating a new plane pair when none matches
int TH_FindFloatPlane(vec3_t normal, vec_t dist)
{
	int offset, h, hash;
	th_plane_t *p;

	TH_SnapPlane(normal, &dist);
	//planes are binned on distance, 8 units per bin
	hash = ((int)fabs(dist) / 8) & (PLANEHASH_SIZE-1);
	//an epsilon-equal plane may sit in a neighbouring bin, so scan those too
	for (offset = -1; offset <= 1; offset++)
	{
		h = (hash + offset) & (PLANEHASH_SIZE-1);
		for (p = thworld.planehash[h]; p; p = p->hashnext)
		{
			if (TH_PlaneEqual(p, normal, dist)) return p - thworld.planes;
		} //end for
	} //end for
	//no match, create a new plane
	return TH_CreateFloatPlane(normal, dist);
} //end of the function TH_FindFloatPlane
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//plane number of the plane through the three given vertexes
int TH_PlaneFromPoints(int v1, int v2, int v3)
{
	vec3_t d1, d2, normal;
	vec_t dist;

	//normal from the cross product of two triangle edges
	VectorSubtract(thworld.vertexes[v1].v, thworld.vertexes[v2].v, d1);
	VectorSubtract(thworld.vertexes[v3].v, thworld.vertexes[v2].v, d2);
	CrossProduct(d1, d2, normal);
	VectorNormalize(normal);
	dist = DotProduct(thworld.vertexes[v1].v, normal);
	return TH_FindFloatPlane(normal, dist);
} //end of the function TH_PlaneFromPoints
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//register one more (to be processed) triangle using this edge
void TH_AddEdgeUser(int edgenum)
{
	th_edge_t *e;

	e = &thworld.edges[abs(edgenum)];
	//one more triangle uses this edge
	e->usercount++;
	//and thereby both of the edge's vertexes
	thworld.vertexes[e->v[0]].usercount++;
	thworld.vertexes[e->v[1]].usercount++;
} //end of the function TH_AddEdgeUser
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//unregister a triangle that used this edge
void TH_RemoveEdgeUser(int edgenum)
{
	th_edge_t *e;

	e = &thworld.edges[abs(edgenum)];
	//one less triangle uses this edge
	e->usercount--;
	//and thereby both of the edge's vertexes
	thworld.vertexes[e->v[0]].usercount--;
	thworld.vertexes[e->v[1]].usercount--;
} //end of the function TH_RemoveEdgeUser
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//release the triangle's reference on each of its three edges
void TH_FreeTriangleEdges(th_triangle_t *tri)
{
	int i;

	for (i = 2; i >= 0; i--)
	{
		TH_RemoveEdgeUser(abs(tri->edges[i]));
	} //end for
} //end of the function TH_FreeTriangleEdges
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//hash a point into a 2D grid of buckets over its x and y coordinate
unsigned TH_HashVec(vec3_t vec)
{
	int x, y;

	x = (MAX_MAP_BOUNDS + (int)(vec[0] + 0.5)) >> VERTEXHASH_SHIFT;
	y = (MAX_MAP_BOUNDS + (int)(vec[1] + 0.5)) >> VERTEXHASH_SHIFT;
	if (x < 0 || x >= VERTEXHASH_SIZE || y < 0 || y >= VERTEXHASH_SIZE)
	{
		Error("HashVec: point %f %f %f outside valid range", vec[0], vec[1], vec[2]);
	} //end if
	return y * VERTEXHASH_SIZE + x;
} //end of the function TH_HashVec
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//number of an existing vertex within epsilon of the given point,
//or 0 when there is none (vertex 0 is never used)
int TH_FindVertex(vec3_t v)
{
	int axis, h;
	vec3_t vert;
	th_vertex_t *vertex;

	//snap near-integral coordinates before hashing
	for (axis = 0; axis < 3; axis++)
	{
		vert[axis] = v[axis];
		if (fabs(v[axis] - Q_rint(v[axis])) < INTEGRAL_EPSILON)
		{
			vert[axis] = Q_rint(v[axis]);
		} //end if
	} //end for
	//walk the hash chain of the grid cell this point falls in
	h = TH_HashVec(vert);
	for (vertex = thworld.vertexhash[h]; vertex; vertex = vertex->hashnext)
	{
		if (fabs(vertex->v[0] - vert[0]) >= VERTEX_EPSILON) continue;
		if (fabs(vertex->v[1] - vert[1]) >= VERTEX_EPSILON) continue;
		if (fabs(vertex->v[2] - vert[2]) >= VERTEX_EPSILON) continue;
		return vertex - thworld.vertexes;
	} //end for
	return 0;
} //end of the function TH_FindVertex
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//link a vertex into the front of its spatial hash chain
void TH_AddVertexToHash(th_vertex_t *vertex)
{
	int h;

	h = TH_HashVec(vertex->v);
	vertex->hashnext = thworld.vertexhash[h];
	thworld.vertexhash[h] = vertex;
} //end of the function TH_AddVertexToHash
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//store a new vertex and return its number
int TH_CreateVertex(vec3_t v)
{
	th_vertex_t *vertex;

	//index 0 is reserved as the "no vertex" value
	if (thworld.numvertexes == 0) thworld.numvertexes = 1;
	if (thworld.numvertexes >= MAX_TH_VERTEXES) Error("MAX_TH_VERTEXES");
	vertex = &thworld.vertexes[thworld.numvertexes++];
	VectorCopy(v, vertex->v);
	vertex->usercount = 0;
	TH_AddVertexToHash(vertex);
	return vertex - thworld.vertexes;
} //end of the function TH_CreateVertex
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//reuse an existing vertex within epsilon, otherwise add a new one
int TH_FindOrCreateVertex(vec3_t v)
{
	int vertexnum;

	vertexnum = TH_FindVertex(v);
	if (vertexnum) return vertexnum;
	return TH_CreateVertex(v);
} //end of the function TH_FindOrCreateVertex
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//number of the edge between the two vertexes: positive when stored in
//the same direction, negative when stored flipped, 0 when not found
int TH_FindEdge(int v1, int v2)
{
	int h;
	th_edge_t *edge;

	//hash on the (order independent) sum of the vertex numbers
	h = (v1 + v2) & (EDGEHASH_SIZE-1);
	for (edge = thworld.edgehash[h]; edge; edge = edge->hashnext)
	{
		if (edge->v[0] == v1 && edge->v[1] == v2) return edge - thworld.edges;
		if (edge->v[0] == v2 && edge->v[1] == v1) return -(edge - thworld.edges);
	} //end for
	return 0;
} //end of the function TH_FindEdge
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//link an edge into the front of its hash chain
void TH_AddEdgeToHash(th_edge_t *edge)
{
	int h;

	//hash on the (order independent) sum of the vertex numbers
	h = (edge->v[0] + edge->v[1]) & (EDGEHASH_SIZE-1);
	edge->hashnext = thworld.edgehash[h];
	thworld.edgehash[h] = edge;
} //end of the function TH_AddEdgeToHash
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int TH_CreateEdge(int v1, int v2)
{
th_edge_t *edge;
if (thworld.numedges == 0) thworld.numedges = 1;
if (thworld.numedges >= MAX_TH_EDGES)
Error("MAX_TH_EDGES");
edge = &thworld.edges[thworld.numedges++];
edge->v[0] = v1;
edge->v[1] = v2;
TH_AddEdgeToHash(edge);
return thworld.numedges-1;
} //end of the function TH_CreateEdge
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//reuse an existing edge (the sign encodes direction), or create one
int TH_FindOrCreateEdge(int v1, int v2)
{
	int edgenum;

	edgenum = TH_FindEdge(v1, v2);
	if (edgenum) return edgenum;
	return TH_CreateEdge(v1, v2);
} //end of the function TH_FindOrCreateEdge
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int TH_FindTriangle(int verts[3])
{
int i, hashvalue, edges[3];
th_triangle_t *tri;
for (i = 0; i < 3; i++)
{
edges[i] = TH_FindEdge(verts[i], verts[(i+1)%3]);
if (!edges[i]) return false;
} //end for
hashvalue = (abs(edges[0]) + abs(edges[1]) + abs(edges[2])) & (TRIANGLEHASH_SIZE-1);
for (tri = thworld.trianglehash[hashvalue]; tri; tri = tri->next)
{
for (i = 0; i < 3; i++)
{
if (abs(tri->edges[i]) != abs(edges[0]) &&
abs(tri->edges[i]) != abs(edges[1]) &&
abs(tri->edges[i]) != abs(edges[2])) break;
} //end for
if (i >= 3) return tri - thworld.triangles;
} //end for
return 0;
} //end of the function TH_FindTriangle
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//link a triangle into the front of its hash chain
void TH_AddTriangleToHash(th_triangle_t *tri)
{
	int h;

	//hash on the sum of the (unsigned) edge numbers
	h = (abs(tri->edges[0]) + abs(tri->edges[1]) + abs(tri->edges[2])) & (TRIANGLEHASH_SIZE-1);
	tri->hashnext = thworld.trianglehash[h];
	thworld.trianglehash[h] = tri;
} //end of the function TH_AddTriangleToHash
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//build, for each triangle edge, a bounding plane perpendicular to the
//triangle plane that contains the edge
void TH_CreateTrianglePlanes(int verts[3], th_plane_t *triplane, th_plane_t *planes)
{
	int i;
	vec3_t edgedir;

	for (i = 0; i < 3; i++)
	{
		VectorSubtract(thworld.vertexes[verts[(i+1)%3]].v, thworld.vertexes[verts[i]].v, edgedir);
		CrossProduct(edgedir, triplane->normal, planes[i].normal);
		VectorNormalize(planes[i].normal);
		planes[i].dist = DotProduct(thworld.vertexes[verts[i]].v, planes[i].normal);
	} //end for
} //end of the function TH_CreateTrianglePlanes
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//store a new triangle with winding verts[0]->[1]->[2] and return its
//number; edge numbers are negated when the stored edge runs opposite
//to the winding
int TH_CreateTriangle(int verts[3])
{
	th_triangle_t *tri;
	int i;
	//index 0 is reserved so negated triangle numbers stay unambiguous
	if (thworld.numtriangles == 0) thworld.numtriangles = 1;
	if (thworld.numtriangles >= MAX_TH_TRIANGLES)
		Error("MAX_TH_TRIANGLES");
	tri = &thworld.triangles[thworld.numtriangles++];
	for (i = 0; i < 3; i++)
	{
		tri->edges[i] = TH_FindOrCreateEdge(verts[i], verts[(i+1)%3]);
		TH_AddEdgeUser(abs(tri->edges[i]));
	} //end for
	//no tetrahedron at either side yet
	tri->front = 0;
	tri->back = 0;
	tri->planenum = TH_PlaneFromPoints(verts[0], verts[1], verts[2]);
	tri->prev = NULL;
	tri->next = NULL;
	tri->hashnext = NULL;
	//edge bounding planes, perpendicular to the triangle plane
	TH_CreateTrianglePlanes(verts, &thworld.planes[tri->planenum], tri->planes);
	TH_AddTriangleToHash(tri);
	//bounding box for quick edge rejection tests
	ClearBounds(tri->mins, tri->maxs);
	for (i = 0; i < 3; i++)
	{
		AddPointToBounds(thworld.vertexes[verts[i]].v, tri->mins, tri->maxs);
	} //end for
	return thworld.numtriangles-1;
} //end of the function TH_CreateTriangle
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//store a new tetrahedron built from the four given (signed) triangle
//numbers and register it with each triangle: a triangle's free side
//is claimed front first, then back
int TH_CreateTetrahedron(int triangles[4])
{
	th_tetrahedron_t *tetrahedron;
	int i;
	//index 0 is reserved as the "no tetrahedron" value
	if (thworld.numtetrahedrons == 0) thworld.numtetrahedrons = 1;
	if (thworld.numtetrahedrons >= MAX_TH_TETRAHEDRONS)
		Error("MAX_TH_TETRAHEDRONS");
	tetrahedron = &thworld.tetrahedrons[thworld.numtetrahedrons++];
	for (i = 0; i < 4; i++)
	{
		tetrahedron->triangles[i] = triangles[i];
		if (thworld.triangles[abs(triangles[i])].front)
		{
			//front side already claimed, attach at the back
			thworld.triangles[abs(triangles[i])].back = thworld.numtetrahedrons-1;
		} //end if
		else
		{
			thworld.triangles[abs(triangles[i])].front = thworld.numtetrahedrons-1;
		} //end else
	} //end for
	//volume is calculated later (see TH_TetrahedronVolume)
	tetrahedron->volume = 0;
	return thworld.numtetrahedrons-1;
} //end of the function TH_CreateTetrahedron
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//true when the edge from vertex v1 to v2 pierces the triangle described
//by triplane (its plane) and planes (its three edge bounding planes);
//the edge must cross the plane by more than 0.1 units at both ends
int TH_IntersectTrianglePlanes(int v1, int v2, th_plane_t *triplane, th_plane_t *planes)
{
	float *p1, *p2, front, back, frac, d;
	int i, side, lastside;
	vec3_t mid;
	p1 = thworld.vertexes[v1].v;
	p2 = thworld.vertexes[v2].v;
	//signed distances of both end points to the triangle plane
	front = DotProduct(p1, triplane->normal) - triplane->dist;
	back = DotProduct(p2, triplane->normal) - triplane->dist;
	//if both points at the same side of the plane
	if (front < 0.1 && back < 0.1) return false;
	if (front > -0.1 && back > -0.1) return false;
	//intersection point of the edge with the triangle plane
	frac = front/(front-back);
	mid[0] = p1[0] + (p2[0] - p1[0]) * frac;
	mid[1] = p1[1] + (p2[1] - p1[1]) * frac;
	mid[2] = p1[2] + (p2[2] - p1[2]) * frac;
	//if the mid point is at the same side of all the tri bounding planes
	lastside = 0;
	for (i = 0; i < 3; i++)
	{
		d = DotProduct(mid, planes[i].normal) - planes[i].dist;
		//NOTE(review): a point exactly on a bounding plane (d == 0) counts
		//as the non-negative side - confirm this is the intended behavior
		side = d < 0;
		if (i && side != lastside) return false;
		lastside = side;
	} //end for
	return true;
} //end of the function TH_IntersectTrianglePlanes
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//true when the edge between the two vertexes is trivially outside the
//bounding box: both end points at the outer side of the same box plane
int TH_OutsideBoundingBox(int v1, int v2, vec3_t mins, vec3_t maxs)
{
	float *p1, *p2;
	int axis;

	p1 = thworld.vertexes[v1].v;
	p2 = thworld.vertexes[v2].v;
	for (axis = 0; axis < 3; axis++)
	{
		if (p1[axis] < mins[axis] && p2[axis] < mins[axis]) return true;
		if (p1[axis] > maxs[axis] && p2[axis] > maxs[axis]) return true;
	} //end for
	return false;
} //end of the function TH_OutsideBoundingBox
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//true when an edge from v1 to v2 would be valid: it may not cut through
//any triangle that still lacks a tetrahedron at one of its sides
int TH_TryEdge(int v1, int v2)
{
	int i, j, v;
	th_plane_t *plane;
	th_triangle_t *tri;
	//if the edge already exists it must be valid
	if (TH_FindEdge(v1, v2)) return true;
	//test the edge with all existing triangles
	for (i = 1; i < thworld.numtriangles; i++)
	{
		tri = &thworld.triangles[i];
		//if triangle is enclosed by two tetrahedrons we don't have to test it
		//because the edge always has to go through another triangle of those
		//tetrahedrons first to reach the enclosed triangle
		if (tri->front && tri->back) continue;
		//if the edges is totally outside the triangle bounding box
		if (TH_OutsideBoundingBox(v1, v2, tri->mins, tri->maxs)) continue;
		//if one of the edge vertexes is used by this triangle
		for (j = 0; j < 3; j++)
		{
			//the index [tri->edges[j] < 0] selects the vertex at the start
			//of the edge as seen in the triangle's winding direction
			v = thworld.edges[abs(tri->edges[j])].v[tri->edges[j] < 0];
			if (v == v1 || v == v2) break;
		} //end for
		if (j < 3) continue;
		//get the triangle plane
		plane = &thworld.planes[tri->planenum];
		//if the edge intersects with a triangle then it's not valid
		if (TH_IntersectTrianglePlanes(v1, v2, plane, tri->planes)) return false;
	} //end for
	return true;
} //end of the function TH_TryEdge
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
//true when a triangle with the given vertexes would be valid: no
//existing in-use edge may cut through it
int TH_TryTriangle(int verts[3])
{
	th_plane_t planes[3], triplane;
	vec3_t t1, t2;
	float *p0, *p1, *p2;
	int i, j;
	//plane through the three triangle vertexes
	p0 = thworld.vertexes[verts[0]].v;
	p1 = thworld.vertexes[verts[1]].v;
	p2 = thworld.vertexes[verts[2]].v;
	VectorSubtract(p0, p1, t1);
	VectorSubtract(p2, p1, t2);
	CrossProduct(t1, t2, triplane.normal);
	VectorNormalize(triplane.normal);
	triplane.dist = DotProduct(p0, triplane.normal);
	//edge bounding planes, perpendicular to the triangle plane
	TH_CreateTrianglePlanes(verts, &triplane, planes);
	//test if any existing edge intersects with this triangle
	for (i = 1; i < thworld.numedges; i++)
	{
		//if the edge is only used by triangles with tetrahedrons at both sides
		if (!thworld.edges[i].usercount) continue;
		//if one of the triangle vertexes is used by this edge
		for (j = 0; j < 3; j++)
		{
			//FIX: was thworld.edges[j] - the edge array was indexed with
			//the vertex counter j instead of the edge i being tested
			if (verts[j] == thworld.edges[i].v[0] ||
				verts[j] == thworld.edges[i].v[1]) break;
		} //end for
		if (j < 3) continue;
		//if this edge intersects with the triangle
		if (TH_IntersectTrianglePlanes(thworld.edges[i].v[0], thworld.edges[i].v[1], &triplane, planes)) return false;
	} //end for
	return true;
} //end of the function TH_TryTriangle
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void TH_AddTriangleToList(th_triangle_t **trianglelist, th_triangle_t *tri)
{
	// Pushes tri onto the front of the doubly linked triangle list.
	th_triangle_t *head;

	head = *trianglelist;
	tri->prev = NULL;
	tri->next = head;
	if (head != NULL)
	{
		head->prev = tri;
	} //end if
	*trianglelist = tri;
} //end of the function TH_AddTriangleToList
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void TH_RemoveTriangleFromList(th_triangle_t **trianglelist, th_triangle_t *tri)
{
	// Unlinks tri from its doubly linked list; when tri is the first
	// element the list head is advanced to the next triangle.
	if (tri->next != NULL)
	{
		tri->next->prev = tri->prev;
	} //end if
	if (tri->prev != NULL)
	{
		tri->prev->next = tri->next;
	} //end if
	else
	{
		*trianglelist = tri->next;
	} //end else
} //end of the function TH_RemoveTriangleFromList
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int TH_FindTetrahedron1(th_triangle_t *tri, int *triangles)
{
	// Tries to form a tetrahedron from triangle tri and another triangle
	// further down the list that shares an edge with it. On success the
	// four triangle numbers of the tetrahedron are stored in
	// triangles[0..3] (the two missing side triangles are created) and
	// true is returned; otherwise false.
	int i, j, edgenum, side, v1, v2, v3, v4;
	int verts1[3], verts2[3];
	th_triangle_t *tri2;
	//find another triangle with a shared edge
	for (tri2 = tri->next; tri2; tri2 = tri2->next)
	{
		//if the triangles are in the same plane
		//NOTE(review): & ~1 presumably folds the opposite-facing plane of
		//a plane pair onto the same number (Quake-style plane storage) —
		//confirm against TH_CreateTriangle/plane allocation
		if ((tri->planenum & ~1) == (tri2->planenum & ~1)) continue;
		//try to find a shared edge
		for (i = 0; i < 3; i++)
		{
			edgenum = abs(tri->edges[i]);
			for (j = 0; j < 3; j++)
			{
				if (edgenum == abs(tri2->edges[j])) break;
			} //end for
			if (j < 3) break;
		} //end for
		//if the triangles have a shared edge
		//(tri->edges[i] == tri2->edges[j] is the shared one)
		if (i < 3)
		{
			//v1 = the vertex of tri that is not on the shared edge; the
			//sign of the following edge selects which of its ends that is
			edgenum = tri->edges[(i+1)%3];
			if (edgenum < 0) v1 = thworld.edges[abs(edgenum)].v[0];
			else v1 = thworld.edges[edgenum].v[1];
			//v2 = the vertex of tri2 that is not on the shared edge
			edgenum = tri2->edges[(j+1)%3];
			if (edgenum < 0) v2 = thworld.edges[abs(edgenum)].v[0];
			else v2 = thworld.edges[edgenum].v[1];
			//try the new edge: v1-v2 would be the sixth (opposite) edge
			//of the tetrahedron
			if (TH_TryEdge(v1, v2))
			{
				edgenum = tri->edges[i];
				side = edgenum < 0;
				//get the vertexes of the shared edge
				v3 = thworld.edges[abs(edgenum)].v[side];
				v4 = thworld.edges[abs(edgenum)].v[!side];
				//try the two new side triangles (v1,v2,v3) and (v2,v1,v4)
				verts1[0] = v1;
				verts1[1] = v2;
				verts1[2] = v3;
				//reuse an already existing triangle when possible
				triangles[2] = TH_FindTriangle(verts1);
				if (triangles[2] || TH_TryTriangle(verts1))
				{
					verts2[0] = v2;
					verts2[1] = v1;
					verts2[2] = v4;
					triangles[3] = TH_FindTriangle(verts2);
					if (triangles[3] || TH_TryTriangle(verts2))
					{
						//store the four triangle numbers of the tetrahedron;
						//the pointer differences are indices into thworld.triangles
						triangles[0] = tri - thworld.triangles;
						triangles[1] = tri2 - thworld.triangles;
						if (!triangles[2]) triangles[2] = TH_CreateTriangle(verts1);
						if (!triangles[3]) triangles[3] = TH_CreateTriangle(verts2);
						return true;
					} //end if
				} //end if
			} //end if
		} //end if
	} //end for
	return false;
} //end of the function TH_FindTetrahedron
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int TH_FindTetrahedron2(th_triangle_t *tri, int *triangles)
{
	// Tries to form a tetrahedron from triangle tri and a single world
	// vertex that is not coplanar with it, by connecting all three
	// triangle vertexes to that vertex. On success the four triangle
	// numbers are stored in triangles[0..3] (missing side triangles are
	// created, triangles[3] is tri itself) and true is returned.
	int i, edgenum, v1, verts[3], triverts[3];
	float d;
	th_plane_t *plane;
	//get the verts of this triangle; the edge sign selects the end that
	//is the first vertex of the edge in the triangle's winding order
	for (i = 0; i < 3; i++)
	{
		edgenum = tri->edges[i];
		if (edgenum < 0) verts[i] = thworld.edges[abs(edgenum)].v[1];
		else verts[i] = thworld.edges[edgenum].v[0];
	} //end for
	//
	plane = &thworld.planes[tri->planenum];
	for (v1 = 0; v1 < thworld.numvertexes; v1++)
	{
		//if the vertex is only used by triangles with tetrahedrons at both sides
		if (!thworld.vertexes[v1].usercount) continue;
		//check if the vertex is not coplanar with the triangle
		//(distance of at least 1 unit from the triangle plane)
		d = DotProduct(thworld.vertexes[v1].v, plane->normal) - plane->dist;
		if (fabs(d) < 1) continue;
		//check if we can create edges from the triangle towards this new vertex
		//(also rejects v1 when it is one of the triangle's own vertexes)
		for (i = 0; i < 3; i++)
		{
			if (v1 == verts[i]) break;
			if (!TH_TryEdge(v1, verts[i])) break;
		} //end for
		if (i < 3) continue;
		//check if the three side triangles are valid
		for (i = 0; i < 3; i++)
		{
			triverts[0] = v1;
			triverts[1] = verts[i];
			triverts[2] = verts[(i+1)%3];
			//if the triangle already exists then it is valid
			triangles[i] = TH_FindTriangle(triverts);
			if (!triangles[i])
			{
				if (!TH_TryTriangle(triverts)) break;
			} //end if
		} //end for
		if (i < 3) continue;
		//create the tetrahedron triangles using the new vertex
		//(only the ones TH_FindTriangle did not already find)
		for (i = 0; i < 3; i++)
		{
			if (!triangles[i])
			{
				triverts[0] = v1;
				triverts[1] = verts[i];
				triverts[2] = verts[(i+1)%3];
				triangles[i] = TH_CreateTriangle(triverts);
			} //end if
		} //end for
		//add the existing triangle as the base of the tetrahedron
		triangles[3] = tri - thworld.triangles;
		//
		return true;
	} //end for
	return false;
} //end of the function TH_FindTetrahedron2
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void TH_TetrahedralDecomposition(th_triangle_t *triangles)
{
	// Decomposes the space bounded by the triangle mesh into tetrahedrons.
	// The active code repeatedly scans all world triangles that do not yet
	// have a tetrahedron on both sides: an inner fixed-point loop pairs
	// triangles via TH_FindTetrahedron1 until no progress is made, then a
	// fallback pass uses TH_FindTetrahedron2; the outer loop repeats while
	// the fallback still creates tetrahedrons.
	// NOTE(review): the triangles parameter is only used by the
	// commented-out list-based variant below; the active code walks
	// thworld.triangles directly.
	int i, thtriangles[4], numtriangles;
	th_triangle_t *donetriangles, *tri;
	donetriangles = NULL;
	/*
	numtriangles = 0;
	qprintf("%6d triangles", numtriangles);
	for (tri = triangles; tri; tri = triangles)
	{
		qprintf("\r%6d", numtriangles++);
		if (!TH_FindTetrahedron1(tri, thtriangles))
		{
//			if (!TH_FindTetrahedron2(tri, thtriangles))
			{
//				Error("triangle without tetrahedron");
				TH_RemoveTriangleFromList(&triangles, tri);
				continue;
			} //end if
		} //end if
		//create a tetrahedron from the triangles
		TH_CreateTetrahedron(thtriangles);
		//
		for (i = 0; i < 4; i++)
		{
			if (thworld.triangles[abs(thtriangles[i])].front &&
				thworld.triangles[abs(thtriangles[i])].back)
			{
				TH_RemoveTriangleFromList(&triangles, &thworld.triangles[abs(thtriangles[i])]);
				TH_AddTriangleToList(&donetriangles, &thworld.triangles[abs(thtriangles[i])]);
				TH_FreeTriangleEdges(&thworld.triangles[abs(thtriangles[i])]);
			} //end if
			else
			{
				TH_AddTriangleToList(&triangles, &thworld.triangles[abs(thtriangles[i])]);
			} //end else
		} //end for
	} //end for*/
	qprintf("%6d tetrahedrons", thworld.numtetrahedrons);
	do
	{
		//pair-based pass: repeat until a full scan creates no tetrahedron
		do
		{
			numtriangles = 0;
			for (i = 1; i < thworld.numtriangles; i++)
			{
				tri = &thworld.triangles[i];
				//skip triangles that already have a tetrahedron at both sides
				if (tri->front && tri->back) continue;
				//qprintf("\r%6d", numtriangles++);
				if (!TH_FindTetrahedron1(tri, thtriangles))
				{
//					if (!TH_FindTetrahedron2(tri, thtriangles))
					{
						continue;
					} //end if
				} //end if
				numtriangles++;
				//create a tetrahedron from the triangles
				TH_CreateTetrahedron(thtriangles);
				qprintf("\r%6d", thworld.numtetrahedrons);
			} //end for
		} while(numtriangles);
		//fallback pass using a free vertex; numtriangles is zero here from
		//the inner loop's exit condition, so the outer while only repeats
		//when this pass makes progress
		for (i = 1; i < thworld.numtriangles; i++)
		{
			tri = &thworld.triangles[i];
			if (tri->front && tri->back) continue;
			//qprintf("\r%6d", numtriangles++);
//			if (!TH_FindTetrahedron1(tri, thtriangles))
			{
				if (!TH_FindTetrahedron2(tri, thtriangles))
				{
					continue;
				} //end if
			} //end if
			numtriangles++;
			//create a tetrahedron from the triangles
			TH_CreateTetrahedron(thtriangles);
			qprintf("\r%6d", thworld.numtetrahedrons);
		} //end for
	} while(numtriangles);
	//count and report the triangles that never got a back-side tetrahedron
	numtriangles = 0;
	for (i = 1; i < thworld.numtriangles; i++)
	{
		tri = &thworld.triangles[i];
		if (!tri->front && !tri->back) numtriangles++;
	} //end for
	Log_Print("\r%6d triangles with front only\n", numtriangles);
	Log_Print("\r%6d tetrahedrons\n", thworld.numtetrahedrons-1);
} //end of the function TH_TetrahedralDecomposition
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void TH_AASFaceVertex(aas_face_t *face, int index, vec3_t vertex)
{
	// Copies the index-th vertex of the AAS face (in winding order) into
	// vertex. A negative edge index means the face uses the edge reversed,
	// so its second vertex comes first in the winding.
	int edgenum;
	aas_edge_t *edge;

	edgenum = aasworld.edgeindex[face->firstedge + index];
	edge = &aasworld.edges[abs(edgenum)];
	if (edgenum < 0) VectorCopy(aasworld.vertexes[edge->v[1]], vertex);
	else VectorCopy(aasworld.vertexes[edge->v[0]], vertex);
} //end of the function TH_AASFaceVertex
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
int TH_Colinear(float *v0, float *v1, float *v2)
{
	// Returns true when the three points are (nearly) colinear. The length
	// of the cross product of the two edge directions is twice the area of
	// the triangle (v0, v1, v2); a small value means the points are on one
	// line within tolerance.
	vec3_t dir1, dir2, normal;

	VectorSubtract(v1, v0, dir1);
	VectorSubtract(v2, v0, dir2);
	CrossProduct(dir1, dir2, normal);
	//colinear when the (doubled) triangle area is below the tolerance
	return (VectorLength(normal) < 10);
} //end of the function TH_Colinear
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void TH_FaceCenter(aas_face_t *face, vec3_t center)
{
	// Computes the center of the AAS face as the average of its vertexes,
	// taking one vertex per edge in face winding order.
	int i, edgenum, side;
	aas_edge_t *edge;

	VectorClear(center);
	for (i = 0; i < face->numedges; i++)
	{
		//BUGFIX: edgenum was passed through abs() before the sign test, so
		//side was always 0 and reversed edges contributed the wrong end
		//vertex (some vertexes summed twice, others skipped). Take the
		//signed edge index first, as TH_AASFaceVertex does.
		edgenum = aasworld.edgeindex[face->firstedge + i];
		side = edgenum < 0;
		edge = &aasworld.edges[abs(edgenum)];
		//add the first vertex of the edge in the face's winding order
		VectorAdd(aasworld.vertexes[edge->v[side]], center, center);
	} //end for
	VectorScale(center, 1.0 / face->numedges, center);
} //end of the function TH_FaceCenter
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
th_triangle_t *TH_CreateAASFaceTriangles(aas_face_t *face)
{
	// Triangulates an AAS face and returns the triangles as a linked list.
	// When three consecutive non-colinear boundary points exist, a normal
	// strip triangulation from that corner is used; otherwise the face is
	// fan-triangulated around its center point. Every created triangle is
	// marked with front = -1 (solid at the front side).
	int i, first, verts[3], trinum;
	vec3_t p0, p1, p2, p3, p4, center;
	th_triangle_t *tri, *triangles;
	triangles = NULL;
	//find three points that are not colinear
	//(checked on both sides of point i so it can serve as a fan corner)
	for (i = 0; i < face->numedges; i++)
	{
		TH_AASFaceVertex(face, (face->numedges + i-2)%face->numedges, p0);
		TH_AASFaceVertex(face, (face->numedges + i-1)%face->numedges, p1);
		TH_AASFaceVertex(face, (i  )%face->numedges, p2);
		if (TH_Colinear(p2, p0, p1)) continue;
		TH_AASFaceVertex(face, (i+1)%face->numedges, p3);
		TH_AASFaceVertex(face, (i+2)%face->numedges, p4);
		if (TH_Colinear(p2, p3, p4)) continue;
		break;
	} //end for
	//if there are three points that are not colinear
	if (i < face->numedges)
	{
		//normal triangulation
		first = i;	//left and right most point of three non-colinear points
		TH_AASFaceVertex(face, first, p0);
		verts[0] = TH_FindOrCreateVertex(p0);
		//fan all other boundary points against the corner vertex
		for (i = 1; i < face->numedges-1; i++)
		{
			TH_AASFaceVertex(face, (first+i  )%face->numedges, p1);
			TH_AASFaceVertex(face, (first+i+1)%face->numedges, p2);
			verts[1] = TH_FindOrCreateVertex(p1);
			verts[2] = TH_FindOrCreateVertex(p2);
			trinum = TH_CreateTriangle(verts);
			tri = &thworld.triangles[trinum];
			//mark the front side as solid
			tri->front = -1;
			TH_AddTriangleToList(&triangles, tri);
		} //end for
	} //end if
	else
	{
		//fan triangulation around the face center; degenerate (colinear)
		//fan triangles are skipped
		TH_FaceCenter(face, center);
		//
		verts[0] = TH_FindOrCreateVertex(center);
		for (i = 0; i < face->numedges; i++)
		{
			TH_AASFaceVertex(face, (i  )%face->numedges, p1);
			TH_AASFaceVertex(face, (i+1)%face->numedges, p2);
			if (TH_Colinear(center, p1, p2)) continue;
			verts[1] = TH_FindOrCreateVertex(p1);
			verts[2] = TH_FindOrCreateVertex(p2);
			trinum = TH_CreateTriangle(verts);
			tri = &thworld.triangles[trinum];
			//mark the front side as solid
			tri->front = -1;
			TH_AddTriangleToList(&triangles, tri);
		} //end for
	} //end else
	return triangles;
} //end of the function TH_CreateAASFaceTriangles
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
th_triangle_t *TH_AASToTriangleMesh(void)
{
	// Converts all solid faces of the loaded AAS world into a single
	// linked list of triangles and returns the list head.
	// NOTE(review): otherareanum is only used by the commented-out
	// presence-type filter below.
	int i, j, facenum, otherareanum;
	aas_face_t *face;
	th_triangle_t *tri, *nexttri, *triangles;
	triangles = NULL;
	//area 0 is not used; walk every face of every area
	for (i = 1; i < aasworld.numareas; i++)
	{
		//if (!(aasworld.areasettings[i].presencetype & PRESENCE_NORMAL)) continue;
		for (j = 0; j < aasworld.areas[i].numfaces; j++)
		{
			//face indexes are signed by side of the area; the face itself
			//is looked up by absolute number
			facenum = abs(aasworld.faceindex[aasworld.areas[i].firstface + j]);
			face = &aasworld.faces[facenum];
			//only convert solid faces into triangles
			if (!(face->faceflags & FACE_SOLID))
			{
				/*
				if (face->frontarea == i) otherareanum = face->backarea;
				else otherareanum = face->frontarea;
				if (aasworld.areasettings[otherareanum].presencetype & PRESENCE_NORMAL) continue;
				*/
				continue;
			} //end if
			//triangulate the face and move its triangles onto the result
			//list; nexttri is saved because TH_AddTriangleToList relinks
			//the triangle's next pointer
			tri = TH_CreateAASFaceTriangles(face);
			for (; tri; tri = nexttri)
			{
				nexttri = tri->next;
				TH_AddTriangleToList(&triangles, tri);
			} //end for
		} //end if
	} //end for
	return triangles;
} //end of the function TH_AASToTriangleMesh
//===========================================================================
//
// Parameter: -
// Returns: -
// Changes Globals: -
//===========================================================================
void TH_AASToTetrahedrons(char *filename)
{
	// Loads the AAS file, builds a triangle mesh from its solid faces and
	// decomposes the enclosed space into tetrahedrons.
	th_triangle_t *triangles, *tri, *prevtri;
	int numtriangles;

	if (!AAS_LoadAASFile(filename, 0, 0))
		Error("couldn't load %s\n", filename);
	//
	TH_InitMaxTH();
	//create a triangle mesh from the solid faces in the AAS file
	triangles = TH_AASToTriangleMesh();
	//count the triangles and verify the prev links of the list
	numtriangles = 0;
	prevtri = NULL;
	tri = triangles;
	while (tri)
	{
		numtriangles++;
		if (tri->prev != prevtri) Log_Print("BAH\n");
		prevtri = tri;
		tri = tri->next;
	} //end while
	Log_Print("%6d triangles\n", numtriangles);
	//create a tetrahedral decomposition of the world bounded by triangles
	TH_TetrahedralDecomposition(triangles);
	//
	TH_FreeMaxTH();
} //end of the function TH_AASToTetrahedrons
| 15,902 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.