max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.php.codeception.run;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import org.netbeans.junit.NbTestCase;
import org.netbeans.modules.php.spi.testing.run.TestCase;
/**
 * Tests for {@code CodeceptionLogParser}.
 * <p>
 * Each test feeds a recorded Codeception XML log from the test data directory
 * into the parser and verifies the resulting session, suite and case values
 * (times, test counts, file locations, line numbers and stack traces).
 */
public class CodeceptionLogParserTest extends NbTestCase {

    public CodeceptionLogParserTest(String name) {
        super(name);
    }

    public void testParseLogWithOneCodeceptionSuite() throws Exception {
        Reader reader = createReader("codeception-log-one-codeception-suite.xml");
        TestSessionVo testSession = new TestSessionVo();
        CodeceptionLogParser.parse(reader, testSession);
        assertEquals(4, testSession.getTime());
        assertEquals(1, testSession.getTests());
        // test suites & test cases
        assertEquals(1, testSession.getTestSuites().size());
        // 1st
        TestSuiteVo testSuite = testSession.getTestSuites().get(0);
        assertEquals("unit", testSuite.getName());
        assertEquals(null, testSuite.getLocation());
        assertEquals(4, testSuite.getTime());
        assertEquals(1, testSuite.getTestCases().size());
        TestCaseVo testCase = testSuite.getTestCases().get(0);
        assertEquals("App\\FizzBuzzTest", testCase.getClassName());
        assertEquals("testExec", testCase.getName());
        assertEquals("/home/junichi11/NetBeansProjects/codeception/tests/unit/App/FizzBuzzTest.php", testCase.getFile());
        // Codeception-native suites carry no line information.
        assertEquals(-1, testCase.getLine());
        assertEquals(4, testCase.getTime());
    }

    public void testParseLogWithOnePhpUnitSuite() throws Exception {
        Reader reader = createReader("codeception-log-one-phpunit-suite.xml");
        TestSessionVo testSession = new TestSessionVo();
        CodeceptionLogParser.parse(reader, testSession);
        assertEquals(5, testSession.getTime());
        assertEquals(1, testSession.getTests());
        // test suites & test cases
        assertEquals(1, testSession.getTestSuites().size());
        // 1st
        TestSuiteVo testSuite = testSession.getTestSuites().get(0);
        assertEquals("unit", testSuite.getName());
        assertEquals(null, testSuite.getLocation());
        assertEquals(5, testSuite.getTime());
        assertEquals(1, testSuite.getTestCases().size());
        TestCaseVo testCase = testSuite.getTestCases().get(0);
        assertEquals("App\\FizzBuzzPhpUnitTest", testCase.getClassName());
        assertEquals("testExec", testCase.getName());
        assertEquals("/home/junichi11/NetBeansProjects/codeception/tests/unit/App/FizzBuzzPhpUnitTest.php", testCase.getFile());
        // PHPUnit-style suites do report line numbers.
        assertEquals(17, testCase.getLine());
        assertEquals(5, testCase.getTime());
    }

    public void testParseLogWithMoreSuites() throws Exception {
        Reader reader = createReader("codeception-log-more-suites.xml");
        TestSessionVo testSession = new TestSessionVo();
        CodeceptionLogParser.parse(reader, testSession);
        // Session totals are the sums over the three suites.
        assertEquals(268 + 175 + 19, testSession.getTime());
        assertEquals(4 + 4 + 1, testSession.getTests());
        assertEquals(3, testSession.getTestSuites().size());
        // 1st
        TestSuiteVo testSuite = testSession.getTestSuites().get(0);
        assertEquals("functional", testSuite.getName());
        assertEquals(268, testSuite.getTime());
        assertEquals(4, testSuite.getTestCases().size());
        TestCaseVo testCase = testSuite.getTestCases().get(0);
        assertEquals("About", testCase.getName());
        assertEquals("/home/junichi11/NetBeansProjects/yii2-codeception/tests/codeception/functional/AboutCept.php", testCase.getFile());
        assertEquals(-1, testCase.getLine());
        assertEquals(28, testCase.getTime());
        // 2nd
        testSuite = testSession.getTestSuites().get(1);
        assertEquals("unit", testSuite.getName());
        assertEquals(175, testSuite.getTime());
        assertEquals(4, testSuite.getTestCases().size());
        testCase = testSuite.getTestCases().get(0);
        assertEquals("testExec", testCase.getName());
        assertEquals("/home/junichi11/NetBeansProjects/codeception/tests/unit/App/FizzBuzz2Test.php", testCase.getFile());
        assertEquals(-1, testCase.getLine());
        assertEquals(56, testCase.getTime());
        // Failure with a multi-line object diff in the first stack frame.
        testCase = testSuite.getTestCases().get(1);
        assertEquals("testFailure", testCase.getName());
        assertTrue(testCase.isFailure());
        assertFalse(testCase.isError());
        assertEquals(TestCase.Status.FAILED, testCase.getStatus());
        assertEquals(2, testCase.getStackTrace().length);
        assertEquals("Failed asserting that two objects are equal.\n"
                + "--- Expected\n"
                + "+++ Actual\n"
                + "@@ @@\n"
                + " App\\FizzBuzz Object (\n"
                + " 'start' => 0\n"
                + "- 'end' => 200\n"
                + "+ 'end' => 300\n"
                + " )",
                testCase.getStackTrace()[0]);
        assertEquals("/home/junichi11/NetBeansProjects/codeception/tests/unit/App/FizzBuzz2Test.php:33", testCase.getStackTrace()[1]);
        testCase = testSuite.getTestCases().get(2);
        assertEquals("testFailure2", testCase.getName());
        assertTrue(testCase.isFailure());
        assertFalse(testCase.isError());
        assertEquals(TestCase.Status.FAILED, testCase.getStatus());
        assertEquals(2, testCase.getStackTrace().length);
        assertEquals("Failed asserting that 2 matches expected 1.", testCase.getStackTrace()[0]);
        assertEquals("/home/junichi11/NetBeansProjects/codeception/tests/unit/App/FizzBuzz2Test.php:37", testCase.getStackTrace()[1]);
        // Errors (uncaught exceptions) are distinguished from failures.
        testCase = testSuite.getTestCases().get(3);
        assertEquals("testError", testCase.getName());
        assertTrue(testCase.isError());
        assertFalse(testCase.isFailure());
        assertEquals(TestCase.Status.ERROR, testCase.getStatus());
        assertEquals(2, testCase.getStackTrace().length);
        assertEquals("Exception: my exception", testCase.getStackTrace()[0]);
        assertEquals("/home/junichi11/NetBeansProjects/codeception/tests/unit/App/FizzBuzz2Test.php:42", testCase.getStackTrace()[1]);
        // 3rd
        testSuite = testSession.getTestSuites().get(2);
        assertEquals("acceptance", testSuite.getName());
        assertEquals(19, testSuite.getTime());
        assertEquals(1, testSuite.getTestCases().size());
        testCase = testSuite.getTestCases().get(0);
        assertEquals("Welcome", testCase.getName());
        assertTrue(testCase.isFailure());
        assertFalse(testCase.isError());
        assertEquals(TestCase.Status.FAILED, testCase.getStatus());
        assertEquals(3, testCase.getStackTrace().length);
        // The failure message embeds raw HTML from the acceptance run.
        assertEquals("Failed asserting that <bold>/</bold>\n"
                + "--> <info><!DOCTYPE html>\n"
                + "<html>\n"
                + " <head>\n"
                + " <meta charset=\"UTF-8\">\n"
                + " <title>Home</title>\n"
                + " </head>\n"
                + " <body>\n"
                + " </body>\n"
                + "</html>\n"
                + "</info>\n"
                + "--> contains \"welcome\".",
                testCase.getStackTrace()[0]);
        assertEquals("/home/junichi11/NetBeansProjects/codeception/tests/_support/_generated/AcceptanceTesterActions.php:257", testCase.getStackTrace()[1]);
        assertEquals("/home/junichi11/NetBeansProjects/codeception/tests/acceptance/WelcomeCept.php:6", testCase.getStackTrace()[2]);
    }

    public void testParseLogWithWarningPhpUnitSuite() throws Exception {
        Reader reader = createReader("codeception-log-warning-phpunit-suite.xml");
        TestSessionVo testSession = new TestSessionVo();
        CodeceptionLogParser.parse(reader, testSession);
        assertEquals(20, testSession.getTime());
        assertEquals(1, testSession.getTests());
        // test suites & test cases
        assertEquals(1, testSession.getTestSuites().size());
        // 1st
        TestSuiteVo testSuite = testSession.getTestSuites().get(0);
        assertEquals("unit", testSuite.getName());
        assertEquals(null, testSuite.getLocation());
        assertEquals(20, testSuite.getTime());
        assertEquals(1, testSuite.getTestCases().size());
        TestCaseVo testCase = testSuite.getTestCases().get(0);
        assertEquals("ProjectX\\FooTest", testCase.getClassName());
        assertEquals("testGetBar", testCase.getName());
        assertEquals("/home/kacer/projectx/tests/unit/FooTest.php", testCase.getFile());
        assertEquals(6, testCase.getLine());
        assertEquals(20, testCase.getTime());
        // PHPUnit warnings are mapped to failures by the parser.
        assertTrue(testCase.isFailure());
        assertFalse(testCase.isError());
        assertEquals(TestCase.Status.FAILED, testCase.getStatus());
        assertEquals(1, testCase.getStackTrace().length);
        assertEquals("Trying to configure method \"getBarAAA\" which cannot be configured because it does not exist, has not been specified, is final, or is static", testCase.getStackTrace()[0]);
    }

    /**
     * Opens a log file from the test data directory for reading.
     * <p>
     * Decodes explicitly as UTF-8 instead of using {@code FileReader}:
     * Codeception writes its XML logs in UTF-8, while {@code FileReader}
     * (pre Java 18) decodes with the platform default charset and would
     * corrupt non-ASCII test names on non-UTF-8 systems.
     *
     * @param filename name of the XML log file inside the data directory
     * @return a buffered UTF-8 reader over the file
     * @throws FileNotFoundException if the data file does not exist
     */
    private Reader createReader(String filename) throws FileNotFoundException {
        return new BufferedReader(new InputStreamReader(
                new FileInputStream(new File(getDataDir(), filename)),
                StandardCharsets.UTF_8));
    }
}
| 4,120 |
679 | <gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#include <com/sun/star/lang/XSingleServiceFactory.hpp>
#include <cppuhelper/factory.hxx>
#include <cppuhelper/implementationentry.hxx>
#include <debugservices/doctok/DocTokTestService.hxx>
#include <debugservices/doctok/DocTokAnalyzeService.hxx>
#include <debugservices/ooxml/OOXMLTestService.hxx>
#include <debugservices/ooxml/OOXMLAnalyzeService.hxx>
#include <stdio.h>
using namespace com::sun::star;
extern "C"
{
/* shared lib exports implemented with helpers */

// Table of UNO services exported by this shared library.  Each
// *_COMPONENT_ENTRY macro expands to a ::cppu::ImplementationEntry initializer
// (factory function, implementation name, service names, ...).  The table must
// be terminated by an all-zero sentinel entry.
static struct ::cppu::ImplementationEntry s_component_entries [] =
{//uno -l writerfilter.uno.dll -c debugservices.rtftok.ScannerTestService -- a b c
    DOCTOK_SCANNERTESTSERVICE_COMPONENT_ENTRY, /* debugservices.doctok.ScannerTestService */
    DOCTOK_ANALYZESERVICE_COMPONENT_ENTRY, /* debugservices.doctok.AnalyzeService */
    OOXML_SCANNERTESTSERVICE_COMPONENT_ENTRY, /* debugservices.ooxml.ScannerTestService */
    OOXML_ANALYZESERVICE_COMPONENT_ENTRY, /* debugservices.ooxml.AnalyzeService */
    { 0, 0, 0, 0, 0, 0 } // terminate with NULL
};

// Standard UNO component export: reports which language binding environment
// (e.g. the C++ UNO runtime) this component was compiled for.
SAL_DLLPUBLIC_EXPORT void SAL_CALL
component_getImplementationEnvironment(const sal_Char ** ppEnvTypeName, uno_Environment ** /*ppEnv*/ )
{
    *ppEnvTypeName = CPPU_CURRENT_LANGUAGE_BINDING_NAME;
}

// Standard UNO component export: returns a factory for the implementation
// named implName by looking it up in s_component_entries, or a null pointer
// if the name is unknown.  The stderr logging is intentional — these are
// debug services.
SAL_DLLPUBLIC_EXPORT void * SAL_CALL
component_getFactory(sal_Char const * implName, ::com::sun::star::lang::XMultiServiceFactory * xMgr, ::com::sun::star::registry::XRegistryKey * xRegistry )
{
    fprintf(stderr, "Loading service: %s: ", implName);
    void * pResult = ::cppu::component_getFactoryHelper(implName, xMgr, xRegistry, s_component_entries );
    fprintf(stderr, "%p\n", pResult);
    return pResult;
}
}
| 834 |
2,143 | package com.tngtech.archunit.testutil;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import com.google.common.collect.Lists;
import org.junit.rules.ExternalResource;
import static com.google.common.base.Preconditions.checkState;
/**
 * JUnit rule that temporarily replaces or extends files on disk and restores
 * the original state after the test: existing files are parked in a temporary
 * directory, newly written files and created directories are deleted again.
 */
public class ReplaceFileRule extends ExternalResource {
    private final File tempDir = TestUtils.newTemporaryFolder();

    /** All actions in execution order; {@link #after()} reverts them in reverse order. */
    private final List<FileAction> fileActions = new ArrayList<>();
    /** Files whose content this rule has already written during this test. */
    private final Set<File> replacedFiles = new HashSet<>();

    /**
     * Writes {@code content} to {@code target}, first moving any existing file
     * aside so it can be restored in {@link #after()}.
     *
     * @param target  file to replace (parent directories are created as needed)
     * @param content new file content
     * @param charset charset used to encode the content
     */
    public void replace(File target, String content, Charset charset) {
        if (target.exists()) {
            // Park the original in the temp dir under a unique name — using
            // just target.getName() would collide if two replaced files in
            // different directories share the same base name.
            File backup = new File(tempDir, target.getName() + "." + fileActions.size());
            fileActions.add(new MoveAction(target, backup).execute());
        }
        replacedFiles.add(target);
        makePath(target);
        write(target, content, charset);
    }

    /** Creates any missing parent directories of {@code target}, recording each for revert. */
    private void makePath(File target) {
        LinkedList<FileAction> mkdirs = new LinkedList<>();
        // Walk upwards until an existing ancestor is found; prepend so the
        // outermost missing directory is created (and recorded) first.
        while (!target.getParentFile().exists()) {
            mkdirs.add(0, new MkDirAction(target.getParentFile()));
            target = target.getParentFile();
        }
        for (FileAction mkdir : mkdirs) {
            mkdir.execute();
        }
        fileActions.addAll(mkdirs);
    }

    /**
     * Appends a line to a file this rule already wrote, or replaces the file
     * with just that line if the rule has not touched it yet.
     */
    public void appendLine(File file, String line, Charset charset) {
        if (replacedFiles.contains(file)) {
            append(file, line, charset);
        } else {
            replace(file, line, charset);
        }
    }

    private void write(File target, String content, Charset charset) {
        fileActions.add(new CreateFileAction(target, content, charset).execute());
    }

    private void append(File file, String line, Charset charset) {
        try {
            com.google.common.io.Files.append(System.lineSeparator() + line, file, charset);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    protected void after() {
        // Revert in reverse order: files are deleted/moved back before the
        // directories that contain them are removed.
        for (FileAction action : Lists.reverse(fileActions)) {
            action.revert();
        }
    }

    /** A revertible file-system mutation. */
    private interface FileAction {
        FileAction execute();

        void revert();
    }

    /** Moves a file from one location to another; revert moves it back. */
    private static class MoveAction implements FileAction {
        private final File from;
        private final File to;

        private MoveAction(File from, File to) {
            this.from = from;
            this.to = to;
        }

        @Override
        public MoveAction execute() {
            move(from, to);
            return this;
        }

        @Override
        public void revert() {
            move(to, from);
        }

        private void move(File origin, File target) {
            try {
                java.nio.file.Files.move(origin.toPath(), target.toPath());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }

    /** Creates a single directory; revert deletes it (must be empty by then). */
    private static class MkDirAction implements FileAction {
        private final File dir;

        private MkDirAction(File dir) {
            this.dir = dir;
        }

        @Override
        public FileAction execute() {
            checkState(dir.mkdir());
            return this;
        }

        @Override
        public void revert() {
            checkState(dir.delete());
        }
    }

    /** Writes a file with the given content; revert deletes it. */
    private static class CreateFileAction implements FileAction {
        private final File target;
        private final String content;
        private final Charset charset;

        private CreateFileAction(File target, String content, Charset charset) {
            this.target = target;
            this.content = content;
            this.charset = charset;
        }

        @Override
        public FileAction execute() {
            try {
                com.google.common.io.Files.write(content, target, charset);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            return this;
        }

        @Override
        public void revert() {
            checkState(target.delete());
        }
    }
}
| 1,905 |
1,738 | /*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#include "precompiled.h"
#include <QPainter>
#include <Components/Slots/Execution/ExecutionSlotConnectionPin.h>
#include <GraphCanvas/Styling/definitions.h>
namespace GraphCanvas
{
    ///////////////////////////////
    // ExecutionSlotConnectionPin
    ///////////////////////////////

    // Connection pin widget for execution slots; rendered as a triangle
    // pointing to the right.
    ExecutionSlotConnectionPin::ExecutionSlotConnectionPin(const AZ::EntityId& slotId)
        : SlotConnectionPin(slotId)
    {
    }

    ExecutionSlotConnectionPin::~ExecutionSlotConnectionPin()
    {
    }

    // Re-resolves the pin's styles: the base execution-pin style plus the
    // ".connected" selector used while a connection is attached.
    void ExecutionSlotConnectionPin::OnRefreshStyle()
    {
        m_style.SetStyle(m_slotId, Styling::Elements::ExecutionConnectionPin);
        m_connectedStyle.SetStyle(m_slotId, ".connected");
    }

    void ExecutionSlotConnectionPin::DrawConnectionPin(QPainter *painter, QRectF drawRect, bool isConnected)
    {
        // draw triangle, pointing to the right
        if (isConnected)
        {
            // Add fill color for slots if it is connected
            painter->setBrush(m_connectedStyle.GetBrush(Styling::Attribute::BackgroundColor, QColor{ 0xFF, 0xFF, 0xFF }));
        }

        // Miter join keeps the triangle's corners sharp instead of rounded.
        QPen decorationBorder = m_style.GetBorder();
        decorationBorder.setJoinStyle(Qt::PenJoinStyle::MiterJoin);
        painter->setPen(decorationBorder);

        // Triangle spans the largest square that fits in drawRect, centered:
        // two left corners and the rightmost apex.
        qreal sideLength = AZ::GetMin(drawRect.width(), drawRect.height());
        qreal halfLength = sideLength * 0.5;
        painter->drawConvexPolygon(QPolygonF({
            drawRect.center() + QPointF(-halfLength, -halfLength),
            drawRect.center() + QPointF(halfLength,0),
            drawRect.center() + QPointF(-halfLength, halfLength)
        }));
    }
}
12,278 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: <EMAIL> (<NAME>)
#include "s2/s2region_term_indexer.h"
#include <cstdio>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include "s2/base/commandlineflags.h"
#include "s2/base/logging.h"
#include <gtest/gtest.h>
#include "s2/s2cap.h"
#include "s2/s2cell.h"
#include "s2/s2cell_id.h"
#include "s2/s2cell_union.h"
#include "s2/s2testing.h"
using std::vector;
DEFINE_int32(iters, 400, "number of iterations for testing");
namespace {
enum QueryType { POINT, CAP };
// Builds an index of FLAGS_iters random regions -- points when
// options.index_contains_points_only() is true, otherwise S2Caps of random
// size -- then issues FLAGS_iters queries (points when query_type == POINT,
// caps otherwise) and checks that the term-based result set matches a
// brute-force covering-intersection computation.
void TestRandomCaps(const S2RegionTermIndexer::Options& options,
                    QueryType query_type) {
  // This function creates an index consisting either of points (if
  // options.index_contains_points_only() is true) or S2Caps of random size.
  // It then executes queries consisting of points (if query_type == POINT)
  // or S2Caps of random size (if query_type == CAP).
  S2RegionTermIndexer indexer(options);
  S2RegionCoverer coverer(options);
  vector<S2Cap> caps;
  vector<S2CellUnion> coverings;
  std::unordered_map<string, vector<int>> index;
  int index_terms = 0, query_terms = 0;
  for (int i = 0; i < FLAGS_iters; ++i) {
    // Choose the region to be indexed: either a single point or a cap
    // of random size (up to a full sphere).
    S2Cap cap;
    vector<string> terms;
    if (options.index_contains_points_only()) {
      cap = S2Cap::FromPoint(S2Testing::RandomPoint());
      terms = indexer.GetIndexTerms(cap.center(), "");
    } else {
      cap = S2Testing::GetRandomCap(
          0.3 * S2Cell::AverageArea(options.max_level()),
          4.0 * S2Cell::AverageArea(options.min_level()));
      terms = indexer.GetIndexTerms(cap, "");
    }
    caps.push_back(cap);
    coverings.push_back(coverer.GetCovering(cap));
    for (const string& term : terms) {
      index[term].push_back(i);
    }
    index_terms += terms.size();
  }
  for (int i = 0; i < FLAGS_iters; ++i) {
    // Choose the region to be queried: either a random point or a cap of
    // random size.
    // BUG FIX: the condition was previously "query_type == QueryType::CAP",
    // which inverted the two branches (POINT queries used caps and CAP
    // queries used points).  A point query must correspond to POINT.
    S2Cap cap;
    vector<string> terms;
    if (query_type == QueryType::POINT) {
      cap = S2Cap::FromPoint(S2Testing::RandomPoint());
      terms = indexer.GetQueryTerms(cap.center(), "");
    } else {
      cap = S2Testing::GetRandomCap(
          0.3 * S2Cell::AverageArea(options.max_level()),
          4.0 * S2Cell::AverageArea(options.min_level()));
      terms = indexer.GetQueryTerms(cap, "");
    }
    // Compute the expected results of the S2Cell query by brute force.
    S2CellUnion covering = coverer.GetCovering(cap);
    std::set<int> expected, actual;
    // size_t avoids a signed/unsigned comparison with caps.size().
    for (size_t j = 0; j < caps.size(); ++j) {
      if (covering.Intersects(coverings[j])) {
        expected.insert(static_cast<int>(j));
      }
    }
    for (const string& term : terms) {
      actual.insert(index[term].begin(), index[term].end());
    }
    EXPECT_EQ(expected, actual);
    query_terms += terms.size();
  }
  printf("Index terms/doc: %.2f, Query terms/doc: %.2f\n",
         static_cast<double>(index_terms) / FLAGS_iters,
         static_cast<double>(query_terms) / FLAGS_iters);
}
// We run one test case for each combination of space vs. time optimization,
// and indexing regions vs. only points.

TEST(S2RegionTermIndexer, IndexRegionsQueryRegionsOptimizeTime) {
  S2RegionTermIndexer::Options options;
  options.set_optimize_for_space(false);       // Optimize for time.
  options.set_min_level(0);                    // Use face cells.
  options.set_max_level(16);
  options.set_max_cells(20);
  TestRandomCaps(options, QueryType::CAP);
}

TEST(S2RegionTermIndexer, IndexRegionsQueryPointsOptimizeTime) {
  S2RegionTermIndexer::Options options;
  options.set_optimize_for_space(false);       // Optimize for time.
  options.set_min_level(0);                    // Use face cells.
  options.set_max_level(16);
  options.set_max_cells(20);
  TestRandomCaps(options, QueryType::POINT);
}

// Same as above but with min/max levels constrained and a level_mod that
// skips intermediate levels.
TEST(S2RegionTermIndexer, IndexRegionsQueryRegionsOptimizeTimeWithLevelMod) {
  S2RegionTermIndexer::Options options;
  options.set_optimize_for_space(false);       // Optimize for time.
  options.set_min_level(6);                    // Constrain min/max levels.
  options.set_max_level(12);
  options.set_level_mod(3);
  TestRandomCaps(options, QueryType::CAP);
}

TEST(S2RegionTermIndexer, IndexRegionsQueryRegionsOptimizeSpace) {
  S2RegionTermIndexer::Options options;
  options.set_optimize_for_space(true);        // Optimize for space.
  options.set_min_level(4);
  options.set_max_level(S2CellId::kMaxLevel);  // Use leaf cells.
  options.set_max_cells(8);
  TestRandomCaps(options, QueryType::CAP);
}

TEST(S2RegionTermIndexer, IndexPointsQueryRegionsOptimizeTime) {
  S2RegionTermIndexer::Options options;
  options.set_optimize_for_space(false);       // Optimize for time.
  options.set_min_level(0);                    // Use face cells.
  options.set_max_level(S2CellId::kMaxLevel);
  options.set_level_mod(2);
  options.set_max_cells(20);
  options.set_index_contains_points_only(true);
  TestRandomCaps(options, QueryType::CAP);
}

TEST(S2RegionTermIndexer, IndexPointsQueryRegionsOptimizeSpace) {
  S2RegionTermIndexer::Options options;
  options.set_optimize_for_space(true);        // Optimize for space.
  options.set_index_contains_points_only(true);
  // Use default parameter values.
  TestRandomCaps(options, QueryType::CAP);
}

TEST(S2RegionTermIndexer, MaxLevelSetLoosely) {
  // Test that correct terms are generated even when (max_level - min_level)
  // is not a multiple of level_mod.  With min_level=1, level_mod=2, the
  // effective levels are the same for max_level 19 and 20, so both indexers
  // must emit identical terms.
  S2RegionTermIndexer::Options options;
  options.set_min_level(1);
  options.set_level_mod(2);
  options.set_max_level(19);
  S2RegionTermIndexer indexer1(options);
  options.set_max_level(20);
  S2RegionTermIndexer indexer2(options);

  S2Point point = S2Testing::RandomPoint();
  EXPECT_EQ(indexer1.GetIndexTerms(point, ""),
            indexer2.GetIndexTerms(point, ""));
  EXPECT_EQ(indexer1.GetQueryTerms(point, ""),
            indexer2.GetQueryTerms(point, ""));

  S2Cap cap = S2Testing::GetRandomCap(0.0, 1.0);  // Area range.
  EXPECT_EQ(indexer1.GetIndexTerms(cap, ""),
            indexer2.GetIndexTerms(cap, ""));
  EXPECT_EQ(indexer1.GetQueryTerms(cap, ""),
            indexer2.GetQueryTerms(cap, ""));
}

// Moved-from options must be carried over to the destination object.
TEST(S2RegionTermIndexer, MoveConstructor) {
  S2RegionTermIndexer x;
  x.mutable_options()->set_max_cells(12345);
  S2RegionTermIndexer y = std::move(x);
  EXPECT_EQ(12345, y.options().max_cells());
}

TEST(S2RegionTermIndexer, MoveAssignmentOperator) {
  S2RegionTermIndexer x;
  x.mutable_options()->set_max_cells(12345);
  S2RegionTermIndexer y;
  y.mutable_options()->set_max_cells(0);
  y = std::move(x);
  EXPECT_EQ(12345, y.options().max_cells());
}
} // namespace
| 2,844 |
1,150 | <filename>android-frontia/frontia/src/main/java/moe/studio/frontia/core/PluginLoader.java
/*
* Copyright (c) 2016 <NAME> (<EMAIL>)
*/
package moe.studio.frontia.core;
import android.support.annotation.NonNull;
import moe.studio.frontia.ext.PluginError;
/**
 * Plugin loader: resolves, installs and loads plugins and the classes they
 * contain.
 * <p>
 * (Javadoc translated to English from the original Chinese.)
 */
public interface PluginLoader {

    /**
     * Loads the plugin described by the given request.
     *
     * @param request the plugin request
     * @return the plugin request
     */
    PluginRequest load(@NonNull PluginRequest request);

    /**
     * Loads the given plugin.
     * <p>
     * This requires that you already have a usable {@linkplain Plugin}; if you
     * do not, see {@linkplain #load(PluginRequest)}.
     *
     * @param manager the plugin manager
     * @param plugin  the plugin
     * @return the plugin
     * @throws PluginError.LoadError    if loading the plugin fails
     * @throws PluginError.InstallError if installing the plugin fails
     */
    Plugin load(PluginManager manager, Plugin plugin) throws PluginError.LoadError, PluginError.InstallError;

    /**
     * Retrieves a plugin.
     *
     * @param packageName the plugin id
     * @return the plugin
     */
    Plugin getPlugin(String packageName);

    /**
     * Stores a plugin.
     *
     * @param id     the plugin id
     * @param plugin the plugin
     */
    void putPlugin(String id, Plugin plugin);

    /**
     * Loads the specified class from the plugin.
     *
     * @param plugin    the plugin
     * @param className the class name
     * @return the target class
     * @throws PluginError.LoadError if loading the plugin class fails
     */
    Class loadClass(@NonNull Plugin plugin, String className) throws PluginError.LoadError;

    /**
     * Creates the behavior object for the given plugin.
     *
     * @throws PluginError.LoadError if the behavior cannot be created
     */
    PluginBehavior createBehavior(Plugin plugin) throws PluginError.LoadError;
}
| 744 |
450 | # The MIT License
#
# Copyright (C) 2009 <NAME>
#
# Copyright (C) 2008-2009 Abilisoft Ltd.
#
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import unittest
import apphelper
import psi.arch
import psi.mount
class MountAttrTests(unittest.TestCase):
    """Checks the attributes psi.mount reports for the root filesystem
    against the platform's own mount table (``/etc/mtab`` on Linux, the
    ``mount`` command elsewhere).
    """

    def setUp(self):
        # All mounts as seen by psi, plus the psi mount object for '/'.
        self.mounts = psi.mount.mounts()
        for mount in psi.mount.mounts():
            if mount.mountpoint == '/':
                break
        self.m = mount
        # Collect the expected device/mountpoint/fstype/options for '/'
        # from a platform-specific authoritative source.
        if isinstance(psi.arch.arch_type(), psi.arch.ArchLinux):
            # Linux: /etc/mtab lines are "device mountpoint fstype options ...".
            fd = open('/etc/mtab')
            mtab = fd.readlines()
            fd.close()
            for line in mtab:
                if line.split()[1] == '/':
                    break
            mount = line.split()
            self.device = mount[0]
            self.mountpoint = mount[1]
            self.fstype = mount[2]
            self.options = mount[3]
        elif isinstance(psi.arch.arch_type(), psi.arch.ArchSunOS):
            # Solaris: `mount -p` prints a vfstab-style table; the options
            # column (field 7) may be absent.
            mounts = apphelper.run(['/usr/sbin/mount', '-p']).split('\n')
            for line in mounts:
                if line.split()[2] == '/':
                    break
            mount = line.split()
            self.device = mount[0]
            self.mountpoint = mount[2]
            self.fstype = mount[3]
            if len(mount) >= 7:
                self.options = mount[6]
            else:
                self.options = ''
            # Special case to find dev=XXXXX option
            if psi.arch.arch_type().release_info > (5, 8):
                # Newer Solaris: the dev= option only shows up in the
                # verbose listing, so fish it out of `mount -v`.
                mounts = apphelper.run(['/usr/sbin/mount', '-v']).split('\n')
                for line in mounts:
                    if line.split()[2] == '/':
                        break
                opts = line.split()[5]
                opts = opts.split('/')
                for o in opts:
                    if o[:4] == 'dev=':
                        break
                self.options += ',' + o
                self.options = self.options.strip(',')
        elif isinstance(psi.arch.arch_type(), psi.arch.ArchAIX):
            # AIX: skip the two header lines of `mount` output; options are
            # in the last column.
            mounts = apphelper.run(['/usr/sbin/mount']).split('\n')[2:]
            for line in mounts:
                if line.split()[1] == '/':
                    break
            mount = line.split()
            self.device = mount[0]
            self.mountpoint = mount[1]
            self.fstype = mount[2]
            self.options = mount[-1]
        elif isinstance(psi.arch.arch_type(), psi.arch.ArchDarwin):
            # macOS: `mount` prints "device on mountpoint (fstype, opt, ...)";
            # strip the parentheses/comma and drop the leading fstype entry.
            mounts = apphelper.run(['/sbin/mount']).split('\n')
            for line in mounts:
                if line.split()[2] == '/':
                    break
            mount = line.split()
            self.device = mount[0]
            self.mountpoint = mount[2]
            self.fstype = mount[3][1:-1]
            self.options = ','.join(line[line.find('('):line.find(')')].split(', ')[1:])

    def test_enumerate(self):
        # mounts() must be iterable.
        self.assert_(hasattr(self.mounts, '__iter__'))

    def test_len(self):
        self.assert_(len(list(self.mounts)) > 0)

    def test_type(self):
        for m in self.mounts:
            self.assert_(isinstance(m, psi.mount.MountBase))

    def test_remote(self):
        # Including remote filesystems can only add mounts, never remove.
        local = len(list(psi.mount.mounts()))
        remote = len(list(psi.mount.mounts(True)))
        self.assert_(remote >= local, '%d >= %d' % (remote, local))

    def test_device(self):
        self.assertEqual(self.m.device, self.device)

    def test_fstype(self):
        self.assertEqual(self.m.fstype, self.fstype)

    def test_options(self):
        self.assertEqual(self.m.options, self.options)

    def test_mountpoint(self):
        self.assertEqual(self.m.mountpoint, self.mountpoint)

    def test_total(self):
        self.assert_(self.m.total > 0)

    def test_free(self):
        # free counts blocks free for root; it is bounded by total.
        self.assert_(self.m.free > 0)
        self.assert_(self.m.total > self.m.free)

    def test_available(self):
        # available counts blocks free for unprivileged users, so it can
        # never exceed free.
        self.assert_(self.m.available > 0)
        self.assert_(self.m.free >= self.m.available,
                     '%d > %d' % (self.m.free, self.m.available))

    def test_inodes(self):
        self.assert_(self.m.inodes > 0)

    def test_free_inodes(self):
        self.assert_(self.m.free_inodes > 0)
        self.assert_(self.m.inodes > self.m.free_inodes)

    def test_available_inodes(self):
        self.assert_(self.m.available_inodes > 0)
        self.assert_(self.m.free_inodes >= self.m.available_inodes,
                     '%d > %d' % (self.m.free_inodes, self.m.available_inodes))
class MountMethodsTests(unittest.TestCase):
    """Tests for methods exposed on psi mount objects."""

    def setUp(self):
        # Grab an arbitrary mount object: the first one the iterator yields.
        for first_mount in psi.mount.mounts():
            break
        self.m = first_mount

    def test_refresh(self):
        # refresh() must not change the identity of the mount it describes.
        mountpoint_before = self.m.mountpoint
        self.m.refresh()
        self.assertEqual(mountpoint_before, self.m.mountpoint)
if __name__ == '__main__':
unittest.main()
| 2,765 |
3,101 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dtstack.flinkx.sink.format;
import com.dtstack.flinkx.conf.BaseFileConf;
import com.dtstack.flinkx.enums.Semantic;
import com.dtstack.flinkx.enums.SizeUnitType;
import com.dtstack.flinkx.sink.WriteMode;
import com.dtstack.flinkx.throwable.WriteRecordException;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.table.data.RowData;
import org.apache.commons.lang3.StringUtils;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* @author jiangbo
* @date 2019/8/28
*/
public abstract class BaseFileOutputFormat extends BaseRichOutputFormat {

    /** Name of the hidden staging sub-directory under the output path. */
    protected static final String TMP_DIR_NAME = ".data";

    protected BaseFileConf baseFileConf;

    /** The first half of the file name currently written */
    protected String currentFileNamePrefix;
    /** Full file name */
    protected String currentFileName;
    /** Data file write path */
    protected String outputFilePath;
    /** Temporary data file write path, outputFilePath + /.data */
    protected String tmpPath;

    /** Total rows accumulated from all flushed blocks (files). */
    protected long sumRowsOfBlock;
    /** Rows written to the block (file) currently open. */
    protected long rowsOfCurrentBlock;

    /** Current file index number */
    protected int currentFileIndex = 0;

    /** File paths collected for the pre-commit phase (NOTE(review): presumably moved on checkpoint commit — confirm against subclass usage). */
    protected List<String> preCommitFilePathList = new ArrayList<>();

    /** Record count at which the current file size is next checked. */
    protected long nextNumForCheckDataSize;
    /** Timestamp (millis) of the most recent successful write. */
    protected long lastWriteTime = System.currentTimeMillis();
    @Override
    public void initializeGlobal(int parallelism) {
        // Runs once before any task opens: prepare the output directories
        // according to the configured write mode.
        initVariableFields();
        if (WriteMode.OVERWRITE.name().equalsIgnoreCase(baseFileConf.getWriteMode())
                && StringUtils.isBlank(baseFileConf.getSavePointPath())) {
            // not delete the data directory when restoring from checkpoint
            deleteDataDir();
        } else {
            deleteTmpDataDir();
        }
        checkOutputDir();
    }
    @Override
    public void finalizeGlobal(int parallelism) {
        // Runs once after all tasks finish: publish the staged data files
        // from the temporary directory into the final output directory.
        initVariableFields();
        moveAllTmpDataFileToDir();
    }
    @Override
    public void open(int taskNumber, int numTasks) throws IOException {
        super.open(taskNumber, numTasks);
        // This file sink always runs with exactly-once semantics.
        super.checkpointMode = CheckpointingMode.EXACTLY_ONCE;
        super.semantic = Semantic.EXACTLY_ONCE;
    }
    @Override
    protected void openInternal(int taskNumber, int numTasks) throws IOException {
        // When restoring from state, continue numbering after the last
        // checkpointed file index.
        if (null != formatState && formatState.getFileIndex() > -1) {
            currentFileIndex = formatState.getFileIndex() + 1;
        }
        LOG.info("Start current File Index:{}", currentFileIndex);

        // jobId + subtask number makes the prefix unique per parallel task.
        currentFileNamePrefix = jobId + "_" + taskNumber;
        LOG.info("Channel:[{}], currentFileNamePrefix:[{}]", taskNumber, currentFileNamePrefix);
        initVariableFields();
    }
protected void initVariableFields() {
// The file name here is actually the partition name
if (StringUtils.isNotBlank(baseFileConf.getFileName())) {
outputFilePath =
baseFileConf.getPath() + File.separatorChar + baseFileConf.getFileName();
} else {
outputFilePath = baseFileConf.getPath();
}
tmpPath = outputFilePath + File.separatorChar + TMP_DIR_NAME;
nextNumForCheckDataSize = baseFileConf.getNextCheckRows();
openSource();
}
protected void nextBlock() {
currentFileName = currentFileNamePrefix + "_" + currentFileIndex + getExtension();
}
@Override
protected void writeMultipleRecordsInternal() {
throw new UnsupportedOperationException("Do not support batch write");
}
@Override
public void writeSingleRecordInternal(RowData rowData) throws WriteRecordException {
writeSingleRecordToFile(rowData);
rowsOfCurrentBlock++;
checkCurrentFileSize();
lastRow = rowData;
lastWriteTime = System.currentTimeMillis();
}
private void checkCurrentFileSize() {
if (numWriteCounter.getLocalValue() < nextNumForCheckDataSize) {
return;
}
long currentFileSize = getCurrentFileSize();
if (currentFileSize > baseFileConf.getMaxFileSize()) {
flushData();
}
nextNumForCheckDataSize += baseFileConf.getNextCheckRows();
LOG.info(
"current file: {}, size = {}, nextNumForCheckDataSize = {}",
currentFileName,
SizeUnitType.readableFileSize(currentFileSize),
nextNumForCheckDataSize);
}
public void flushData() {
if (rowsOfCurrentBlock != 0) {
flushDataInternal();
sumRowsOfBlock += rowsOfCurrentBlock;
LOG.info(
"flush file:{}, rowsOfCurrentBlock = {}, sumRowsOfBlock = {}",
currentFileName,
rowsOfCurrentBlock,
sumRowsOfBlock);
rowsOfCurrentBlock = 0;
}
}
@Override
protected void preCommit() {
flushData();
if (sumRowsOfBlock != 0) {
preCommitFilePathList = copyTmpDataFileToDir();
}
snapshotWriteCounter.add(sumRowsOfBlock);
sumRowsOfBlock = 0;
formatState.setJobId(jobId);
formatState.setFileIndex(currentFileIndex - 1);
}
@Override
public void commit(long checkpointId) {
deleteDataFiles(preCommitFilePathList, tmpPath);
preCommitFilePathList.clear();
}
@Override
public void rollback(long checkpointId) {
deleteDataFiles(preCommitFilePathList, outputFilePath);
preCommitFilePathList.clear();
}
@Override
public void closeInternal() throws IOException {
flushData();
snapshotWriteCounter.add(sumRowsOfBlock);
sumRowsOfBlock = 0;
closeSource();
}
/** Check whether the writing path exists and whether it is a directory */
protected abstract void checkOutputDir();
/** Overwrite mode to clear the data file directory */
protected abstract void deleteDataDir();
/** Clear temporary data files */
protected abstract void deleteTmpDataDir();
/** Open resource */
protected abstract void openSource();
/**
* Get file suffix
*
* @return .gz
*/
protected abstract String getExtension();
/**
* Get the actual size of the file currently written
*
* @return
*/
protected abstract long getCurrentFileSize();
/**
* Write single data to file
*
* @param rowData Data to be written
* @throws WriteRecordException Dirty data abnormal
*/
protected abstract void writeSingleRecordToFile(RowData rowData) throws WriteRecordException;
/** flush data to storage media */
protected abstract void flushDataInternal();
/**
* copy the temporary data file corresponding to the channel index to the official path
*
* @return pre Commit File Path List
*/
protected abstract List<String> copyTmpDataFileToDir();
/** Delete the data files submitted in the pre-submission phase under the official directory */
protected abstract void deleteDataFiles(List<String> preCommitFilePathList, String path);
/**
* It is closed normally, triggering files in the .data directory to move to the data directory
*/
protected abstract void moveAllTmpDataFileToDir();
/** close Source */
protected abstract void closeSource();
/**
* Get file compression ratio
*
* @return 压缩比 < 1
*/
public abstract float getDeviation();
public long getLastWriteTime() {
return lastWriteTime;
}
public void setBaseFileConf(BaseFileConf baseFileConf) {
this.baseFileConf = baseFileConf;
}
}
| 3,102 |
4,772 | <filename>jpa/deferred/src/main/java/example/repo/Customer508Repository.java
package example.repo;
import example.model.Customer508;
import java.util.List;
import org.springframework.data.repository.CrudRepository;
/**
 * Spring Data repository for {@code Customer508} entities with {@code Long} ids.
 * CRUD operations are inherited from {@link CrudRepository}.
 */
public interface Customer508Repository extends CrudRepository<Customer508, Long> {

    /**
     * Derived query: returns all customers whose {@code lastName} property
     * equals the given value (query generated from the method name).
     */
    List<Customer508> findByLastName(String lastName);
}
| 110 |
1,738 | /*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#pragma once
///
/// Perform debugging operations for any HMD devices that are connected to the system.
/// This can include drawing debug geometry to represent different HMD objects (such as HMD controllers)
/// and logging any specific info regarding the HMDs themselves.
///
#include <AzCore/Component/Component.h>
#include <AzCore/std/containers/vector.h>
#include <HMDBus.h>
#include <VRControllerBus.h>
#include <AzCore/Component/TickBus.h>
#include <AzCore/std/containers/bitset.h>
// for input
#include <InputNotificationBus.h>
#include <AzFramework/Input/Events/InputChannelEventListener.h>
// Engine component that provides HMD debugging: draws debug geometry/info for
// connected HMD devices and exposes a free-fly debug camera driven by
// keyboard input. Handles the HMDDebuggerRequestBus, raw input events and the
// per-frame tick.
class HMDDebuggerComponent
    : public AZ::VR::HMDDebuggerRequestBus::Handler
    , public AzFramework::InputChannelEventListener
    , public AZ::TickBus::Handler
    , public AZ::Component
{
    public:

        // Movement keys tracked for the debug camera; Count is the array size
        // for m_debugCameraInputState.
        enum EHMDDebugCameraKeys
        {
            Forward = 0,
            Back,
            Left,
            Right,

            Count
        };

        AZ_COMPONENT(HMDDebuggerComponent, "{CFBDF646-1E50-4863-9782-D3BA7EB44DC5}");
        static void Reflect(AZ::ReflectContext* context);
        static void GetProvidedServices(AZ::ComponentDescriptor::DependencyArrayType& provided);

        HMDDebuggerComponent() = default;
        ~HMDDebuggerComponent() override = default;

        /// AZ::Component //////////////////////////////////////////////////////////
        void Init() override;
        void Activate() override;
        void Deactivate() override;
        ////////////////////////////////////////////////////////////////////////////

        /// TickBus ////////////////////////////////////////////////////////////////
        void OnTick(float deltaTime, AZ::ScriptTimePoint time) override;
        ////////////////////////////////////////////////////////////////////////////

        /// AzFramework::InputChannelEventListener ////////////////////////////////
        bool OnInputChannelEventFiltered(const AzFramework::InputChannel& inputChannel) override;
        ///////////////////////////////////////////////////////////////////////////

        /// HMDDebuggerBus /////////////////////////////////////////////////////////
        void EnableInfo(bool enable) override;
        void EnableCamera(bool enable) override;
        ////////////////////////////////////////////////////////////////////////////

    private:

        // called whenever debug flags have changed
        void OnDebugFlagsChanged();

        // Update HMD Debug info
        void UpdateDebugInfo(float delta);

        // Update HMD Debug camera
        void UpdateDebugCamera(float delta);

        // This class is not copyable.
        HMDDebuggerComponent(const HMDDebuggerComponent& other) = delete;
        HMDDebuggerComponent& operator=(const HMDDebuggerComponent& other) = delete;

        // HMD Debug flags (bit indices into m_debugFlags)
        enum EHMDDebugFlags
        {
            Info = 0,
            Camera = 1
        };

        // One bit per EHMDDebugFlags entry; set via EnableInfo/EnableCamera.
        AZStd::bitset<2> m_debugFlags;
        // Accumulated debug-camera orientation (presumably Euler angles —
        // confirm in the implementation).
        AZ::Vector3 m_debugCameraRotation;
        // Pressed/released state for each debug-camera movement key.
        bool m_debugCameraInputState[EHMDDebugCameraKeys::Count];
};
| 996 |
1,988 | <gh_stars>1000+
/*
* (C) 2011,2017 <NAME>
*
* Botan is released under the Simplified BSD License (see license.txt)
*/
#include <botan/nist_keywrap.h>
#include <botan/block_cipher.h>
#include <botan/internal/loadstor.h>
#include <botan/exceptn.h>
namespace Botan {
namespace {
/*
* Core W() wrapping function from RFC 3394 / NIST SP 800-38F.
* Wraps input_len bytes under bc (must be a 128-bit block cipher, enforced by
* callers) using the given 64-bit ICV. When input_len is not a multiple of 8
* the final semiblock is implicitly zero-padded (R is zero-initialized),
* which is exactly what the KWP (padded) variant relies on.
*/
std::vector<uint8_t>
raw_nist_key_wrap(const uint8_t input[],
                  size_t input_len,
                  const BlockCipher& bc,
                  uint64_t ICV)
   {
   // n = number of 64-bit semiblocks (rounded up)
   const size_t n = (input_len + 7) / 8;

   // R[0..7] is reserved for the running A value; plaintext goes in R[8..]
   secure_vector<uint8_t> R((n + 1) * 8);
   secure_vector<uint8_t> A(16);

   store_be(ICV, A.data());

   copy_mem(&R[8], input, input_len);

   // 6 passes over all semiblocks, per the spec
   for(size_t j = 0; j <= 5; ++j)
      {
      for(size_t i = 1; i <= n; ++i)
         {
         // Step counter t = (n*j)+i, XORed into A after encryption
         const uint32_t t = static_cast<uint32_t>((n * j) + i);

         // A = MSB64(E(A || R[i])), R[i] = LSB64(E(A || R[i]))
         copy_mem(&A[8], &R[8*i], 8);
         bc.encrypt(A.data());
         copy_mem(&R[8*i], &A[8], 8);

         uint8_t t_buf[4] = { 0 };
         store_be(t, t_buf);
         // XOR t into the low half of the 64-bit A value
         xor_buf(&A[4], t_buf, 4);
         }
      }

   // Output is A || R[1..n]
   copy_mem(R.data(), A.data(), 8);

   return std::vector<uint8_t>(R.begin(), R.end());
   }
/*
* Inverse W() function from RFC 3394 / NIST SP 800-38F.
* Unwraps input_len bytes (at least two semiblocks, multiple of 8) and
* returns the recovered plaintext; the recovered 64-bit ICV is written to
* ICV_out for the caller to authenticate. This function itself performs no
* authentication.
*/
secure_vector<uint8_t>
raw_nist_key_unwrap(const uint8_t input[],
                    size_t input_len,
                    const BlockCipher& bc,
                    uint64_t& ICV_out)
   {
   if(input_len < 16 || input_len % 8 != 0)
      throw Invalid_Argument("Bad input size for NIST key unwrap");

   // n = number of wrapped semiblocks (excluding the leading A block)
   const size_t n = (input_len - 8) / 8;

   secure_vector<uint8_t> R(n * 8);
   secure_vector<uint8_t> A(16);

   // A starts as the first 8 input bytes; R holds the remaining semiblocks
   for(size_t i = 0; i != 8; ++i)
      A[i] = input[i];

   copy_mem(R.data(), input + 8, input_len - 8);

   // Run the 6 wrapping passes in reverse order
   for(size_t j = 0; j <= 5; ++j)
      {
      for(size_t i = n; i != 0; --i)
         {
         // Same step counter as wrapping, consumed in reverse: t = (5-j)*n + i
         const uint32_t t = static_cast<uint32_t>((5 - j) * n + i);

         uint8_t t_buf[4] = { 0 };
         store_be(t, t_buf);
         // Undo the XOR of t before decrypting
         xor_buf(&A[4], t_buf, 4);

         // (A || R[i]) = D(A || R[i])
         copy_mem(&A[8], &R[8*(i-1)], 8);
         bc.decrypt(A.data());
         copy_mem(&R[8*(i-1)], &A[8], 8);
         }
      }

   ICV_out = load_be<uint64_t>(A.data(), 0);

   return R;
   }
}
/*
* NIST KW (RFC 3394 / SP 800-38F) key wrapping.
* Requires a 128-bit block cipher and an input that is a whole number of
* 64-bit semiblocks; uses the fixed ICV defined by RFC 3394.
*/
std::vector<uint8_t>
nist_key_wrap(const uint8_t input[],
              size_t input_len,
              const BlockCipher& bc)
   {
   if(bc.block_size() != 16)
      throw Invalid_Argument("NIST key wrap algorithm requires a 128-bit cipher");

   if(input_len % 8 != 0)
      throw Invalid_Argument("Bad input size for NIST key wrap");

   // Fixed initial value from RFC 3394 section 2.2.3.1 (0xA6 repeated)
   const uint64_t default_icv = 0xA6A6A6A6A6A6A6A6;

   return raw_nist_key_wrap(input, input_len, bc, default_icv);
   }
/*
* NIST KW (RFC 3394 / SP 800-38F) key unwrapping.
* Verifies the recovered ICV against the fixed RFC 3394 value and throws
* Invalid_Authentication_Tag if it does not match.
*/
secure_vector<uint8_t>
nist_key_unwrap(const uint8_t input[],
                size_t input_len,
                const BlockCipher& bc)
   {
   if(bc.block_size() != 16)
      throw Invalid_Argument("NIST key wrap algorithm requires a 128-bit cipher");

   if(input_len < 16 || input_len % 8 != 0)
      throw Invalid_Argument("Bad input size for NIST key unwrap");

   uint64_t recovered_icv = 0;
   secure_vector<uint8_t> keydata = raw_nist_key_unwrap(input, input_len, bc, recovered_icv);

   // Authenticate: the unwrapped ICV must equal the fixed RFC 3394 value
   const uint64_t expected_icv = 0xA6A6A6A6A6A6A6A6;
   if(recovered_icv != expected_icv)
      throw Invalid_Authentication_Tag("NIST key unwrap failed");

   return keydata;
   }
/*
* NIST KWP (key wrap with padding, SP 800-38F / RFC 5649).
* The ICV here is the fixed AIV 0xA65959A6 concatenated with the 32-bit
* plaintext length; raw_nist_key_wrap zero-pads the final semiblock.
*/
std::vector<uint8_t>
nist_key_wrap_padded(const uint8_t input[],
                     size_t input_len,
                     const BlockCipher& bc)
   {
   if(bc.block_size() != 16)
      throw Invalid_Argument("NIST key wrap algorithm requires a 128-bit cipher");

   // AIV || length, per RFC 5649 section 3
   const uint64_t ICV = 0xA65959A600000000 | static_cast<uint32_t>(input_len);

   if(input_len <= 8)
      {
      /*
      * Special case for small inputs: if input <= 8 bytes just use ECB
      * (the whole message fits in a single 128-bit block: ICV || padded data)
      */
      std::vector<uint8_t> block(16);
      store_be(ICV, block.data());
      copy_mem(block.data() + 8, input, input_len);
      bc.encrypt(block);
      return block;
      }
   else
      {
      return raw_nist_key_wrap(input, input_len, bc, ICV);
      }
   }
/*
* NIST KWP (key wrap with padding) unwrapping.
* Recovers the ICV, verifies the fixed AIV 0xA65959A6, extracts the encoded
* plaintext length, validates the zero padding, and returns the plaintext
* truncated to that length. Throws Invalid_Authentication_Tag on any check
* failure.
*/
secure_vector<uint8_t>
nist_key_unwrap_padded(const uint8_t input[],
                       size_t input_len,
                       const BlockCipher& bc)
   {
   if(bc.block_size() != 16)
      throw Invalid_Argument("NIST key wrap algorithm requires a 128-bit cipher");

   if(input_len < 16 || input_len % 8 != 0)
      throw Invalid_Argument("Bad input size for NIST key unwrap");

   uint64_t ICV_out = 0;
   secure_vector<uint8_t> R;

   if(input_len == 16)
      {
      // Single-block case: mirror of the <= 8 byte ECB special case in wrap
      secure_vector<uint8_t> block(input, input + input_len);
      bc.decrypt(block);

      ICV_out = load_be<uint64_t>(block.data(), 0);
      R.resize(8);
      copy_mem(R.data(), block.data() + 8, 8);
      }
   else
      {
      R = raw_nist_key_unwrap(input, input_len, bc, ICV_out);
      }

   // High 32 bits must be the fixed AIV
   if((ICV_out >> 32) != 0xA65959A6)
      throw Invalid_Authentication_Tag("NIST key unwrap failed");

   // Low 32 bits encode the original plaintext length; it must fit within
   // the recovered data and require less than one semiblock of padding
   const size_t len = (ICV_out & 0xFFFFFFFF);

   if(R.size() < 8 || len > R.size() || len < R.size() - 8)
      throw Invalid_Authentication_Tag("NIST key unwrap failed");

   const size_t padding = R.size() - len;

   // All padding bytes must be zero
   for(size_t i = 0; i != padding; ++i)
      {
      if(R[R.size() - i - 1] != 0)
         throw Invalid_Authentication_Tag("NIST key unwrap failed");
      }

   R.resize(R.size() - padding);
   return R;
   }
}
| 2,569 |
1,069 | <reponame>Leopere/django-th
# coding: utf-8
import arrow
import datetime
from django.conf import settings
from django_th.tests.test_main import MainTest
from pocket import Pocket
from th_pocket.forms import PocketProviderForm, PocketConsumerForm
from th_pocket.models import Pocket as PocketModel
from th_pocket.my_pocket import ServicePocket
from unittest.mock import patch
class PocketTest(MainTest):
    """Shared fixture helpers for the Pocket service tests."""

    def create_pocket(self):
        """Create and return a ``PocketModel`` bound to a fresh trigger service."""
        trigger = self.create_triggerservice(consumer_name='ServicePocket')
        fields = {
            'tag': 'test',
            'title': 'foobar',
            'url': 'http://foobar.com/somewhere/other/the/rainbow',
            'tweet_id': '',
            'status': True,
        }
        return PocketModel.objects.create(trigger=trigger, **fields)
class PocketModelAndFormTest(PocketTest):
    """Tests for the Pocket model, its settings, and the provider/consumer forms."""

    def test_pocket(self):
        # The fixture must be a real PocketModel with the expected string forms.
        pocket = self.create_pocket()
        self.assertTrue(isinstance(pocket, PocketModel))
        self.assertEqual(pocket.show(), "My Pocket {}".format(pocket.url))
        self.assertEqual(pocket.__str__(), "{}".format(pocket.url))

    def test_get_config_th(self):
        """The Pocket API key must be present in the settings."""
        self.assertTrue(settings.TH_POCKET_KEY)

    def test_get_services_list(self):
        # The Pocket service must be registered in TH_SERVICES.
        th_service = ('th_pocket.my_pocket.ServicePocket',)
        for service in th_service:
            self.assertIn(service, settings.TH_SERVICES)

    # Forms: a tag is optional, so both a populated and an empty form are valid.

    def test_valid_provider_form(self):
        """The provider form is valid with a tag and with no data at all."""
        pocket = self.create_pocket()
        form = PocketProviderForm(data={'tag': pocket.tag})
        self.assertTrue(form.is_valid())
        form = PocketProviderForm(data={})
        self.assertTrue(form.is_valid())

    def test_valid_consumer_form(self):
        """The consumer form is valid with a tag and with no data at all."""
        pocket = self.create_pocket()
        form = PocketConsumerForm(data={'tag': pocket.tag})
        self.assertTrue(form.is_valid())
        form = PocketConsumerForm(data={})
        self.assertTrue(form.is_valid())
class ServicePocketTest(PocketTest):
    """
    Tests for ServicePocket's read/save behavior, with the third-party
    Pocket client patched out so no network calls are made.
    """
    def setUp(self):
        super(ServicePocketTest, self).setUp()
        # Fixture: a PocketModel row plus the raw values the service consumes.
        self.pocket = self.create_pocket()
        self.date_triggered = datetime.datetime(2013, 6, 10, 00, 00)
        self.data = {'link': 'http://foo.bar/some/thing/else/what/else',
                     'title': 'what else'}
        self.token = '<PASSWORD>'
        self.trigger_id = 1
        self.service = ServicePocket(self.token)
    @patch.object(Pocket, 'get')
    def test_read_data(self, mock1):
        # read_data must query the Pocket API for unread items newer than the
        # trigger date (converted to a unix timestamp via arrow).
        kwargs = {'date_triggered': self.date_triggered,
                  'link': 'http://foo.bar/some/thing/else/what/else',
                  'title': 'what else'}
        since = arrow.get(self.date_triggered).timestamp
        sp = ServicePocket(self.token)
        sp.read_data(**kwargs)
        mock1.assert_called_once_with(since=since, state='unread')
    @patch.object(Pocket, 'add')
    def test_save_data(self, mock1):
        # Sanity-check the fixture before exercising save_data.
        self.assertTrue(self.token)
        self.assertTrue(isinstance(self.trigger_id, int))
        self.assertIn('link', self.data)
        self.assertIn('title', self.data)
        self.assertIsNotNone(self.data['link'])
        self.assertNotEqual(self.data['title'], '')
        # save_data must forward url/title plus the trigger's tag to Pocket.add.
        se = ServicePocket(self.token)
        se.save_data(self.trigger_id, **self.data)
        mock1.assert_called_once_with(url=self.data.get('link'),
                                      title=self.data.get('title'),
                                      tags=self.pocket.tag)
    def test_save_data_no_url(self):
        # Without a link, save_data must fail (return a falsy status) rather
        # than call the Pocket API.
        self.assertTrue(self.token)
        self.assertTrue(isinstance(self.trigger_id, int))
        self.assertIn('link', self.data)
        self.assertIn('title', self.data)
        self.assertIsNotNone(self.data['link'])
        self.assertNotEqual(self.data['title'], '')
        self.data['link'] = ''
        se = ServicePocket(self.token)
        status = se.save_data(self.trigger_id, **{'title': 'what else'})
        self.assertFalse(status)
    def test_get_config_th(self):
        """
        The Pocket settings must exist and contain the consumer key.
        """
        self.assertTrue(settings.TH_POCKET_KEY)
        self.assertIn('consumer_key', settings.TH_POCKET_KEY)
    def test_auth(self):
        # OAuth flow is not unit-testable here (requires user interaction).
        pass
    def test_callback(self):
        # OAuth callback is not unit-testable here.
        pass
| 2,148 |
952 | <reponame>samisouabni/Main
{
"version": "1.21.0",
"description": "A Powerline style prompt for your shell",
"homepage": "https://github.com/justjanne/powerline-go",
"license": "GPL-3.0-or-later",
"architecture": {
"64bit": {
"url": "https://github.com/justjanne/powerline-go/releases/download/v1.21.0/powerline-go-windows-amd64#/powerline-go.exe",
"hash": "F5B495807C9F2BAEFE1D9D2F584AD3D1AFDFD4EDA96C4828B1B458D9908BCD8C"
}
},
"bin": "powerline-go.exe",
"checkver": "github",
"autoupdate": {
"architecture": {
"64bit": {
"url": "https://github.com/justjanne/powerline-go/releases/download/v$version/powerline-go-windows-amd64#/powerline-go.exe"
}
}
}
}
| 427 |
2,742 | <filename>javamelody-swing/src/main/java/net/bull/javamelody/swing/table/TablePopupMenu.java<gh_stars>1000+
/*
* Copyright 2008-2019 by <NAME>
*
* This file is part of Java Melody.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.bull.javamelody.swing.table;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import javax.swing.JPopupMenu;
import org.slf4j.LoggerFactory;
import net.bull.javamelody.swing.MMenuItem;
import net.bull.javamelody.swing.print.MCsvLocalWriter;
import net.bull.javamelody.swing.print.MHtmlWriter;
import net.bull.javamelody.swing.print.MJavaPrinter;
import net.bull.javamelody.swing.print.MJsonWriter;
import net.bull.javamelody.swing.print.MPdfWriter;
import net.bull.javamelody.swing.print.MPrinter;
import net.bull.javamelody.swing.print.MRtfWriter;
import net.bull.javamelody.swing.print.MXmlWriter;
import net.bull.javamelody.swing.util.MSwingUtilities;
/**
* Popup menu des tables.
*
* @author <NAME>
*/
/**
 * Context menu attached to tables, offering one item per available export /
 * print target (CSV, then PDF/RTF when iText is on the classpath, HTML,
 * XML/JSON when XStream is on the classpath, and the system printer).
 *
 * @author <NAME>
 */
class TablePopupMenu extends JPopupMenu {
	private static final long serialVersionUID = 1L;

	/**
	 * Builds the menu for the given table, one item per available printer.
	 * @param table MBasicTable the menu operates on
	 */
	TablePopupMenu(final MBasicTable table) {
		super();
		for (final MPrinter printer : getPrinters()) {
			addPrinterItem(table, printer);
		}
	}

	/** Adds one menu item that runs the given printer against the table. */
	private void addPrinterItem(final MBasicTable table, final MPrinter printer) {
		final MMenuItem item = new MMenuItem(printer.getName(), printer.getIcon());
		add(item);
		item.addActionListener(new ActionListener() {
			@Override
			public void actionPerformed(final ActionEvent event) {
				try {
					printer.print(table);
				} catch (final IOException e) {
					MSwingUtilities.showException(e);
				}
			}
		});
	}

	/**
	 * Returns the export / print targets available in this JVM. Optional
	 * targets are only included when their library is present; excluded on
	 * purpose: the "Clipboard" printer (used directly via Ctrl+C), the
	 * landscape PDF/RTF printers and the US-CSV printer.
	 *
	 * @return List of available printers, in menu order
	 */
	private List<MPrinter> getPrinters() {
		final List<MPrinter> availablePrinters = new ArrayList<>();
		availablePrinters.add(new MCsvLocalWriter());
		try {
			Class.forName("com.lowagie.text.Document");
			availablePrinters.add(new MPdfWriter());
		} catch (final ClassNotFoundException e) {
			// PDF export will not be available in this application
			LoggerFactory.getLogger(TablePopupMenu.class)
					.debug("Export PDF non disponible sans iText");
		}
		try {
			Class.forName("com.lowagie.text.rtf.RtfWriter2");
			availablePrinters.add(new MRtfWriter());
		} catch (final ClassNotFoundException e) {
			// RTF export will not be available in this application
			LoggerFactory.getLogger(TablePopupMenu.class)
					.debug("Export RTF non disponible sans iText-RTF");
		}
		availablePrinters.add(new MHtmlWriter());
		try {
			Class.forName("com.thoughtworks.xstream.XStream");
			availablePrinters.add(new MXmlWriter());
			availablePrinters.add(new MJsonWriter());
		} catch (final ClassNotFoundException e) {
			// XML and JSON exports will not be available in this application
			LoggerFactory.getLogger(TablePopupMenu.class)
					.debug("Exports XML et JSON non disponibles sans XStream et XPP3");
		}
		availablePrinters.add(new MJavaPrinter());
		return availablePrinters;
	}
}
| 1,430 |
414 | package com.github.irshulx.wysiwyg.Utilities;
import android.text.Editable;
import android.text.SpannableStringBuilder;
import android.view.View;
import android.view.inputmethod.BaseInputConnection;
import android.widget.TextView;
/**
* Created by mkallingal on 1/17/2016.
*/
class MyInputConnection extends BaseInputConnection {
private SpannableStringBuilder _editable;
TextView _textView;
public MyInputConnection(View targetView, boolean fullEditor) {
super(targetView, fullEditor);
_textView = (TextView) targetView;
}
public Editable getEditable() {
if (_editable == null) {
_editable = (SpannableStringBuilder) Editable.Factory.getInstance()
.newEditable("Placeholder");
}
return _editable;
}
public boolean commitText(CharSequence text, int newCursorPosition) {
_editable.append(text);
_textView.setText(text);
return true;
}
} | 367 |
3,055 | /*
Fontname: -B&H-Lucida-Bold-I-Normal-Sans-14-100-100-100-P-90-ISO10646-1
Copyright: Copyright <NAME> 1986, 1985.
Glyphs: 191/723
BBX Build Mode: 0
*/
const uint8_t u8g2_font_luBIS10_tf[3328] U8G2_FONT_SECTION("u8g2_font_luBIS10_tf") =
"\277\0\3\3\4\4\5\5\5\32\17\367\375\12\375\12\375\2\3\4Y\14\343 \5\0\60V!\15\245\21"
"V-\311D#\231\34:\2\42\10F\322^\210\70\11#\27\251\21\352\206\202\241h(t\210\205\202\241"
"\330!\224\32\12\206\242\0$\24\350\322\351\206\245%S\342\224\61\42\214H.F\261\20\0%\33\254\21"
"\366\214)\302\220D\26\223\210\242\243\70,\64\15IdI\242\240\204<\1&\31\253\21n\222eQ\221"
"T\42\255ED\61ZH\42\214\204f\263\203\4\0'\11C\322R\34B\21\0(\17\325\261U\306D"
"j\242\221LGa\10\0)\16\325\261U\5\325t\32\311DjA\0*\14U\262Z\251L(\241H"
"\4\0+\22\210\22j\306\1q@\354\20\213\3\342\200\70 \10,\11T\260\325\234$J\0-\6\23"
"\221R\14.\6#\21V\30/\36\333\260\341\203\344\220\70,\16\212\203\342\260\70(\16\212\303\342\240\70"
"(\16\213C\344 \0\60\23\250\22\352\321D!\231DV\243\325$\62QHF\3\61\14\245\23j\224"
"F\242\235DK\0\62\21\251\21j\331bs\200x,\25\253\216\17\21\0\63\21\250\22j\325\1b\251"
"\214\16\20\253\304&&\0\64\20\250\21jK\207\303\210,\244\345\20\24+\1\65\23\251\22j\235\342\220"
"\70\240\16\230\3\306+A\221\15\0\66\23\251\22\352\231f\221\261i&\21J\204\22\241H\221\6\67\16"
"\250\23j\34\302\311Re\251t*\6\70\23\251\21\352\325\224\202\242\211\220v\231I\244\21\241\250\6\71"
"\23\251\21\352\21E\212\22\241D(\221\215\314\223\330\250\10:\12t\21\326\254C'\23\0;\15\245\260"
"UM\350H\23\222L$\3<\21\210\21\352\3\242\263\231t\16\220C\344\220\10\0=\13Y\61\352\34"
"\342\230\17\21\0>\20\210\21\352\304!r\210\34\60\25\312\246q\0\77\16\247\22bXbS\241\332p"
"\216\66\4@\27\251\22\362\21\223\302\42\212$$\221\204B\222\210Hb\212\203h\0A\23\252\20\356\316"
"\1\343jd\30\32\306d'\341$Z\35B\24\251\21f\331\224d\222Yh\24;\311D\62\311Lr"
"\2C\26\252\21j\332\204!\71@\16\230\3\344\20\71d\16\21\312L\0D\26\253\21r\35b+D"
"\311T\62\225\14'C\311p\62\33\35\1E\22\251\21f\235\344\0\361\330\66\226\3\304\343C\4\0F"
"\21\251\21f\235\344\0\361\330&\7\310\1\342e\0G\26\252\21n\332\204!\71@\16\230\3\344\20\341"
"d\66\222\11M\0H\30\253\21n\315F\302\311p\62\24\35D\302\221p\62\234\14E\263\21\0I\13"
"\245\21V-\211v\22-\1J\16\330\260\335n\25K\267\212\245KD\0K\26\252\21n\315D\243\240"
"(\70\21N\242\306\211P\264&\32J\0L\15\250\21fM\247\322]\245\323C\0M\36\255\21v\21"
"G\64\22m$\21ED\22QD$\221DF\22\312H\62\322&\22\217\0N\30\253\21nIEC"
"\321L$\21\251\210\264\210TD\262\221p$\25\1O\23\252\22r\22e\223\241DZ%S%C\221"
"LH\4P\20\251\21f\331Tf\222\231d$\262\211\67\3Q\32\333\262qRe\243\241H*\231J"
"\304\22\351d(S\255\203\350\20:h\0R\25\251\21j\235d\222\231d\64\71\25E\242\221h$\232"
"I\0S\17\250\21\346\225d\221)\327\261$(\251\1T\27\252\22j\34fs\300\34\60\7\310!r"
"\210\34\60\7\314\1S\0U\27\252\22\356\14%\263\211P$\224\14%C\211p\42\24\315\204\65\0V"
"\25\252\22nL\253\221aL\30\33\5'\302I\224L\7\210\1W\35\254\22z\310\204\63\31i\26\21"
"\215$\242QH\22Q\222D$\261\21m\64\34\15\1X\27\253\20j\15e\262\350$\16\240\3\346\220"
"\71\200,\231\306\24G\0Y\21\251\22j\14%\262\230H(\211\216\227\305\253\0Z\24\252\21j\35\342"
"\0\71@\16\30o\7\310\1r\300!\4[\17\326\261U\221\204\332fBm\63\22\11\0\134\14\325\262"
"a\10\63\12\63\12\63\12]\16\326\260U\261\315\204\332fBM$\0^\16xQ\352\206\245\323P,"
"I\26\221\6_\6\26\320]\30`\6\23\65g\14a\22y\21\352\231D\23\321D\66\21Q(#\321"
"\4\0b\23\251\21j\215W)+\224\212\204$\31IF\42\33\0c\16w\21\342QF\21\341P:"
"\12\221\0d\23\251\21j/\13k\222\221h\42\242HH\24\231d\4e\17x\21\346\321D\222QI"
"b\32F,\0f\17\330\261Y\322\244C\233t\253X\272\25\0g\25\251\261e\226D\23\231H\64\231"
"D$$\245\261((*\2h\24\251\21f\215WG\223\11\213h$\32If\222\321\4\0i\14\245"
"\21V\315\201\243M\242%\0j\17\330\260\331\316\61\210\245S\261\326\251\214\10k\24\251\21f\215\345\0"
"\361L\62\212M\202E\311h$Z\1l\13\245\21V-\211v\22-\1m\27}\21\372,M.\24"
"I\344\62\241\210D#\311h$\31\311$\0n\22y\21\346\214&\23\26\321H\64\222\314$\243\11\0"
"o\16x\22\346\221\64\325$\62\211HF\3p\23\251\261iMd\22JEB\222\214D\23Ym\274"
"\14q\23\251\261\351\231D\23\321D\66\221T(#\321X\274\4r\14x\21\342\254\230\246S\351*\0"
"s\16w\21\336\221D\21\42q\22\223\224\0t\16\225\22\332\214&\25\231h$#M\0u\21y\21"
"\346\214&\262\211h$\32IX\226&\0v\17y\21f\14k\42Yl\22,R\247\0w\25|\22"
"r\310d\64YD\24\11\205$*!Ih\266M\10x\17y\20b\215D\223\350x\65\62\222M\0"
"y\23\252\260\345\14%\63%\341$J&\317!q\210x\16z\15y\20\346\34\302\322U\261\370\20\1"
"{\21\326\261\325\215H\263\241L$\235-\315h\62\0|\16\324\261\321%\305\62\211b\231b\61\0}"
"\21\326\260\325\311h\243\331\342H&\234\215H\63\0~\13\71Q\352\14G\241\341\4\0\240\5\0\60V"
"\241\15\245\260U\315\241\62\321H&Z\2\242\17\247\22\352FG\25\341P*\274Ie\0\243\21\251\21"
"j\326F\261\361Xj\34\217\303\207\10\0\244\31\253\20j\305\1\241\260,\42\211\212\302\61\241,\34\222"
"\332\304\241\70 \4\245\21\251\22j\14%\262\240$:\264\16\255\342U\0\246\16\324\261\321e\212\305\21"
"b\241X\32\0\247\26\331\261\351\231f\241q\265\26\32\11C\244\220\265<\11\212l\0\250\7\25\64g"
"H\4\251\27\251\22\362\15e\222\20E\22\225D%QITD\211\310\204\63\0\252\13W\262bUB"
"\225\211\13\0\253\20x\21\346e\22%Md\22\231H&\11\1\254\14X\62j\34\302\312q@\4\0"
"\255\6\23\221R\14\256\16vsj\215b\222\312E\22\13M\0\257\6\25\64g\24\260\6!\23O\10"
"\261\22\211\21\352\306!qH\360\20\214C\342\340C\4\0\262\13e\210\332\20c\242X\214\2\263\13e"
"\210\332\20#\303\240\5\0\264\6\24\65g\20\265\24\251\261i\211F\242\311H\64\31I\42\242\323dy"
"\31\0\266\30\326\263\351\34\16\226\220$\26\211Eb\221P\226X$\26\211EB\0\267\6\22\225j\10"
"\270\7\63\263\345\210\10\271\13c\207ZL$\241\210\12\0\272\13W\262b\221D\64\222\210\4\273\21x"
"\21fEd\42\231D&\31%\211bi\0\274\31\253\24v\214%\322X\64\230(\23\221D\63Q$"
"\30*\205#b\11\0\275\26\253\24v\214%\322X\64\230(;\204\204\61\241,\230MH\1\276\32\254"
"\25\366P\205i\303p,\16\220\210,\242\241(\22\15\325\302!\261\4\0\277\15\247\260a\316\21\205\63"
"\241\352,b\300\25\312\20n\317\221\346\200q\65\62\14\15c\262\223p\22\255\16\301\25\312\20n\323Q"
"\346\200q\65\62\14\15c\262\223p\22\255\16\302\25\312\20\356\326Q\346\200q\65\62\14\15c\262\223p"
"\22\255\16\303\25\312\20\356\326Q\346\200q\65\62\14\15c\262\223p\22\255\16\304\26\312\20\356J\344("
"s\300\270\32\31\206\206\61\331I\70\211V\7\305\25\312\20n\313\321\346\200q\65\62\14\15c\262\223p"
"\22\255\16\306\34\257\20\372C\356\200\71\60\42\207\325a\21sh\16\261Cdr\200l,<D\0\307"
"\32\332\261i\332\204!\71@\16\230\3\344\20\71d\16\21\312\254qX\34\60\5\310\23\311\21f\316\301"
"'\71@<\266\215\345\0\361\370\20\1\311\23\311\21f\322\241'\71@<\266\215\345\0\361\370\20\1\312"
"\23\311\21\346\325\241'\71@<\266\215\345\0\361\370\20\1\313\24\311\21\346I\344\320\223\34 \36\333\306"
"r\200x|\210\0\314\16\305\21V\315\1\243\221h'\321\22\0\315\16\306\21V\321!\263\231l\67\331"
"\32\0\316\16\306\21\326\324!\263\231l\67\331\32\0\317\16\306\21\326\250Cf\63\331n\262\65\0\320\26"
"\253\21r\35bKC\321\360 \222\14'C\321l$\33\35\1\321\32\313\21n\326Q\244\242\241h&"
"\222\210TDZD*\42\331H\70\222\212\0\322\25\312\22\362\316\221\210\262\311P\42\255\222\251\222\241H"
"&$\2\323\25\312\22\362\322Q\210\262\311P\42\255\222\251\222\241H&$\2\324\25\312\22r\326Q\210"
"\262\311P\42\255\222\251\222\241H&$\2\325\25\312\22r\326Q\210\262\311P\42\255\222\251\222\241H&"
"$\2\326\26\312\22rJ\344(D\331d(\221V\311T\311P$\23\22\1\327\22\211\21\352HCA"
"YD:\236Jb\302PT\2\330\31\255\23\362Rf\262\341l(\243\215$\62\221\232D$\244Ie"
"\302\63\0\331\31\312\22n\316\21\206\222\331D(\22J\206\222\241D\70\21\212f\302\32\0\332\31\312\22"
"n\322\301C\311l\42\24\11%C\311P\42\234\10E\63a\15\0\333\31\312\22\356\325\301C\311l\42"
"\24\11%C\311P\42\234\10E\63a\15\0\334\31\312\22\356I\344\340\241d\66\21\212\204\222\241d("
"\21N\204\242\231\260\6\335\23\311\22\352\321\201C\211,&\22J\242\343e\361*\0\336\23\251\21f\215"
"\345\0\323L\62\223\314$\62Qm<\6\337\31\331\261i\22%\243\211L\42\224\10%\243\225\321D\22"
"\222HH\342e\0\340\23\231\21j\316\21L\242\211h\42\233\210(\224\221h\2\341\23\231\21j\322\301"
"&\321D\64\221MD\24\312H\64\1\342\23\231\21\352\325\301&\321D\64\221MD\24\312H\64\1\343"
"\23\231\21\352\325\301&\321D\64\221MD\24\312H\64\1\344\24\231\21\352I\344`\223h\42\232\310&"
"\42\12e$\232\0\345\23\231\21\352\312\21L\242\211h\42\233\210(\224\221h\2\346\25}\21\372\35d"
"\242\221D\64\222\210N\22\305\222L\42\263\0\347\22\247\261\341QF\21\341P:\12\21\343\200\340\14\0"
"\350\21\230\21f\316\241\64\221dT\222\230\206\21\13\0\351\21\230\21f\322\201\64\221dT\222\230\206\21"
"\13\0\352\21\230\21\346\325\201\64\221dT\222\230\206\21\13\0\353\22\230\21\346I\344@\232H\62*I"
"L\303\210\5\0\354\14\225\21V\215G#\231\322\22\0\355\15\226\21V\321\1\263\231Pm\15\0\356\15"
"\226\21\326\324\1\263\231Pm\15\0\357\15\226\21\326\250\3f\63\241\332\32\0\360\23\307\22\346FE\324"
"YHT\12ID\64ZED\2\361\23\231\21f\326a\243\311\204E\64\22\215$\63\311h\2\362\20"
"\230\22f\316\241$M\65\211L\42\222\321\0\363\20\230\22f\322\201$M\65\211L\42\222\321\0\364\20"
"\230\22\346\325\201$M\65\211L\42\222\321\0\365\20\230\22\346\325\201$M\65\211L\42\222\321\0\366\21"
"\230\22\346I\344@\222\246\232D&\21\311h\0\367\14\210\22\352\315\21\17q\254\63\0\370\20z\23f"
"\232\24%$\11\215\42\233\310\214\0\371\23\231\21f\316\241\243\211l\42\32\211F\22\226\245\11\0\372\23"
"\231\21f\322\201\243\211l\42\32\211F\22\226\245\11\0\373\23\231\21\346\325\201\243\211l\42\32\211F\22"
"\226\245\11\0\374\24\231\21\346I\344\300\321D\66\21\215D#\11\313\322\4\0\375\25\312\260\345\322\241C"
"\311LI\70\211\222\311sH\34\42\236\3\376\25\331\261i\215\267\214.\223\210\204$\31\211F\261\332x"
"\31\0\377\26\312\260eJ\344\320\241d\246$\234D\311\344\71$\16\21\317\1\0\0\0\4\377\377\0";
| 6,017 |
7,217 | <gh_stars>1000+
package org.xutils.http.app;
import android.text.TextUtils;
import android.webkit.URLUtil;
import org.xutils.http.HttpMethod;
import org.xutils.http.RequestParams;
import org.xutils.http.request.HttpRequest;
import org.xutils.http.request.UriRequest;
/**
 * Default redirect handling: builds the follow-up request parameters from the
 * {@code Location} response header. Relative locations are resolved against
 * the original request URI; for 301/302/303 the follow-up is downgraded to a
 * parameterless GET (307/308 keep method and params untouched).
 */
public class DefaultRedirectHandler implements RedirectHandler {

    @Override
    public RequestParams getRedirectParams(UriRequest request) throws Throwable {
        // Only HTTP requests carry redirect headers.
        if (!(request instanceof HttpRequest)) {
            return null;
        }

        HttpRequest httpRequest = (HttpRequest) request;
        RequestParams params = httpRequest.getParams();

        String location = httpRequest.getResponseHeader("Location");
        if (TextUtils.isEmpty(location)) {
            return null;
        }

        // A non-absolute location must be resolved against the request URI.
        if (!URLUtil.isHttpsUrl(location) && !URLUtil.isHttpUrl(location)) {
            location = resolveRelativeLocation(params.getUri(), location);
        }
        params.setUri(location);

        /* http 1.0 301 302
         * http 1.1 303 307 308
         */
        int code = request.getResponseCode();
        if (code == 301 || code == 302 || code == 303) {
            // These codes redirect with a fresh GET and no body/params.
            params.clearParams();
            params.setMethod(HttpMethod.GET);
        }
        // 307/308 intentionally keep the request method and params.

        return params;
    }

    /**
     * Resolves a relative {@code Location} value against the original URL:
     * leading-slash locations are appended to the scheme+authority part,
     * other relative locations to the original URL's parent path.
     */
    private static String resolveRelativeLocation(String originalUrl, String location) {
        String base = originalUrl;
        if (location.startsWith("/")) {
            // Host-relative: keep everything up to the first path slash
            // (index 8 skips past "https://").
            int pathIndex = base.indexOf("/", 8);
            if (pathIndex != -1) {
                base = base.substring(0, pathIndex);
            }
        } else {
            // Path-relative: keep up to (and including) the last slash,
            // or append one if the URL has no path component.
            int pathIndex = base.lastIndexOf("/");
            if (pathIndex >= 8) {
                base = base.substring(0, pathIndex + 1);
            } else {
                base += "/";
            }
        }
        return base + location;
    }
}
| 1,109 |
/*
 * raspi4.h - Raspberry Pi 4 (BCM2711) board definitions for RT-Thread:
 * peripheral base addresses, interrupt numbers and the GIC-400 register
 * layout. Values are memory-mapped hardware addresses; do not renumber.
 */
#ifndef __RASPI4_H__
#define __RASPI4_H__
#include <rthw.h>
/* Dereference a 32-bit memory-mapped register at address x. */
#define __REG32(x) (*((volatile unsigned int *)(x)))
//base address (BCM2711 "low peripheral" mode)
#define PER_BASE (0xFE000000)
//gpio offset
#define GPIO_BASE_OFFSET (0x00200000)
//pl011 offset
#define PL011_UART_BASE_OFFSET (0x00201000)
//pactl cs offset
#define PACTL_CS_OFFSET (0x00204E00)
//aux offset
#define AUX_BASE_OFFSET (0x00215000)
//gpio
#define GPIO_BASE (PER_BASE + GPIO_BASE_OFFSET)
#define GPIO_IRQ_NUM (3) //40 pin mode
/* GPIO bank interrupts: SPI IDs start at 96 on the GIC. */
#define IRQ_GPIO0 (96 + 49) //bank0 (0 to 27)
#define IRQ_GPIO1 (96 + 50) //bank1 (28 to 45)
#define IRQ_GPIO2 (96 + 51) //bank2 (46 to 57)
#define IRQ_GPIO3 (96 + 52) //bank3
//system timer
#define ARM_TIMER_IRQ (64)
#define ARM_TIMER_BASE (PER_BASE + 0xB000)
#define ARM_TIMER_LOAD HWREG32(ARM_TIMER_BASE + 0x400)
#define ARM_TIMER_VALUE HWREG32(ARM_TIMER_BASE + 0x404)
#define ARM_TIMER_CTRL HWREG32(ARM_TIMER_BASE + 0x408)
#define ARM_TIMER_IRQCLR HWREG32(ARM_TIMER_BASE + 0x40C)
#define ARM_TIMER_RAWIRQ HWREG32(ARM_TIMER_BASE + 0x410)
#define ARM_TIMER_MASKIRQ HWREG32(ARM_TIMER_BASE + 0x414)
#define ARM_TIMER_RELOAD HWREG32(ARM_TIMER_BASE + 0x418)
#define ARM_TIMER_PREDIV HWREG32(ARM_TIMER_BASE + 0x41C)
#define ARM_TIMER_CNTR HWREG32(ARM_TIMER_BASE + 0x420)
//uart
#define UART_BASE (PER_BASE + PL011_UART_BASE_OFFSET)
#define UART0_BASE (UART_BASE + 0x0)
#define UART2_BASE (UART_BASE + 0x400)
#define UART3_BASE (UART_BASE + 0x600)
#define UART4_BASE (UART_BASE + 0x800)
#define UART5_BASE (UART_BASE + 0xA00)
/* NOTE(review): IRQ_AUX_UART sits in the "uart" section and IRQ_PL011 in
 * the "aux" section below — the labels look swapped; verify against the
 * BCM2711 ARM peripherals interrupt table before relying on the names. */
#define IRQ_AUX_UART (96 + 29)
#define UART_REFERENCE_CLOCK (48000000)
//aux
#define AUX_BASE (PER_BASE + AUX_BASE_OFFSET)
#define IRQ_PL011 (96 + 57)
//pactl cs
#define PACTL_CS_ADDR (PER_BASE + PACTL_CS_OFFSET)
#define PACTL_CS HWREG32(PACTL_CS_ADDR)
/* Bit masks in the PACTL_CS register selecting which peripheral instance
 * asserted the shared SPI/I2C/UART interrupt lines. */
typedef enum
{
    IRQ_SPI0 = 0x00000000,
    IRQ_SPI1 = 0x00000002,
    IRQ_SPI2 = 0x00000004,
    IRQ_SPI3 = 0x00000008,
    IRQ_SPI4 = 0x00000010,
    IRQ_SPI5 = 0x00000020,
    IRQ_SPI6 = 0x00000040,
    IRQ_I2C0 = 0x00000100,
    IRQ_I2C1 = 0x00000200,
    IRQ_I2C2 = 0x00000400,
    IRQ_I2C3 = 0x00000800,
    IRQ_I2C4 = 0x00001000,
    IRQ_I2C5 = 0x00002000,
    IRQ_I2C6 = 0x00004000,
    IRQ_I2C7 = 0x00008000,
    IRQ_UART5 = 0x00010000,
    IRQ_UART4 = 0x00020000,
    IRQ_UART3 = 0x00040000,
    IRQ_UART2 = 0x00080000,
    IRQ_UART0 = 0x00100000
} PACTL_CS_VAL;
// 0x40, 0x44, 0x48, 0x4c: Core 0~3 Timers interrupt control
#define CORE0_TIMER_IRQ_CTRL HWREG32(0xFF800040)
#define TIMER_IRQ 30
#define NON_SECURE_TIMER_IRQ (1 << 1)
//core timer
#define ST_BASE_OFFSET (0x003000)
#define STIMER_BASE (PER_BASE + ST_BASE_OFFSET)
#define STIMER_CS HWREG32(STIMER_BASE + 0x0000)
#define STIMER_CLO HWREG32(STIMER_BASE + 0x0004)
#define STIMER_CHI HWREG32(STIMER_BASE + 0x0008)
#define STIMER_C0 HWREG32(STIMER_BASE + 0x000C)
#define STIMER_C1 HWREG32(STIMER_BASE + 0x0010)
#define STIMER_C2 HWREG32(STIMER_BASE + 0x0014)
#define STIMER_C3 HWREG32(STIMER_BASE + 0x0018)
/* Busy-wait delay built on the free-running system timer counter.
 * NOTE(review): the factor 25 implies a 25 MHz tick here — confirm the
 * system-timer clock for this board configuration (it is 1 MHz on
 * earlier Pi models). Also wraps incorrectly when STIMER_CLO overflows. */
#define DELAY_MICROS(micros) \
    do { \
        rt_uint32_t compare = STIMER_CLO + micros * 25; \
        while (STIMER_CLO < compare); \
    } while (0) \

//mmc
#define MMC0_BASE_ADDR (PER_BASE + 0x300000)
#define MMC2_BASE_ADDR (PER_BASE + 0x340000)
//eth
#define MAC_REG_BASE_ADDR (void *)(0xfd580000)
#define ETH_IRQ (160 + 29)
/* Fixed physical buffers used for DMA-coherent (uncached) ethernet data. */
#define SEND_DATA_NO_CACHE (0x08200000)
#define RECV_DATA_NO_CACHE (0x08400000)
//gic max
#define MAX_HANDLERS (256)
#define ARM_GIC_NR_IRQS (512)
#define INTC_BASE (0xff800000)
#define ARM_GIC_MAX_NR (512)
/* GIC-400 register frames (distributor / CPU interface / virtualization). */
#define GIC_V2_BASE (INTC_BASE + 0x00040000)
#define GIC_V2_DISTRIBUTOR_BASE (INTC_BASE + 0x00041000)
#define GIC_V2_CPU_INTERFACE_BASE (INTC_BASE + 0x00042000)
#define GIC_V2_HYPERVISOR_BASE (INTC_BASE + 0x00044000)
#define GIC_V2_VIRTUAL_CPU_BASE (INTC_BASE + 0x00046000)
#define GIC_IRQ_START 0
#define GIC_ACK_INTID_MASK 0x000003ff
#define GIC_PL400_DISTRIBUTOR_PPTR GIC_V2_DISTRIBUTOR_BASE
#define GIC_PL400_CONTROLLER_PPTR GIC_V2_CPU_INTERFACE_BASE
/* the basic constants and interfaces needed by gic */
rt_inline rt_uint32_t platform_get_gic_dist_base(void)
{
    return GIC_PL400_DISTRIBUTOR_PPTR;
}
rt_inline rt_uint32_t platform_get_gic_cpu_base(void)
{
    return GIC_PL400_CONTROLLER_PPTR;
}
#endif
| 2,791 |
/*
 * Object class identifiers. The numeric values appear to be protocol /
 * persistence ids (note the deliberate gaps, e.g. 8-9, 14-15), so they
 * must never be renumbered or reused — TODO confirm against the
 * consumer of these constants.
 */
#define CLASS_REGION 0
#define CLASS_AVATAR 1
#define CLASS_AMULET 2
#define CLASS_GHOST 3
#define CLASS_ATM 4
#define CLASS_GAME_PIECE 5
#define CLASS_BAG 6
#define CLASS_BALL 7
#define CLASS_BOOK 10
#define CLASS_BOOMERANG 11
#define CLASS_BOTTLE 12
#define CLASS_BOX 13
#define CLASS_CLUB 16
#define CLASS_COMPASS 17
#define CLASS_COUNTERTOP 18
#define CLASS_CRYSTAL_BALL 20
#define CLASS_DIE 21
#define CLASS_DISPLAY_CASE 22
#define CLASS_DOOR 23
#define CLASS_DROPBOX 24
#define CLASS_DRUGS 25
#define CLASS_ESCAPE_DEVICE 26
#define CLASS_FAKE_GUN 27
#define CLASS_ELEVATOR 28
#define CLASS_FLAG 29
#define CLASS_FLASHLIGHT 30
#define CLASS_FRISBEE 31
#define CLASS_GARBAGE_CAN 32
#define CLASS_GEMSTONE 33
#define CLASS_GRENADE 35
#define CLASS_GROUND 36
#define CLASS_GUN 37
#define CLASS_HAND_OF_GOD 38
#define CLASS_HAT 39
#define CLASS_INSTANT_OBJECT_PILL 40
#define CLASS_KEY 42
#define CLASS_KNICK_KNACK 43
#define CLASS_KNIFE 44
#define CLASS_MAGIC_LAMP 45
#define CLASS_MAGIC_STAFF 46
#define CLASS_MAGIC_WAND 47
#define CLASS_MAILBOX 48
#define CLASS_MATCHBOOK 49
#define CLASS_MOVIE_CAMERA 52
#define CLASS_PAPER 54
#define CLASS_PLAQUE 55
#define CLASS_SHORT_SIGN 56
#define CLASS_SIGN 57
#define CLASS_PLANT 58
#define CLASS_RING 60
#define CLASS_ROCK 61
#define CLASS_SECURITY_DEVICE 63
#define CLASS_SENSOR 64
#define CLASS_SKY 69
#define CLASS_STEREO 70
#define CLASS_TAPE 71
#define CLASS_TELEPORT_BOOTH 74
#define CLASS_TICKET 75
#define CLASS_TOKENS 76
#define CLASS_WALL 80
#define CLASS_WIND_UP_TOY 82
#define CLASS_CHANGOMATIC 84
#define CLASS_VENDO_FRONT 85
#define CLASS_VENDO_INSIDE 86
#define CLASS_TRAPEZOID 87
#define CLASS_HOLE 88
#define CLASS_SHOVEL 89
#define CLASS_SEX_CHANGER 90
#define CLASS_STUN_GUN 91
#define CLASS_SUPER_TRAPEZOID 92
#define CLASS_FLAT 93
#define CLASS_TEST 94
#define CLASS_SPRAY_CAN 95
#define CLASS_PAWN_MACHINE 96
#define CLASS_MAGIC_IMMOBILE 97
#define CLASS_GLUE 98
#define CLASS_HEAD 127
#define CLASS_AQUARIUM 129
#define CLASS_BED 130
#define CLASS_BRIDGE 131
#define CLASS_BUILDING 132
#define CLASS_BUSH 133
#define CLASS_CHAIR 134
#define CLASS_CHEST 135
#define CLASS_COKE_MACHINE 136
#define CLASS_COUCH 137
#define CLASS_FENCE 138
#define CLASS_FLOOR_LAMP 139
#define CLASS_FORTUNE_MACHINE 140
#define CLASS_FOUNTAIN 141
#define CLASS_HOUSE_CAT 143
#define CLASS_HOT_TUB 144
#define CLASS_JUKEBOX 145
#define CLASS_POND 147
#define CLASS_RIVER 148
#define CLASS_ROOF 149
#define CLASS_SAFE 150
#define CLASS_PICTURE 152
#define CLASS_STREET 153
#define CLASS_STREETLAMP 154
#define CLASS_TABLE 155
#define CLASS_TREE 156
#define CLASS_WINDOW 157
/* NOTE(review): "CRAT" is likely a typo for "CRATE", but renaming the
 * macro would break existing users of this header. */
#define CLASS_CRAT_IN_A_BOX 158
#define CLASS_ZONE 255
| 998 |
373 | <reponame>kschu91/pyoidc
#!/usr/bin/env python
from urllib.parse import parse_qs
from urllib.parse import urlencode
from urllib.parse import urlparse
import argparse
import importlib
import json
import logging
from jwkest.jws import alg2keytype
from mako.lookup import TemplateLookup
from requests import ConnectionError
from requests.packages import urllib3
from oic.utils.http_util import NotFound
from oic.utils.http_util import Response
from oic.utils.http_util import SeeOther
from oic.utils.http_util import get_post
from oic.utils.keyio import build_keyjar
from oic.utils.rp.oauth2 import OAuthClients
# Silence urllib3's InsecureRequestWarning spam (this RP is often run
# against test OPs with self-signed certificates).
urllib3.disable_warnings()
# Root logger: everything goes to rp.log at DEBUG level.
LOGGER = logging.getLogger("")
LOGFILE_NAME = 'rp.log'
hdlr = logging.FileHandler(LOGFILE_NAME)
base_formatter = logging.Formatter(
    "%(asctime)s %(name)s:%(levelname)s %(message)s")
# Alternative per-client format (client/path/cid context); defined but
# NOTE(review): cpc_formatter is never installed on a handler here.
CPC = ('%(asctime)s %(name)s:%(levelname)s '
       '[%(client)s,%(path)s,%(cid)s] %(message)s')
cpc_formatter = logging.Formatter(CPC)
hdlr.setFormatter(base_formatter)
LOGGER.addHandler(hdlr)
LOGGER.setLevel(logging.DEBUG)
# Mako template environment used by all page-rendering helpers below.
LOOKUP = TemplateLookup(directories=['templates', 'htdocs'],
                        module_directory='modules',
                        input_encoding='utf-8',
                        output_encoding='utf-8')
# Shared server environment, populated in __main__ before serving.
SERVER_ENV = {}
class JLog(object):
    """Structured logger that emits JSON records tagged with a session id.

    Every record is a JSON object containing the bound ``id`` plus the
    caller-supplied fields (caller fields win on key collision, matching
    ``dict.update`` semantics).
    """

    def __init__(self, logger, sid):
        self.logger = logger
        self.id = sid

    def _render(self, info):
        """Serialize *info* merged with the session id to a JSON string."""
        _dict = {'id': self.id}
        _dict.update(info)
        return json.dumps(_dict)

    def info(self, info):
        self.logger.info(self._render(info))

    def error(self, info):
        self.logger.error(self._render(info))

    def warning(self, info):
        self.logger.warning(self._render(info))
# noinspection PyUnresolvedReferences
def static(environ, start_response, logger, path):
    """Serve a static file from disk.

    Chooses the Content-Type from the file extension (defaulting to
    text/xml, as the original callers expect) and returns the file body;
    answers with a 404 NotFound response when the file cannot be read.
    """
    logger.info("[static]sending: %s" % (path,))
    content_types = {
        ".ico": "image/x-icon",
        ".html": "text/html",
        ".json": "application/json",
        ".txt": "text/plain",
        ".css": "text/css",
    }
    try:
        # Context manager closes the handle (the original leaked it).
        with open(path, 'rb') as fp:
            data = fp.read()
    except IOError:
        resp = NotFound()
        return resp(environ, start_response)
    ctype = "text/xml"  # historical default for unknown extensions
    for ext, mime in content_types.items():
        if path.endswith(ext):
            ctype = mime
            break
    start_response('200 OK', [('Content-Type', ctype)])
    return [data]
def opchoice(environ, start_response, clients):
    """Render the provider-selection page listing every configured OP."""
    argv = {"op_list": list(clients.keys())}
    resp = Response(mako_template="opchoice.mako",
                    template_lookup=LOOKUP,
                    headers=[])
    return resp(environ, start_response, **argv)
def opresult(environ, start_response, **kwargs):
    """Render the authentication-result page.

    Passes userinfo/userid/id_token through to the template; entries the
    caller did not supply render as None (same effect as the original
    try/except KeyError loop).
    """
    resp = Response(mako_template="opresult.mako",
                    template_lookup=LOOKUP,
                    headers=[])
    _args = {param: kwargs.get(param)
             for param in ('userinfo', 'userid', 'id_token')}
    return resp(environ, start_response, **_args)
def operror(environ, start_response, error=None):
    """Render the OP error page showing *error*."""
    template_args = {"error": error}
    resp = Response(mako_template="operror.mako",
                    template_lookup=LOOKUP,
                    headers=[])
    return resp(environ, start_response, **template_args)
def opresult_fragment(environ, start_response):
    """Serve the page whose JavaScript re-POSTs the URL fragment to the RP."""
    resp = Response(mako_template="opresult_repost.mako",
                    template_lookup=LOOKUP,
                    headers=[])
    return resp(environ, start_response)
def sorry_response(environ, start_response, homepage, err):
    """Render the apology page with *err*, linking back to *homepage*."""
    resp = Response(mako_template="sorry.mako",
                    template_lookup=LOOKUP,
                    headers=[])
    return resp(environ, start_response,
                htmlpage=homepage, error=str(err))
def get_id_token(client, session):
    """Return the ID token stored under the session's auth state."""
    grant = client.grant[session["state"]]
    return grant.get_id_token()
def id_token_as_signed_jwt(client, id_token, alg="RS256"):
    """Produce a JWS (signed JWT) from a previously received ID token,
    signed with the client's key matching *alg*."""
    keytype = alg2keytype(alg)
    ckey = client.keyjar.get_signing_key(keytype, "")
    return id_token.to_jwt(key=ckey, algorithm=alg)
def url_eq(a, b):
    """Compare two URLs, ignoring at most one trailing slash on each side.

    ``http://x`` and ``http://x/`` compare equal; a double trailing slash
    is only stripped once, so ``a//`` != ``a/`` (matching the original
    branch-per-combination implementation).
    """
    def _norm(url):
        # Drop a single trailing slash, never more.
        return url[:-1] if url.endswith('/') else url
    return _norm(a) == _norm(b)
# Maps incoming query-parameter names to the session keys searched by
# Application.find_session(): the OP issuer arrives as ``iss`` but is
# stored in the session under ``op``.
KEY_MAP = {'state': 'state', 'iss': 'op'}
class Application(object):
    """WSGI application implementing the relying-party (RP) side.

    Dispatches on PATH_INFO: OP selection (""), starting authentication
    ("rp"), authorization callbacks (registered return paths and
    "authz_post"), logout and OIDC session management. Per-user state is
    kept in beaker sessions and mirrored in ``self.session`` so it can be
    recovered by query parameters when the cookie is missing.
    """
    def __init__(self, acrs, clients, conf, userinfo, base, **extra_args):
        # acrs: ACR values to request at authentication time.
        # clients: registry of configured OIDC/OAuth2 clients keyed by issuer.
        self.acr_values = acrs
        self.clients = clients
        self.conf = conf
        self.userinfo = userinfo
        self.base = base
        self.extra_args = extra_args
        # beaker session id -> session-info dict (shared with beaker).
        self.session = {}
    def find_session(self, **kwargs):
        """Locate a stored session matching query values (e.g. state/iss).

        Each kwarg name is translated through KEY_MAP to a session key;
        the kwarg value is a list of acceptable values. Returns the first
        session where every present key matched, else None.

        NOTE(review): _f/_n accumulate across candidate sessions instead
        of being reset per session — verify this cross-session carry-over
        is intended.
        """
        _f = 0
        _n = 0
        for _ses in self.session.values():
            for key, vals in kwargs.items():
                try:
                    _val = _ses[KEY_MAP[key]]
                except KeyError:
                    pass
                else:
                    _n += 1
                    if _val in vals:
                        _f += 1
            if _f and _f == _n:
                return _ses
        return None
    def init_client(self, client, session, query, environ, start_response):
        """Prepare *client* for this session and redirect to the OP's
        authorization endpoint (returns the WSGI redirect response)."""
        client.get_userinfo = self.userinfo
        try:
            client.resource_server = session['resource_server']
        except KeyError:
            pass
        # Response format requested by the caller; default to HTML pages.
        try:
            session['response_format'] = query["response_format"][0]
        except KeyError:
            session['response_format'] = 'html'
        session["op"] = client.provider_info["issuer"]
        try:
            resp = client.create_authn_request(session, self.acr_values)
        except Exception as err:
            logging.error(err)
            raise
        else:
            return resp(environ, start_response)
    def application(self, environ, start_response):
        """Main WSGI entry point: route the request by PATH_INFO."""
        b_session = environ['beaker.session']
        jlog = JLog(LOGGER, b_session.id)
        path = environ.get('PATH_INFO', '').lstrip('/')
        try:
            jlog.info({'cookie': environ['HTTP_COOKIE'].split(';'),
                       'path': path})
        except KeyError:
            jlog.info({'path': path})
        # Static assets are served before any session handling.
        if path == "robots.txt":
            return static(environ, start_response, LOGGER, "static/robots.txt")
        elif path.startswith("static/"):
            return static(environ, start_response, LOGGER, path)
        elif '/static/' in path:
            pre, post = path.split('static')
            return static(environ, start_response, LOGGER, 'static' + post)
        query = parse_qs(environ["QUERY_STRING"])
        # Recover session from beaker; if the cookie was lost across the
        # OP redirect, try matching it by state/iss query parameters.
        try:
            session = b_session['session_info']
        except KeyError:
            session = self.find_session(**query)
            if session:
                b_session['session_info'] = session
            else:
                session = {}
                b_session['session_info'] = session
                self.session[b_session.id] = session
        if path == '':
            if 'access_token' not in session:
                return opchoice(environ, start_response, self.clients)
            else:
                client = self.clients[session["op"]]
                # check_session_iframe_url = None
                try:
                    # check_session_iframe_url = client.provider_info[
                    #     "check_session_iframe"]
                    session["session_management"] = {
                        "session_state": query["session_state"][0],
                        "client_id": client.client_id,
                        "issuer": client.provider_info["issuer"]
                    }
                except KeyError:
                    pass
                kwargs = dict(
                    [(p, session[p]) for p in
                     ['id_token', 'userinfo', 'user_id'] if
                     p in session])
                return opresult(environ, start_response, **kwargs)
        elif path == "rp":  # After having chosen which OP to authenticate at
            if "uid" in query:
                # Discover the OP dynamically from a user id (webfinger).
                try:
                    client = self.clients.dynamic_client(userid=query["uid"][0])
                except (ConnectionError, OIDCError) as err:
                    return operror(environ, start_response, '{}'.format(err))
            elif 'issuer' in query:
                try:
                    client = self.clients[query["issuer"][0]]
                except (ConnectionError, OIDCError) as err:
                    return operror(environ, start_response, '{}'.format(err))
            else:
                client = self.clients[query["op"][0]]
            return self.init_client(client, session, query, environ,
                                    start_response)
        elif path.endswith('authz_post'):
            # Fragment-encoded response re-POSTed by our JavaScript page.
            try:
                _iss = session['op']
            except KeyError:
                jlog.error({'reason': 'No active session',
                            'remote_addr': environ['REMOTE_ADDR']})
                return opchoice(environ, start_response, self.clients)
            else:
                client = self.clients[_iss]
            query = parse_qs(get_post(environ))
            try:
                info = query["fragment"][0]
            except KeyError:
                return sorry_response(environ, start_response, self.base,
                                      "missing fragment ?!")
            if info == ['x']:
                return sorry_response(environ, start_response, self.base,
                                      "Expected fragment didn't get one ?!")
            jlog.info({'fragment': info})
            try:
                result = client.callback(info, session, 'urlencoded')
                if isinstance(result, SeeOther):
                    return result(environ, start_response)
            except OIDCError as err:
                return operror(environ, start_response, "%s" % err)
            except Exception as err:
                raise
            else:
                session.update(result)
                res = SeeOther(self.conf['base_url'])
                return res(environ, start_response)
        elif path in self.clients.return_paths():  # After having
            # authenticated at the OP
            jlog.info({'query': query})
            # Find the client that issued the request carrying this state.
            _client = None
            for cli in self.clients.client.values():
                if query['state'][0] in cli.authz_req:
                    _client = cli
                    break
            if not _client:
                jlog.error({
                    'reason': 'No active session',
                    'remote_addr': environ['REMOTE_ADDR'],
                    'state': query['state'][0]
                })
                return opchoice(environ, start_response, self.clients)
            if 'error' in query:  # something amiss
                if query['error'][0] == 'access_denied':  # Try reregistering
                    _iss = _client.provider_info['issuer']
                    # Dropping the client forces re-registration on lookup.
                    del self.clients[_iss]
                    try:
                        client = self.clients[_iss]
                    except (ConnectionError, OIDCError) as err:
                        return operror(environ, start_response,
                                       '{}'.format(err))
                    return self.init_client(client, session, query, environ,
                                            start_response)
            # Mix-up mitigation: the response must come from the OP we
            # actually sent the user to.
            try:
                _iss = query['iss'][0]
            except KeyError:
                pass
            else:
                if _iss != _client.provider_info['issuer']:
                    jlog.error({'reason': 'Got response from wrong OP'})
                    return opchoice(environ, start_response, self.clients)
            _response_type = _client.behaviour["response_type"]
            try:
                _response_mode = _client.authz_req[session['state']][
                    'response_mode']
            except KeyError:
                _response_mode = ''
            jlog.info({
                "response_type": _response_type,
                "response_mode": _response_mode})
            if _response_type and _response_type != "code":
                # Fall through if it's a query response anyway
                if query:
                    pass
                elif _response_mode:
                    # form_post encoded
                    pass
                else:
                    return opresult_fragment(environ, start_response)
            try:
                result = _client.callback(query, session)
                if isinstance(result, SeeOther):
                    return result(environ, start_response)
            except OIDCError as err:
                return operror(environ, start_response, "%s" % err)
            except Exception:
                raise
            else:
                session.update(result)
                res = SeeOther(self.conf['base_url'])
                return res(environ, start_response)
        elif path == "logout":  # After the user has pressed the logout button
            try:
                _iss = session['op']
            except KeyError:
                jlog.error(
                    {'reason': 'No active session',
                     'remote_addr': environ['REMOTE_ADDR']})
                return opchoice(environ, start_response, self.clients)
            client = self.clients[_iss]
            try:
                del client.authz_req[session['state']]
            except KeyError:
                pass
            logout_url = client.end_session_endpoint
            try:
                # Specify to which URL the OP should return the user after
                # log out. That URL must be registered with the OP at client
                # registration.
                logout_url += "?" + urlencode(
                    {"post_logout_redirect_uri": client.registration_response[
                        "post_logout_redirect_uris"][0]})
            except KeyError:
                pass
            else:
                # If there is an ID token send it along as a id_token_hint
                _idtoken = get_id_token(client, session)
                if _idtoken:
                    logout_url += "&" + urlencode({
                        "id_token_hint": id_token_as_signed_jwt(client,
                                                                _idtoken,
                                                                "HS256")})
                # Also append the ACR values
                logout_url += "&" + urlencode({"acr_values": self.acr_values},
                                              True)
            # NOTE(review): ``session`` is a plain dict on some code paths;
            # dict has no delete() — confirm this relies on beaker's object.
            session.delete()
            resp = SeeOther(str(logout_url))
            return resp(environ, start_response)
        elif path == "logout_success":  # post_logout_redirect_uri
            return Response("Logout successful!")(environ, start_response)
        elif path == "session_iframe":  # session management
            kwargs = session["session_management"]
            resp = Response(mako_template="rp_session_iframe.mako",
                            template_lookup=LOOKUP)
            return resp(environ, start_response,
                        session_change_url="{}session_change".format(
                            self.conf["base_url"]),
                        **kwargs)
        elif path == "session_change":
            try:
                _iss = session['op']
            except KeyError:
                jlog.error({
                    'reason': 'No active session',
                    'remote_addr': environ['REMOTE_ADDR']})
                return opchoice(environ, start_response, self.clients)
            try:
                client = self.clients[_iss]
            except KeyError:
                return Response("No valid session.")(environ, start_response)
            # Silent re-authentication to detect session state changes.
            kwargs = {"prompt": "none"}
            # If there is an ID token send it along as a id_token_hint
            idt = get_id_token(client, session)
            if idt:
                kwargs["id_token_hint"] = id_token_as_signed_jwt(client, idt,
                                                                 "HS256")
            resp = client.create_authn_request(session, self.acr_values,
                                               **kwargs)
            return resp(environ, start_response)
        return opchoice(environ, start_response, self.clients)
if __name__ == '__main__':
    from oic.utils.rp import OIDCClients
    from oic.utils.rp import OIDCError
    from beaker.middleware import SessionMiddleware
    from cherrypy import wsgiserver
    # Command line: config module name, port, base URL, TLS-verify toggle.
    parser = argparse.ArgumentParser()
    parser.add_argument(dest="config")
    parser.add_argument("-p", default=8666, dest="port", help="port of the RP")
    parser.add_argument("-b", dest="base_url", help="base url of the RP")
    parser.add_argument('-k', dest='verify_ssl', action='store_false')
    args = parser.parse_args()
    _conf = importlib.import_module(args.config)
    if args.base_url:
        _conf.BASE = args.base_url
    _base = "{base}:{port}/".format(base=_conf.BASE, port=args.port)
    # Expand the {base} placeholder in every client's redirect URIs.
    for _client, client_conf in _conf.CLIENTS.items():
        if "client_registration" in client_conf:
            client_reg = client_conf["client_registration"]
            client_reg["redirect_uris"] = [
                url.format(base=_conf.BASE) for url in
                client_reg["redirect_uris"]]
    session_opts = {
        'session.type': 'memory',
        'session.cookie_expires': True,
        'session.auto': True,
        'session.key': "{}.beaker.session.id".format(
            urlparse(_conf.BASE).netloc.replace(":", "."))
    }
    # Optional signing keys: publish the JWKS under a static URI.
    try:
        key_spec = _conf.KEY_SPECIFICATION
    except AttributeError:
        jwks_info = {}
    else:
        jwks, keyjar, kidd = build_keyjar(key_spec)
        jwks_info = {
            'jwks_uri': '{}static/jwks_uri.json'.format(_base),
            'keyjar': keyjar,
            'kid': kidd
        }
        f = open('static/jwks_uri.json', 'w')
        f.write(json.dumps(jwks))
        f.close()
    # Fix: module attribute access raises AttributeError, not KeyError —
    # the original "except KeyError" made a missing CLIENT_TYPE crash
    # instead of defaulting to OIDC.
    try:
        ctype = _conf.CLIENT_TYPE
    except (KeyError, AttributeError):
        ctype = 'OIDC'
    if ctype == 'OIDC':
        _clients = OIDCClients(_conf, _base, jwks_info=jwks_info,
                               verify_ssl=args.verify_ssl)
    else:
        _clients = OAuthClients(_conf, _base, jwks_info=jwks_info,
                                verify_ssl=args.verify_ssl)
    SERVER_ENV.update({"template_lookup": LOOKUP, "base_url": _base})
    app_args = {'clients': _clients,
                'acrs': _conf.ACR_VALUES,
                'conf': SERVER_ENV,
                'userinfo': _conf.USERINFO,
                'base': _conf.BASE}
    try:
        app_args['resource_server'] = _conf.RESOURCE_SERVER
    except AttributeError:
        pass
    _app = Application(**app_args)
    SRV = wsgiserver.CherryPyWSGIServer(
        ('0.0.0.0', int(args.port)),  # nosec
        SessionMiddleware(_app.application, session_opts))
    if _conf.BASE.startswith("https"):
        from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
        SRV.ssl_adapter = BuiltinSSLAdapter(_conf.SERVER_CERT, _conf.SERVER_KEY,
                                            _conf.CERT_CHAIN)
        extra = " using SSL/TLS"
    else:
        extra = ""
    txt = "RP server starting listening on port:%s%s" % (args.port, extra)
    LOGGER.info(txt)
    print(txt)
    try:
        SRV.start()
    except KeyboardInterrupt:
        SRV.stop()
| 10,471 |
521 | <reponame>Fimbure/icebox-1<filename>third_party/virtualbox/src/VBox/Runtime/r3/darwin/rtProcInitExePath-darwin.cpp
/* $Id: rtProcInitExePath-darwin.cpp $ */
/** @file
* IPRT - rtProcInitName, Darwin.
*/
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*********************************************************************************************************************************
* Header Files *
*********************************************************************************************************************************/
#define LOG_GROUP RTLOGGROUP_PROCESS
#ifdef RT_OS_DARWIN
# include <mach-o/dyld.h>
#endif
#include <stdlib.h>
#include <limits.h>
#include <errno.h>
#include <iprt/string.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/path.h>
#include "internal/process.h"
#include "internal/path.h"
/**
 * Resolve the absolute path of the running executable into @a pszPath.
 *
 * Asks the Darwin dynamic linker for image 0 (the main executable),
 * canonicalizes it with realpath() and converts it to the IPRT path
 * representation.
 *
 * @returns VINF_SUCCESS (asserts on conversion failure).
 * @param   pszPath  Output buffer receiving the executable path.
 * @param   cchPath  Size of @a pszPath in bytes.
 */
DECLHIDDEN(int) rtProcInitExePath(char *pszPath, size_t cchPath)
{
    /*
     * Query the image name from the dynamic linker, convert and return it.
     */
    const char *pszImageName = _dyld_get_image_name(0);
    AssertReturn(pszImageName, VERR_INTERNAL_ERROR);
    char szTmpPath[PATH_MAX + 1];
    const char *psz = realpath(pszImageName, szTmpPath);
    int rc;
    if (psz)
        rc = rtPathFromNativeCopy(pszPath, cchPath, szTmpPath, NULL);
    else
        rc = RTErrConvertFromErrno(errno);
    /* NOTE(review): the failure message dumps pszPath (possibly not yet
     * filled in) with pszImageName's length — it likely meant to dump
     * pszImageName; debug-only, so left untouched pending confirmation. */
    AssertMsgRCReturn(rc, ("rc=%Rrc pszLink=\"%s\"\nhex: %.*Rhxs\n", rc, pszPath, strlen(pszImageName), pszPath), rc);
    return VINF_SUCCESS;
}
| 909 |
880 | /******************************************************************************
* Copyright (c) 2009-2018, <NAME> and individual contributors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
package py4j.examples;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.logging.ConsoleHandler;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.net.ServerSocketFactory;
import javax.net.SocketFactory;
import py4j.ClientServer;
import py4j.GatewayServer;
public class MultiClientServer {

    /**
     * This Runnable for a Thread is designed to simulate the shared nature of a
     * thread like the UI thread in an SWT application: queued tasks are all
     * executed sequentially on the single thread running this instance.
     */
    public static class SharedRunnable implements Runnable {

        private BlockingQueue<FutureTask<?>> queue = new LinkedBlockingQueue<FutureTask<?>>();

        /** Enqueues a task to be executed on the shared thread. */
        public void add(FutureTask<?> future) throws InterruptedException {
            queue.put(future);
        }

        @Override
        public void run() {
            while (true) {
                try {
                    queue.take().run();
                } catch (InterruptedException e) {
                    // Interruption is the shutdown signal for this worker.
                    break;
                }
            }
        }
    }

    /**
     * Java-side entry point exposed to a Python client. Reports thread ids
     * observed directly, via the shared worker thread, and via the Python
     * callback, so the test can verify which thread work lands on.
     */
    public static class EntryPoint {

        private SharedRunnable sharedRunnable;
        private int entryId;
        private MultiClientServerGetThreadId pythonGetThreadId;

        public EntryPoint(int entryId, SharedRunnable sharedRunnable) {
            this.entryId = entryId;
            this.sharedRunnable = sharedRunnable;
        }

        public void setPythonThreadIdGetter(MultiClientServerGetThreadId pythonGetThreadId) {
            this.pythonGetThreadId = pythonGetThreadId;
        }

        public int getEntryId() {
            return entryId;
        }

        /** Id of the thread servicing this call. */
        public long getJavaThreadId() {
            return Thread.currentThread().getId();
        }

        /**
         * Runs {@code task} on the shared thread and blocks for its result.
         * Extracted helper replacing four copies of the FutureTask boilerplate.
         */
        private long runOnSharedThread(Callable<Long> task)
                throws InterruptedException, ExecutionException {
            FutureTask<Long> futureTask = new FutureTask<Long>(task);
            sharedRunnable.add(futureTask);
            return futureTask.get();
        }

        public long getSharedJavaThreadId() throws InterruptedException, ExecutionException {
            return runOnSharedThread(new Callable<Long>() {
                @Override
                public Long call() throws Exception {
                    return Thread.currentThread().getId();
                }
            });
        }

        public long getPythonThreadId() {
            return Long.parseLong(pythonGetThreadId.getThreadId());
        }

        public long getSharedPythonThreadId() throws Exception {
            return runOnSharedThread(new Callable<Long>() {
                @Override
                public Long call() throws Exception {
                    return Long.parseLong(pythonGetThreadId.getThreadId());
                }
            });
        }

        public long getViaPythonJavaThreadId() {
            return Long.parseLong(pythonGetThreadId.getJavaThreadId());
        }

        public long getSharedViaPythonJavaThreadId() throws Exception {
            return runOnSharedThread(new Callable<Long>() {
                @Override
                public Long call() throws Exception {
                    return Long.parseLong(pythonGetThreadId.getJavaThreadId());
                }
            });
        }
    }

    public static void main(String[] args) {
        // GatewayServer.turnAllLoggingOn();
        Logger logger = Logger.getLogger("py4j");
        logger.setLevel(Level.ALL);
        ConsoleHandler handler = new ConsoleHandler();
        handler.setLevel(Level.FINEST);
        logger.addHandler(handler);
        System.out.println("Starting");
        // One shared daemon worker thread that both entry points funnel
        // their "shared" calls onto.
        SharedRunnable sharedRunnable = new SharedRunnable();
        Thread thread = new Thread(sharedRunnable, "SharedRunnable");
        thread.setDaemon(true);
        thread.start();
        EntryPoint entryPoint0 = new EntryPoint(0, sharedRunnable);
        ClientServer clientServer0 = new ClientServer(entryPoint0);
        // Wait for Python side to shut down Java side
        clientServer0.startServer(true);
        // TODO: Refactor with Py4J Pull 204
        // Start the second client server on default + 2 ports, the rest of the
        // arguments are the same
        EntryPoint entryPoint1 = new EntryPoint(1, sharedRunnable);
        ClientServer clientServer1 = new ClientServer(GatewayServer.DEFAULT_PORT + 2, GatewayServer.defaultAddress(),
                GatewayServer.DEFAULT_PYTHON_PORT + 2, GatewayServer.defaultAddress(),
                GatewayServer.DEFAULT_CONNECT_TIMEOUT, GatewayServer.DEFAULT_READ_TIMEOUT,
                ServerSocketFactory.getDefault(), SocketFactory.getDefault(), entryPoint1);
        // Wait for Python side to shut down Java side
        clientServer1.startServer(true);
    }
}
| 1,923 |
3,156 | import unittest
from dbt.exceptions import ConnectionException
from dbt.clients.registry import _get_with_retries
class testRegistryGetRequestException(unittest.TestCase):
    """Exercises connection-error handling in the registry client."""

    def test_registry_request_error_catching(self):
        # 0.0.0.0 is a non-routable address, so the request must fail and
        # be converted into dbt's ConnectionException.
        with self.assertRaises(ConnectionException):
            _get_with_retries('', 'http://0.0.0.0')
312 | <reponame>leoyey/reveno
package org.reveno.atp.acceptance.tests;
import org.junit.Assert;
import org.junit.Test;
import org.reveno.atp.acceptance.api.commands.CreateNewAccountCommand;
import org.reveno.atp.acceptance.api.commands.NewOrderCommand;
import org.reveno.atp.acceptance.api.events.AccountCreatedEvent;
import org.reveno.atp.acceptance.api.events.OrderCreatedEvent;
import org.reveno.atp.acceptance.api.transactions.AcceptOrder;
import org.reveno.atp.acceptance.api.transactions.CreateAccount;
import org.reveno.atp.acceptance.api.transactions.Credit;
import org.reveno.atp.acceptance.api.transactions.Debit;
import org.reveno.atp.acceptance.handlers.RollbackTransactions;
import org.reveno.atp.acceptance.handlers.Transactions;
import org.reveno.atp.acceptance.model.Account;
import org.reveno.atp.acceptance.model.Order.OrderType;
import org.reveno.atp.acceptance.views.AccountView;
import org.reveno.atp.acceptance.views.OrderView;
import org.reveno.atp.api.Configuration.ModelType;
import org.reveno.atp.api.Configuration.MutableModelFailover;
import org.reveno.atp.api.Reveno;
import org.reveno.atp.api.commands.EmptyResult;
import org.reveno.atp.api.domain.Repository;
import java.util.Arrays;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.stream.IntStream;
public class Tests extends RevenoBaseTest {
    /**
     * End-to-end happy path: create an account and verify its view plus the
     * AccountCreatedEvent, then place a market order against it and verify
     * the OrderCreatedEvent and the updated account view.
     */
    @Test
    public void testBasic() throws Exception {
        Reveno reveno = createEngine();
        reveno.startup();
        Waiter accountCreatedEvent = listenFor(reveno, AccountCreatedEvent.class);
        Waiter orderCreatedEvent = listenFor(reveno, OrderCreatedEvent.class);
        long accountId = sendCommandSync(reveno, new CreateNewAccountCommand("USD", 1000_000L));
        AccountView accountView = reveno.query().find(AccountView.class, accountId);
        Assert.assertTrue(accountCreatedEvent.isArrived());
        Assert.assertEquals(accountId, accountView.accountId);
        Assert.assertEquals("USD", accountView.currency);
        Assert.assertEquals(1000_000L, accountView.balance);
        Assert.assertEquals(0, accountView.orders().size());
        long orderId = sendCommandSync(reveno, new NewOrderCommand(accountId, null, "EUR/USD", 134000, 1000, OrderType.MARKET));
        OrderView orderView = reveno.query().find(OrderView.class, orderId);
        accountView = reveno.query().find(AccountView.class, accountId);
        Assert.assertTrue(orderCreatedEvent.isArrived());
        Assert.assertEquals(orderId, orderView.id);
        Assert.assertEquals(1, accountView.orders().size());
        reveno.shutdown();
    }
@Test
public void testAsyncHandlers() throws Exception {
Reveno reveno = createEngine();
reveno.startup();
Waiter accountCreatedEvent = listenAsyncFor(reveno, AccountCreatedEvent.class, 1_000);
sendCommandsBatch(reveno, new CreateNewAccountCommand("USD", 1000_000L), 1_000);
Assert.assertTrue(accountCreatedEvent.isArrived());
reveno.shutdown();
}
@Test
public void testExceptionalEventHandler() throws Exception {
Reveno reveno = createEngine();
reveno.startup();
Waiter w = listenFor(reveno, AccountCreatedEvent.class, 1_000, (c) -> {
if (c == 500 || c == 600 || c == 601) {
throw new RuntimeException();
}
});
sendCommandsBatch(reveno, new CreateNewAccountCommand("USD", 1000_000L), 1_000);
// it's just fine since on exception we still processing
// but such events won't be committed
Assert.assertTrue(w.isArrived());
reveno.shutdown();
reveno = createEngine();
w = listenFor(reveno, AccountCreatedEvent.class, 4);
reveno.startup();
// after restart we expect that there will be 3 replayed
// events - the count of exceptions
Assert.assertFalse(w.isArrived(1));
Assert.assertEquals(1, w.getCount());
reveno.shutdown();
}
@Test
public void testExceptionalAsyncEventHandler() throws Exception {
TestRevenoEngine reveno = createEngine();
reveno.events().asyncEventExecutors(10);
reveno.startup();
Waiter w = listenAsyncFor(reveno, AccountCreatedEvent.class, 1_000, (c) -> {
if (c == 500 || c == 600 || c == 601) {
throw new RuntimeException();
}
});
sendCommandsBatch(reveno, new CreateNewAccountCommand("USD", 1000_000L), 1_000);
Assert.assertTrue(w.isArrived(5));
reveno.syncAll();
reveno.shutdown();
reveno = createEngine();
w = listenFor(reveno, AccountCreatedEvent.class, 4);
reveno.startup();
Assert.assertFalse(w.isArrived(1));
Assert.assertEquals(1, w.getCount());
reveno.shutdown();
}
@Test
public void testBatch() throws Exception {
Reveno reveno = createEngine();
Waiter accountsWaiter = listenFor(reveno, AccountCreatedEvent.class, 10_000);
Waiter ordersWaiter = listenFor(reveno, OrderCreatedEvent.class, 10_000);
reveno.startup();
generateAndSendCommands(reveno, 10_000);
Assert.assertEquals(10_000, reveno.query().select(AccountView.class).size());
Assert.assertEquals(10_000, reveno.query().select(OrderView.class).size());
Assert.assertTrue(accountsWaiter.isArrived());
Assert.assertTrue(ordersWaiter.isArrived());
reveno.shutdown();
}
@Test
public void testReplay() throws Exception {
testBasic();
Reveno reveno = createEngine();
Waiter accountCreatedEvent = listenFor(reveno, AccountCreatedEvent.class);
Waiter orderCreatedEvent = listenFor(reveno, OrderCreatedEvent.class);
reveno.startup();
Assert.assertFalse(accountCreatedEvent.isArrived(1));
Assert.assertFalse(orderCreatedEvent.isArrived(1));
Assert.assertEquals(1, reveno.query().select(AccountView.class).size());
Assert.assertEquals(1, reveno.query().select(OrderView.class).size());
reveno.shutdown();
}
@Test
public void testBatchReplay() throws Exception {
testBatch();
Reveno reveno = createEngine();
Waiter accountsWaiter = listenFor(reveno, AccountCreatedEvent.class, 1);
Waiter ordersWaiter = listenFor(reveno, OrderCreatedEvent.class, 1);
reveno.startup();
Assert.assertEquals(10_000, reveno.query().select(AccountView.class).size());
Assert.assertEquals(10_000, reveno.query().select(OrderView.class).size());
Assert.assertFalse(accountsWaiter.isArrived(1));
Assert.assertFalse(ordersWaiter.isArrived(1));
long accountId = sendCommandSync(reveno, new CreateNewAccountCommand("USD", 1000_000L));
Assert.assertEquals(10_001, accountId);
long orderId = sendCommandSync(reveno, new NewOrderCommand(accountId, null, "EUR/USD", 134000, 1000, OrderType.MARKET));
Assert.assertEquals(10_001, orderId);
reveno.shutdown();
}
@Test
public void testParallelRolling() throws Exception {
final boolean[] stop = {false};
AtomicLong counter = new AtomicLong(0);
ExecutorService transactionExecutor = Executors.newFixedThreadPool(10);
TestRevenoEngine reveno = createEngine();
reveno.startup();
IntStream.range(0, 10).forEach(i -> transactionExecutor.submit(() -> {
while (!stop[0]) {
counter.incrementAndGet();
try {
sendCommandSync(reveno, new CreateNewAccountCommand("USD", 1000_000L));
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
}));
IntStream.range(0, 20).forEach(i -> {
Waiter w = new Waiter(1);
reveno.roll(w::countDown);
w.awaitSilent();
sleep(200);
});
stop[0] = true;
sleep(10);
transactionExecutor.shutdown();
reveno.shutdown();
Reveno revenoRestarted = createEngine();
Waiter accountCreatedEvent = listenFor(reveno, AccountCreatedEvent.class);
revenoRestarted.startup();
Assert.assertFalse(accountCreatedEvent.isArrived(1));
Assert.assertEquals(counter.get(), reveno.query().select(AccountView.class).size());
revenoRestarted.shutdown();
}
@Test
public void testTransactionWithCompensatingActions() throws Exception {
if (modelType != ModelType.MUTABLE) {
return;
}
class TestTx {
}
class TestCmd {
}
Repository[] repo = new Repository[1];
Consumer<TestRevenoEngine> consumer = r -> {
r.config().mutableModel().mutableModelFailover(MutableModelFailover.COMPENSATING_ACTIONS);
r.domain().transactionWithCompensatingAction(CreateAccount.class, Transactions::createAccount, RollbackTransactions::rollbackCreateAccount);
r.domain().transactionWithCompensatingAction(AcceptOrder.class, Transactions::acceptOrder, RollbackTransactions::rollbackAcceptOrder);
r.domain().transactionWithCompensatingAction(Credit.class, Transactions::credit, RollbackTransactions::rollbackCredit);
r.domain().transactionWithCompensatingAction(Debit.class, Transactions::debit, RollbackTransactions::rollbackDebit);
r.domain().command(TestCmd.class, (c, d) -> d.executeTxAction(new TestTx()));
r.domain().transactionAction(TestTx.class, (a, b) -> {
repo[0] = b.repo();
throw new RuntimeException();
});
};
Reveno reveno = createEngine(consumer);
reveno.startup();
long accountId = sendCommandSync(reveno, new CreateNewAccountCommand("USD", 1000));
Future<EmptyResult> f = reveno.performCommands(Arrays.asList(new Credit(accountId, 15, 0), new Debit(accountId, 8),
new NewOrderCommand(accountId, null, "EUR/USD", 134000, 1, OrderType.MARKET), new TestCmd()));
Assert.assertFalse(f.get().isSuccess());
Assert.assertEquals(RuntimeException.class, f.get().getException().getClass());
Assert.assertEquals(1000, repo[0].get(Account.class, accountId).balance());
Assert.assertEquals(1000, reveno.query().find(AccountView.class, accountId).balance);
reveno.shutdown();
reveno = createEngine(consumer);
reveno.startup();
Assert.assertEquals(1000, reveno.query().find(AccountView.class, accountId).balance);
reveno.shutdown();
}
}
// ---- file boundary ----
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.common.util.rx;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import rx.Observable;
import rx.Producer;
import rx.Scheduler;
import rx.Scheduler.Worker;
import rx.Subscriber;
import rx.Subscription;
import rx.internal.producers.ProducerArbiter;
import rx.observers.SerializedSubscriber;
/**
* Apply a timeout for an entire subscription of a source {@link Observable}. Downstream subscribers will receive an
* {@link Subscriber#onError(Throwable) onError} event with a {@link TimeoutException} if a timeout happens before the
* source {@link Observable} completes.
* <p>
* The implementation is inspired by {@link rx.internal.operators.OnSubscribeTimeoutTimedWithFallback}, but it applies a
* timeout to the entire subscription, rather than between each {@link Subscriber#onNext(Object) onNext emission}.
* <p>
* All onNext, onError and onCompleted calls from the source {@link Observable} will be serialized to prevent any races,
* otherwise the {@link TimeoutException} generated internally would race with external events.
*
* @see rx.internal.operators.OnSubscribeTimeoutTimedWithFallback
* @see Observable#timeout(long, TimeUnit, Scheduler)
*/
class SubscriptionTimeout<T> implements Observable.Operator<T, T> {

    private final Supplier<Long> timeout;
    private final TimeUnit unit;
    private final Scheduler scheduler;

    SubscriptionTimeout(Supplier<Long> timeout, TimeUnit unit, Scheduler scheduler) {
        this.timeout = timeout;
        this.unit = unit;
        this.scheduler = scheduler;
    }

    /**
     * Wires the downstream subscriber to a {@link TimeoutSubscriber} and schedules a single
     * timeout task covering the whole subscription.
     */
    @Override
    public Subscriber<? super T> call(Subscriber<? super T> downstream) {
        // Fix: the previous version invoked timeout.get() twice and passed the first value to a
        // constructor that never used it; the value is now sampled once and used for scheduling.
        TimeoutSubscriber<T> upstream = new TimeoutSubscriber<>(downstream);
        downstream.add(upstream);
        downstream.setProducer(upstream.arbiter);

        // Serialize all events so the scheduled TimeoutException cannot race with
        // onNext/onError/onCompleted coming from the source Observable.
        final Worker worker = scheduler.createWorker();
        final SerializedSubscriber<T> safeUpstream = new SerializedSubscriber<>(upstream, true);
        upstream.add(worker);

        final long timeoutValue = timeout.get();
        Subscription task = worker.schedule(() -> safeUpstream.onError(new TimeoutException()),
                timeoutValue, unit);
        upstream.add(task);
        return safeUpstream;
    }

    /**
     * Forwards all events to the actual downstream subscriber, unsubscribing (which cancels the
     * pending timeout task and the worker) on terminal events.
     */
    private static final class TimeoutSubscriber<T> extends Subscriber<T> {
        private final Subscriber<? super T> actual;
        private final ProducerArbiter arbiter;

        // Fix: dropped the unused timeout/unit constructor parameters.
        private TimeoutSubscriber(Subscriber<? super T> actual) {
            this.actual = actual;
            this.arbiter = new ProducerArbiter();
        }

        @Override
        public void onNext(T t) {
            actual.onNext(t);
        }

        @Override
        public void onError(Throwable e) {
            unsubscribe();
            actual.onError(e);
        }

        @Override
        public void onCompleted() {
            unsubscribe();
            actual.onCompleted();
        }

        @Override
        public void setProducer(Producer p) {
            arbiter.setProducer(p);
        }
    }
}
// ---- file boundary ----
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include <vector>
#include "common/common_test.h"
#include "ir/anf.h"
#include "ir/dtype.h"
#include "frontend/operator/prim_to_function.h"
#include "base/core_ops.h"
namespace mindspore {
namespace prim {
// Minimal gtest fixture for PrimToFunction conversion tests; no per-test
// state or setup is required.
class TestPrimFunc : public UT::Common {
 public:
  TestPrimFunc() {}
  virtual void SetUp() {}
};
// kScalarAdd must be mapped to a (Number, Number) -> Number function type.
TEST_F(TestPrimFunc, ScalarAddTest) {
  auto prim = std::make_shared<Primitive>(prim::kScalarAdd);
  FunctionPtr prim_func = nullptr;
  PrimToFunction::GetInstance().GetFunction(prim, &prim_func);

  // Build the expected signature by hand and compare string representations.
  std::vector<std::shared_ptr<Type>> arg_types{std::make_shared<Number>(), std::make_shared<Number>()};
  std::shared_ptr<Type> ret_type = std::make_shared<Number>();
  Function expected = Function(arg_types, ret_type);

  std::cout << "func_add: " + expected.ToString() << std::endl;
  std::cout << "prim_func: " + prim_func->ToString() << std::endl;
  ASSERT_EQ(expected.ToString(), prim_func->ToString());
}
// "scalar_exp" must be mapped to a (Number) -> Number function type.
TEST_F(TestPrimFunc, ScalarExpTest) {
  auto prim = std::make_shared<Primitive>("scalar_exp");
  FunctionPtr func = nullptr;
  PrimToFunction::GetInstance().GetFunction(prim, &func);

  std::vector<std::shared_ptr<Type>> one_arg{std::make_shared<Number>()};
  std::shared_ptr<Type> retval = std::make_shared<Number>();
  // Consistency fix: the local was named func_add (copy/paste from the add
  // test) even though it is logged as "func_exp".
  Function func_exp = Function(one_arg, retval);

  std::cout << "func_exp: " + func_exp.ToString() << std::endl;
  std::cout << "prim_func: " + func->ToString() << std::endl;
  ASSERT_EQ(func_exp.ToString(), func->ToString());
}
} // namespace prim
} // namespace mindspore
// ---- file boundary ----
#include "source/common/tcp_proxy/upstream.h"
#include "envoy/upstream/cluster_manager.h"
#include "source/common/http/codec_client.h"
#include "source/common/http/codes.h"
#include "source/common/http/header_map_impl.h"
#include "source/common/http/headers.h"
#include "source/common/http/utility.h"
#include "source/common/runtime/runtime_features.h"
namespace Envoy {
namespace TcpProxy {
using TunnelingConfig =
envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy_TunnelingConfig;
// Adopts an already-established upstream TCP connection. Half-close is enabled
// so each direction of the proxied stream can finish independently.
TcpUpstream::TcpUpstream(Tcp::ConnectionPool::ConnectionDataPtr&& data,
                         Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks)
    : upstream_conn_data_(std::move(data)) {
  Network::ClientConnection& connection = upstream_conn_data_->connection();
  connection.enableHalfClose(true);
  upstream_conn_data_->addUpstreamCallbacks(upstream_callbacks);
}
// Pauses/resumes reads on the upstream socket. Returns false (no-op) when the
// upstream connection is already gone or no longer open.
bool TcpUpstream::readDisable(bool disable) {
  if (upstream_conn_data_ == nullptr ||
      upstream_conn_data_->connection().state() != Network::Connection::State::Open) {
    // Because we flush write downstream, we can have a case where upstream has already disconnected
    // and we are waiting to flush. If we had a watermark event during this time we should no
    // longer touch the upstream connection.
    return false;
  }
  upstream_conn_data_->connection().readDisable(disable);
  return true;
}
// Writes downstream bytes straight through to the upstream TCP connection.
void TcpUpstream::encodeData(Buffer::Instance& data, bool end_stream) {
  upstream_conn_data_->connection().write(data, end_stream);
}
// Registers a callback invoked as bytes are written to the upstream connection.
void TcpUpstream::addBytesSentCallback(Network::Connection::BytesSentCb cb) {
  upstream_conn_data_->connection().addBytesSentCallback(cb);
}
// Mirrors downstream close events onto the upstream connection. On remote close
// the connection data is released and handed back to the caller.
Tcp::ConnectionPool::ConnectionData*
TcpUpstream::onDownstreamEvent(Network::ConnectionEvent event) {
  if (event == Network::ConnectionEvent::RemoteClose) {
    // The close call may result in this object being deleted. Latch the
    // connection locally so it can be returned for potential draining.
    auto* conn_data = upstream_conn_data_.release();
    conn_data->connection().close(Network::ConnectionCloseType::FlushWrite);
    return conn_data;
  } else if (event == Network::ConnectionEvent::LocalClose) {
    upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush);
  }
  return nullptr;
}
// Base class for HTTP tunneling upstreams. Prepares the header parser that
// evaluates the user-configured headers_to_add entries per request.
HttpUpstream::HttpUpstream(Tcp::ConnectionPool::UpstreamCallbacks& callbacks,
                           const TunnelingConfig& config,
                           const StreamInfo::StreamInfo& downstream_info)
    : config_(config), downstream_info_(downstream_info), response_decoder_(*this),
      upstream_callbacks_(callbacks) {
  header_parser_ = Envoy::Router::HeaderParser::configure(config_.headers_to_add());
}
// Ensure any outstanding tunnel stream is reset when the wrapper is destroyed.
HttpUpstream::~HttpUpstream() { resetEncoder(Network::ConnectionEvent::LocalClose); }
// Applies read backpressure on the tunnel stream; false when no stream is active.
bool HttpUpstream::readDisable(bool disable) {
  if (!request_encoder_) {
    return false;
  }
  request_encoder_->getStream().readDisable(disable);
  return true;
}
// Encodes downstream bytes onto the tunnel request stream. end_stream marks the
// write half closed, which may reset the encoder once reading has finished too.
void HttpUpstream::encodeData(Buffer::Instance& data, bool end_stream) {
  if (!request_encoder_) {
    return;
  }
  request_encoder_->encodeData(data, end_stream);
  if (end_stream) {
    doneWriting();
  }
}
// Intentionally a no-op for HTTP tunneling; see rationale below.
void HttpUpstream::addBytesSentCallback(Network::Connection::BytesSentCb) {
  // The HTTP tunneling mode does not tickle the idle timeout when bytes are
  // sent to the kernel.
  // This can be implemented if any user cares about the difference in time
  // between it being sent to the HTTP/2 stack and out to the kernel.
}
// Any downstream event other than Connected tears the tunnel down locally
// without echoing the event back downstream (it originated there).
Tcp::ConnectionPool::ConnectionData*
HttpUpstream::onDownstreamEvent(Network::ConnectionEvent event) {
  if (event != Network::ConnectionEvent::Connected) {
    resetEncoder(Network::ConnectionEvent::LocalClose, false);
  }
  return nullptr;
}
// Codec-level stream reset: mark both halves closed and tear the tunnel down.
void HttpUpstream::onResetStream(Http::StreamResetReason, absl::string_view) {
  read_half_closed_ = true;
  write_half_closed_ = true;
  resetEncoder(Network::ConnectionEvent::LocalClose);
}
// Propagates codec write-buffer high watermark to the proxy callbacks.
void HttpUpstream::onAboveWriteBufferHighWatermark() {
  upstream_callbacks_.onAboveWriteBufferHighWatermark();
}
// Propagates codec write-buffer low watermark to the proxy callbacks.
void HttpUpstream::onBelowWriteBufferLowWatermark() {
  upstream_callbacks_.onBelowWriteBufferLowWatermark();
}
// Detaches from the request stream, resetting it if either half is still open.
// A still-pending tunnel establishment (conn_pool_callbacks_ set) is reported
// as a pool failure; otherwise the event is optionally forwarded downstream.
void HttpUpstream::resetEncoder(Network::ConnectionEvent event, bool inform_downstream) {
  if (!request_encoder_) {
    return;
  }
  request_encoder_->getStream().removeCallbacks(*this);
  if (!write_half_closed_ || !read_half_closed_) {
    request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset);
  }
  request_encoder_ = nullptr;
  // If we did not receive a valid CONNECT response yet we treat this as a pool
  // failure, otherwise we forward the event downstream.
  if (conn_pool_callbacks_ != nullptr) {
    conn_pool_callbacks_->onFailure();
    return;
  }
  if (inform_downstream) {
    upstream_callbacks_.onEvent(event);
  }
}
// Marks the read half closed; once both halves are closed the stream is reset.
void HttpUpstream::doneReading() {
  read_half_closed_ = true;
  if (write_half_closed_) {
    resetEncoder(Network::ConnectionEvent::LocalClose);
  }
}
// Marks the write half closed; once both halves are closed the stream is reset.
void HttpUpstream::doneWriting() {
  write_half_closed_ = true;
  if (read_half_closed_) {
    resetEncoder(Network::ConnectionEvent::LocalClose);
  }
}
// Obtains a raw TCP connection pool from the target cluster at default priority.
TcpConnPool::TcpConnPool(Upstream::ThreadLocalCluster& thread_local_cluster,
                         Upstream::LoadBalancerContext* context,
                         Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks)
    : upstream_callbacks_(upstream_callbacks) {
  conn_pool_data_ = thread_local_cluster.tcpConnPool(Upstream::ResourcePriority::Default, context);
}
// Cancels any in-flight connection request, closing excess connections.
TcpConnPool::~TcpConnPool() {
  if (upstream_handle_ != nullptr) {
    upstream_handle_->cancel(ConnectionPool::CancelPolicy::CloseExcess);
  }
}
// Initiates establishment of a new upstream TCP connection for the proxy.
void TcpConnPool::newStream(GenericConnectionPoolCallbacks& callbacks) {
  callbacks_ = &callbacks;
  // Given this function is reentrant, make sure we only reset the upstream_handle_ if given a
  // valid connection handle. If newConnection fails inline it may result in attempting to
  // select a new host, and a recursive call to initializeUpstreamConnection. In this case the
  // first call to newConnection will return null and the inner call will persist.
  Tcp::ConnectionPool::Cancellable* handle = conn_pool_data_.value().newConnection(*this);
  if (handle) {
    ASSERT(upstream_handle_ == nullptr);
    upstream_handle_ = handle;
  }
}
// Connection attempt failed: clear the handle and notify the generic callbacks.
void TcpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view,
                                Upstream::HostDescriptionConstSharedPtr host) {
  upstream_handle_ = nullptr;
  callbacks_->onGenericPoolFailure(reason, host);
}
// Connection established: wrap it in a TcpUpstream and hand it to the filter.
// conn_data is latched before the move so connection info can still be read.
void TcpConnPool::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data,
                              Upstream::HostDescriptionConstSharedPtr host) {
  upstream_handle_ = nullptr;
  Tcp::ConnectionPool::ConnectionData* latched_data = conn_data.get();
  Network::Connection& connection = conn_data->connection();
  auto upstream = std::make_unique<TcpUpstream>(std::move(conn_data), upstream_callbacks_);
  callbacks_->onGenericPoolReady(
      &connection.streamInfo(), std::move(upstream), host,
      latched_data->connection().connectionInfoProvider().localAddress(),
      latched_data->connection().streamInfo().downstreamAddressProvider().sslConnection());
}
// Obtains an HTTP connection pool matching the configured codec type; for
// HTTP/1 no explicit protocol is requested.
HttpConnPool::HttpConnPool(Upstream::ThreadLocalCluster& thread_local_cluster,
                           Upstream::LoadBalancerContext* context, const TunnelingConfig& config,
                           Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks,
                           Http::CodecType type)
    : config_(config), type_(type), upstream_callbacks_(upstream_callbacks),
      downstream_info_(context->downstreamConnection()->streamInfo()) {
  absl::optional<Http::Protocol> protocol;
  if (type_ == Http::CodecType::HTTP3) {
    protocol = Http::Protocol::Http3;
  } else if (type_ == Http::CodecType::HTTP2) {
    protocol = Http::Protocol::Http2;
  }
  conn_pool_data_ =
      thread_local_cluster.httpConnPool(Upstream::ResourcePriority::Default, protocol, context);
}
// Cancels any in-flight stream request on destruction.
HttpConnPool::~HttpConnPool() {
  if (upstream_handle_ != nullptr) {
    // Because HTTP connections are generally shorter lived and have a higher probability of use
    // before going idle, they are closed with Default rather than CloseExcess.
    upstream_handle_->cancel(ConnectionPool::CancelPolicy::Default);
  }
}
// Creates the codec-specific upstream wrapper and requests a new stream from
// the pool; HTTP/2 is also used for HTTP/3 here (both go through Http2Upstream).
void HttpConnPool::newStream(GenericConnectionPoolCallbacks& callbacks) {
  callbacks_ = &callbacks;
  if (type_ == Http::CodecType::HTTP1) {
    upstream_ = std::make_unique<Http1Upstream>(upstream_callbacks_, config_, downstream_info_);
  } else {
    upstream_ = std::make_unique<Http2Upstream>(upstream_callbacks_, config_, downstream_info_);
  }
  Tcp::ConnectionPool::Cancellable* handle =
      conn_pool_data_.value().newStream(upstream_->responseDecoder(), *this);
  if (handle != nullptr) {
    upstream_handle_ = handle;
  }
}
// Stream establishment failed: clear the handle and notify the generic callbacks.
void HttpConnPool::onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view,
                                 Upstream::HostDescriptionConstSharedPtr host) {
  upstream_handle_ = nullptr;
  callbacks_->onGenericPoolFailure(reason, host);
}
// Stream ready: attach the encoder (which sends the tunnel request) and install
// the Callbacks object that will signal generic pool readiness/failure once the
// tunnel response is evaluated.
void HttpConnPool::onPoolReady(Http::RequestEncoder& request_encoder,
                               Upstream::HostDescriptionConstSharedPtr host,
                               const StreamInfo::StreamInfo& info, absl::optional<Http::Protocol>) {
  upstream_handle_ = nullptr;
  upstream_->setRequestEncoder(request_encoder,
                               host->transportSocketFactory().implementsSecureTransport());
  upstream_->setConnPoolCallbacks(std::make_unique<HttpConnPool::Callbacks>(
      *this, host, info.downstreamAddressProvider().sslConnection()));
}
// Invoked once the tunnel is established; transfers ownership of the upstream
// wrapper to the filter callbacks.
void HttpConnPool::onGenericPoolReady(Upstream::HostDescriptionConstSharedPtr& host,
                                      const Network::Address::InstanceConstSharedPtr& local_address,
                                      Ssl::ConnectionInfoConstSharedPtr ssl_info) {
  callbacks_->onGenericPoolReady(nullptr, std::move(upstream_), host, local_address, ssl_info);
}
// HTTP/2 (and HTTP/3) tunneling upstream; all behavior lives in the base class.
Http2Upstream::Http2Upstream(Tcp::ConnectionPool::UpstreamCallbacks& callbacks,
                             const TunnelingConfig& config,
                             const StreamInfo::StreamInfo& downstream_info)
    : HttpUpstream(callbacks, config, downstream_info) {}
// An HTTP/2 tunnel is only considered established on an exact 200 response.
// (Idiom fix: collapsed the if/return-false/return-true into a direct boolean.)
bool Http2Upstream::isValidResponse(const Http::ResponseHeaderMap& headers) {
  return Http::Utility::getResponseStatus(headers) == 200;
}
// Sends the HTTP/2 request that establishes the tunnel: CONNECT by default
// (with :protocol set to "bytestream"), or POST when configured.
void Http2Upstream::setRequestEncoder(Http::RequestEncoder& request_encoder, bool is_ssl) {
  request_encoder_ = &request_encoder;
  request_encoder_->getStream().addCallbacks(*this);
  // :scheme reflects whether the upstream transport is TLS.
  const std::string& scheme =
      is_ssl ? Http::Headers::get().SchemeValues.Https : Http::Headers::get().SchemeValues.Http;
  auto headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>({
      {Http::Headers::get().Method, config_.use_post() ? "POST" : "CONNECT"},
      {Http::Headers::get().Host, config_.hostname()},
      {Http::Headers::get().Path, "/"},
      {Http::Headers::get().Scheme, scheme},
  });
  if (!config_.use_post()) {
    headers->addReference(Http::Headers::get().Protocol,
                          Http::Headers::get().ProtocolValues.Bytestream);
  }
  // Apply the user-configured headers_to_add entries.
  header_parser_->evaluateHeaders(*headers, downstream_info_);
  const auto status = request_encoder_->encodeHeaders(*headers, false);
  // Encoding can only fail on missing required request headers.
  ASSERT(status.ok());
}
// HTTP/1.1 tunneling upstream; overrides the request/response specifics below.
Http1Upstream::Http1Upstream(Tcp::ConnectionPool::UpstreamCallbacks& callbacks,
                             const TunnelingConfig& config,
                             const StreamInfo::StreamInfo& downstream_info)
    : HttpUpstream(callbacks, config, downstream_info) {}
// Sends the HTTP/1.1 tunnel request (CONNECT, or POST when configured) after
// switching the codec stream into raw TCP tunneling mode.
void Http1Upstream::setRequestEncoder(Http::RequestEncoder& request_encoder, bool) {
  request_encoder_ = &request_encoder;
  request_encoder_->getStream().addCallbacks(*this);
  request_encoder_->enableTcpTunneling();
  ASSERT(request_encoder_->http1StreamEncoderOptions() != absl::nullopt);
  auto headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>({
      {Http::Headers::get().Method, config_.use_post() ? "POST" : "CONNECT"},
      {Http::Headers::get().Host, config_.hostname()},
  });
  if (config_.use_post()) {
    // Path is required for POST requests.
    headers->addReference(Http::Headers::get().Path, "/");
  }
  // Apply the user-configured headers_to_add entries.
  header_parser_->evaluateHeaders(*headers, downstream_info_);
  const auto status = request_encoder_->encodeHeaders(*headers, false);
  // Encoding can only fail on missing required request headers.
  ASSERT(status.ok());
}
// Unlike HTTP/2, any 2xx status establishes the HTTP/1.1 tunnel.
bool Http1Upstream::isValidResponse(const Http::ResponseHeaderMap& headers) {
  // According to RFC7231 any 2xx response indicates that the connection is
  // established.
  // Any 'Content-Length' or 'Transfer-Encoding' header fields MUST be ignored.
  // https://tools.ietf.org/html/rfc7231#section-4.3.6
  return Http::CodeUtility::is2xx(Http::Utility::getResponseStatus(headers));
}
// Overrides the base implementation without the doneWriting() half-close
// bookkeeping that HttpUpstream::encodeData performs on end_stream.
void Http1Upstream::encodeData(Buffer::Instance& data, bool end_stream) {
  if (!request_encoder_) {
    return;
  }
  request_encoder_->encodeData(data, end_stream);
}
} // namespace TcpProxy
} // namespace Envoy
// ---- file boundary ----
// reponame: faizol/restinio
// 256-entry membership table: the entry for byte c is nonzero when c may appear
// in Base64-encoded text (ALPHA, DIGIT, '+', '/', and the '=' padding byte);
// values follow the RFC 4648 alphabet.
template< typename C >
const C * is_base64_char_lut()
{
	static constexpr C table[] = {
		// 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		//                                                              |  +  |                    |  /  |
		// 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1,
		// |................ digits .................................|             |  =  |
		// 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
		//       |.....................................ALPHA...............................................|
		// 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
		0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
		// |.............................ALPHA.............................|
		// 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
		//       |.....................................alpha...............................................|
		// 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
		0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
		// |.............................alpha.............................|
		// 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
		// 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	return table;
}
// The 64-character RFC 4648 Base64 alphabet, indexed by 6-bit value:
// 'A'-'Z' (0-25), 'a'-'z' (26-51), '0'-'9' (52-61), '+' (62), '/' (63).
template< typename C >
const C * base64_alphabet()
{
	static constexpr C table[] = {
		0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d,
		0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a,
		0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d,
		0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a,
		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x2b, 0x2f
	};
	return table;
}
// Inverse of the RFC 4648 alphabet: maps a Base64 character byte to its 6-bit
// value ('A'->0 ... '/'->63). Bytes outside the alphabet (including '=') map
// to 0; validity must be checked separately via is_base64_char_lut().
template< typename C >
const C * base64_decode_lut()
{
	static constexpr C table[] = {
		// 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		//                                                              |  +  |                    |  /  |
		// 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 62, 0, 0, 0, 63,
		// |................ digits .................................|             |  =  |
		// 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
		52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 0, 0, 0, 0, 0, 0,
		//       |.....................................ALPHA...............................................|
		// 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
		0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
		// |.............................ALPHA.............................|
		// 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
		15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0,
		//       |.....................................alpha...............................................|
		// 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
		0, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
		// |.............................alpha.............................|
		// 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
		41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 0, 0, 0, 0, 0,
		// 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		// 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	return table;
}
// ---- file boundary ----
// filename: ide/schema2beans/test/unit/data/TestDupInternalNames.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* TestDupInternalNames - test what happens when different complexTypes
* have the same named element declaration
* inside of them. Duplicate type names should
* get renamed.
*
* The following test assumes that we know the content of the
* graph as we get elements, add and change them. Therefore, the TestDupInternalNames.xml
* file and this java test should be kept in sync.
*
*/
import java.io.*;
import java.util.*;
import org.w3c.dom.*;
import java.math.*;
import root.*;
/**
 * Regression test: different complexTypes that declare identically named
 * internal elements must have the duplicate generated type names renamed.
 * Kept in sync with TestDupInternalNames.xml.
 */
public class TestDupInternalNames extends BaseTest {

    public static void main(String[] argv) {
        TestDupInternalNames test = new TestDupInternalNames();
        if (argv.length > 0) {
            test.setDocumentDir(argv[0]);
        }
        try {
            test.run();
        } catch (Exception e) {
            e.printStackTrace();
            System.exit(1);
        }
        System.exit(0);
    }

    public void run() throws Exception {
        this.readDocument();

        out("creating the bean graph");
        Root graph = Root.read(doc);

        // Check that we can read the graph an it is complete
        out("bean graph created");
        graph.write(out);

        // Mutate both same-named point-number elements to prove the generated
        // types were correctly disambiguated.
        AnalogInput input = graph.getIn();
        input.setPointNumber(new BigInteger("5"));
        AnalogOutput output = graph.getOut();
        output.setPointNumber(new BigInteger("89"));
        graph.write(out);
    }
}
| 861 |
419 | <filename>thirdparty/win/miracl/miracl_osmt/source/cardano.cpp
/* Solving cubic x^3+AX+B using Cardano's formula */
/* cl /O2 /GX cardano.cpp zzn2.cpp zzn.cpp big.cpp miracl.lib */
#include <iostream>
#include <ctime>
#include "zzn2.h"
using namespace std;
Miracl precision(50,0);
//
// Shanks' method, modified to find cube roots in GF(p^2).
// Returns 0 when n is not a cubic residue; callers treat a zero
// return as the failure sentinel (see main below).
//
ZZn2 shanks(ZZn2 n)
{
    int i,s;
    Big q,p=get_modulus();
    ZZn2 t,W,R,V;
    BOOL inv;

    // Cubic-residue test: n is a residue iff n^((p^2-1)/3) == 1
    if (pow(n,(p*p-1)/3)!=1)
    {
//      cout << "Not a cubic residue" << endl;
        return (ZZn2)0;
    }

    // W = a random cubic non-residue, used below to correct the
    // candidate root by powers of V = W^q
    W=randn2();
    while (pow(W,(p*p-1)/3)==1) W=randn2();

    // Factor p^2-1 = q * 3^s with 3 not dividing q
    s=0;
    q=p*p-1;
    while (q%3==0)
    {
        q/=3;
        s++;
    }

    // First root estimate; depending on q mod 3 the exponent yields
    // either an approximation of n^(1/3) (inv==FALSE) or of its
    // inverse (inv==TRUE)
    if ((q+1)%3==0)
    {
        R=pow(n,(q+1)/3);
        inv=FALSE;
    }
    else
    {
        R=pow(n,(q-1)/3);
        inv=TRUE;
    }
    V=pow(W,q);
    forever
    {
        // t is the residual error: t == 1 exactly when R^3 == n
        // (or R^3 == 1/n in the inverse case)
        if (!inv) t=(R*R*R)/n;
        else t=(R*R*R)*n;

        // i = number of cubings needed to drive t to 1
        for (i=0;;i++ )
        {
            if (t==1) break;
            t=t*t*t;
        }
        if (i==0)
        {
            // Converged: return the root, inverting if we tracked 1/n
            if (!inv) return R;
            else return (ZZn2)1/R;
        }

        // Cancel the remaining error with an appropriate power of V
        R=R*pow(V,pow((Big)3,s-i-1));
    }
}
int main(int argc, char *argv[])
{
    int i,j,lt,gt;
    Big p;
    ZZn x,A,B,D;
    ZZn2 r,r3,r1,r2,CD,cu;
    time_t seed;

    // Seed MIRACL's PRNG from the wall clock
    time(&seed);
    irand((long)seed);

//
// Generate a random prime, (not 1 mod 8)
//
    cout << "Generate a random prime and a random cubic, and try to solve it!" << endl;
    cout << "Solutions might be Complex" << endl << endl;
    p=rand(80,2);
    while (p%8==1 || !prime(p)) p+=1;
    cout << "p= " << p << endl;
    cout << "p%24= " << p%24 << endl;
    modulo(p);

// Find a cube root of unity (used to step between the three cube roots)
    do
    {
        cu=pow((ZZn2)randn2(),(p*p-1)/3);
    } while(cu==1);
//  cout << "cube root of unity= " << cu << endl;

// Generate random parameters A, B of the cubic
    A=(ZZn)rand(p);
    B=(ZZn)rand(p);
    cout << "Finding a root of x^3+AX+B mod p, where" << endl;
    cout << "A= " << A << endl;
    cout << "B= " << B << endl;

// Cardano's formula: roots are u+v with u^3, v^3 = -B/2 +/- sqrt(D),
// where D = B^2/4 + A^3/27 and u*v must equal -A/3
    D=(B*B)/4 + (A*A*A)/27;
    CD=sqrt((ZZn2)D); // Solution may be "complex"
    r1=(ZZn2)-B/2+CD; r2=(ZZn2)-B/2-CD;
    r1=shanks(r1); // cube roots
    r2=shanks(r2);
    if (r1==0 || r2==0)
    {
        cout << "No roots exist" << endl;
        return 0;
    }

// search for "right" r2: pair the cube roots so that r1*r2 == -A/3
    if (r1*r2!=-A/3)
        r2*=cu;
    if (r1*r2!=-A/3)
        r2*=cu;
    r=r1+r2;
    cout << "root 1= " << r << endl;
    if (r*r*r+A*r+B!=0) cout << "Check failed" << endl;

// try next value for r1 (multiply by the cube root of unity)
    r1*=cu;
    if (r1*r2!=-A/3)
        r2*=cu;
    if (r1*r2!=-A/3)
        r2*=cu;
    r=r1+r2;
    cout << "root 2= " << r << endl;
    if (r*r*r+A*r+B!=0) cout << "Check failed" << endl;

// third and final root
    r1*=cu;
    if (r1*r2!=-A/3)
        r2*=cu;
    if (r1*r2!=-A/3)
        r2*=cu;
    r=r1+r2;
    cout << "root 3= " << r << endl;
    if (r*r*r+A*r+B!=0) cout << "Check failed" << endl;
    return 0;
}
| 1,747 |
305 | <filename>aat/core/order_book/base.py
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Tuple, Union
from .price_level import PriceLevelRO
from ..data import Order
from ...config import Side
class OrderBookBase(ABC):
    """Abstract interface that every order-book implementation must satisfy.

    Concrete subclasses provide the order lifecycle operations
    (``add``/``cancel``/``change``/``find``) and read-only views of the
    book (``topOfBook``, ``spread``, ``level``/``levels``, ``bids``/``asks``).
    """

    @abstractmethod
    def reset(self) -> None:
        """Reset the book to an empty state."""
        pass

    @abstractmethod
    def add(self, order: Order) -> None:
        """Add a new order to the book."""
        pass

    @abstractmethod
    def cancel(self, order: Order) -> None:
        """Remove an order from the book."""
        pass

    @abstractmethod
    def change(self, order: Order) -> None:
        """Apply a modification to an order already resting in the book."""
        pass

    @abstractmethod
    def find(self, order: Order) -> Optional[Order]:
        """Return the matching resting order, or None if it is not in the book."""
        pass

    @abstractmethod
    def topOfBook(self) -> Dict[Side, PriceLevelRO]:
        """Return the best price level for each side of the book."""
        pass

    @abstractmethod
    def spread(self) -> float:
        """Return the bid/ask spread."""
        pass

    @abstractmethod
    def level(self, level: int = 0, price: Optional[float] = None) -> Tuple:
        """Return a single price level.

        Args:
            level: depth index used when ``price`` is not given
                (presumably 0 == top of book — confirm in implementations).
            price: exact price of the level to fetch, if given.
        """
        pass

    @abstractmethod
    def levels(self, levels: int = 0) -> Dict[Side, List[PriceLevelRO]]:
        """Return price levels for both sides of the book."""
        pass

    @abstractmethod
    def bids(
        self, levels: int = 0
    ) -> Union[PriceLevelRO, List[Optional[PriceLevelRO]]]:
        """Return the bid side: a single level, or a list of levels."""
        pass

    @abstractmethod
    def asks(
        self, levels: int = 0
    ) -> Union[PriceLevelRO, List[Optional[PriceLevelRO]]]:
        """Return the ask side: a single level, or a list of levels."""
        pass

    @abstractmethod
    def __iter__(self) -> Iterator[Order]:
        """Iterate over the orders resting in the book."""
        pass
| 546 |
587 | <filename>src/lib/OpenEXR/ImfRgbaFile.h
//
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) Contributors to the OpenEXR Project.
//
#ifndef INCLUDED_IMF_RGBA_FILE_H
#define INCLUDED_IMF_RGBA_FILE_H
//-----------------------------------------------------------------------------
//
// Simplified RGBA image I/O
//
// class RgbaOutputFile
// class RgbaInputFile
//
//-----------------------------------------------------------------------------
#include "ImfExport.h"
#include "ImfNamespace.h"
#include "ImfFrameBuffer.h"
#include "ImfHeader.h"
#include "ImfRgba.h"
#include "ImfThreading.h"
#include <ImathBox.h>
#include <ImathVec.h>
#include <half.h>
#include <string>
OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_ENTER
//-------------------------------------------------------
// Utility to compute the origin-based pointer address.
//
// With a large data-window offset, naively multiplying the
// origin by the strides can wrap around, especially on
// 32-bit machines; these helpers widen to 64 bits first.
//-------------------------------------------------------

inline const Rgba*
ComputeBasePointer (
    const Rgba* ptr,
    const IMATH_NAMESPACE::V2i& origin,
    int64_t w,
    size_t xStride = 1,
    size_t yStride = 0)
{
    // A zero yStride means "rows are tightly packed at width w".
    if (yStride == 0) yStride = w;

    // Widen each component before multiplying so the intermediate
    // products cannot overflow 32-bit arithmetic.
    const int64_t xOffset =
        static_cast<int64_t> (origin.x) * static_cast<int64_t> (xStride);
    const int64_t yOffset =
        static_cast<int64_t> (origin.y) * static_cast<int64_t> (yStride);
    return ptr - xOffset - yOffset;
}
inline const Rgba*
ComputeBasePointer (const Rgba* ptr, const IMATH_NAMESPACE::Box2i& dataWindow)
{
    // Delegate to the stride-based overload, passing the data
    // window's width computed in 64-bit arithmetic.
    const int64_t width =
        static_cast<int64_t> (dataWindow.max.x) -
        static_cast<int64_t> (dataWindow.min.x) + 1;
    return ComputeBasePointer (ptr, dataWindow.min, width);
}
inline Rgba*
ComputeBasePointer (
    Rgba* ptr,
    const IMATH_NAMESPACE::V2i& origin,
    int64_t w,
    size_t xStride = 1,
    size_t yStride = 0)
{
    // A zero yStride means "rows are tightly packed at width w".
    if (yStride == 0) yStride = w;

    // Widen each component before multiplying so the intermediate
    // products cannot overflow 32-bit arithmetic.
    const int64_t xOffset =
        static_cast<int64_t> (origin.x) * static_cast<int64_t> (xStride);
    const int64_t yOffset =
        static_cast<int64_t> (origin.y) * static_cast<int64_t> (yStride);
    return ptr - xOffset - yOffset;
}
inline Rgba*
ComputeBasePointer (Rgba* ptr, const IMATH_NAMESPACE::Box2i& dataWindow)
{
    // Delegate to the stride-based overload, passing the data
    // window's width computed in 64-bit arithmetic.
    const int64_t width =
        static_cast<int64_t> (dataWindow.max.x) -
        static_cast<int64_t> (dataWindow.min.x) + 1;
    return ComputeBasePointer (ptr, dataWindow.min, width);
}
//
// RGBA output file.
//
class IMF_EXPORT_TYPE RgbaOutputFile
{
  public:
    //---------------------------------------------------
    // Constructor -- header is constructed by the caller
    //---------------------------------------------------

    IMF_EXPORT
    RgbaOutputFile (
        const char name[],
        const Header& header,
        RgbaChannels rgbaChannels = WRITE_RGBA,
        int numThreads = globalThreadCount ());

    //----------------------------------------------------
    // Constructor -- header is constructed by the caller,
    // file is opened by the caller, destructor will not
    // automatically close the file.
    //----------------------------------------------------

    IMF_EXPORT
    RgbaOutputFile (
        OPENEXR_IMF_INTERNAL_NAMESPACE::OStream& os,
        const Header& header,
        RgbaChannels rgbaChannels = WRITE_RGBA,
        int numThreads = globalThreadCount ());

    //----------------------------------------------------------------
    // Constructor -- header data are explicitly specified as function
    // call arguments (empty dataWindow means "same as displayWindow")
    //----------------------------------------------------------------

    IMF_EXPORT
    RgbaOutputFile (
        const char name[],
        const IMATH_NAMESPACE::Box2i& displayWindow,
        const IMATH_NAMESPACE::Box2i& dataWindow = IMATH_NAMESPACE::Box2i (),
        RgbaChannels rgbaChannels = WRITE_RGBA,
        float pixelAspectRatio = 1,
        const IMATH_NAMESPACE::V2f screenWindowCenter =
            IMATH_NAMESPACE::V2f (0, 0),
        float screenWindowWidth = 1,
        LineOrder lineOrder = INCREASING_Y,
        Compression compression = PIZ_COMPRESSION,
        int numThreads = globalThreadCount ());

    //-----------------------------------------------
    // Constructor -- like the previous one, but both
    // the display window and the data window are
    // Box2i (V2i (0, 0), V2i (width - 1, height -1))
    //-----------------------------------------------

    IMF_EXPORT
    RgbaOutputFile (
        const char name[],
        int width,
        int height,
        RgbaChannels rgbaChannels = WRITE_RGBA,
        float pixelAspectRatio = 1,
        const IMATH_NAMESPACE::V2f screenWindowCenter =
            IMATH_NAMESPACE::V2f (0, 0),
        float screenWindowWidth = 1,
        LineOrder lineOrder = INCREASING_Y,
        Compression compression = PIZ_COMPRESSION,
        int numThreads = globalThreadCount ());

    //-----------
    // Destructor
    //-----------

    IMF_EXPORT
    virtual ~RgbaOutputFile ();

    //------------------------------------------------
    // Define a frame buffer as the pixel data source:
    // Pixel (x, y) is at address
    //
    //  base + x * xStride + y * yStride
    //
    //------------------------------------------------

    IMF_EXPORT
    void setFrameBuffer (const Rgba* base, size_t xStride, size_t yStride);

    //---------------------------------------------
    // Write pixel data (see class Imf::OutputFile)
    //---------------------------------------------

    IMF_EXPORT
    void writePixels (int numScanLines = 1);
    IMF_EXPORT
    int currentScanLine () const;

    //--------------------------
    // Access to the file header
    //--------------------------

    IMF_EXPORT
    const Header& header () const;
    IMF_EXPORT
    const FrameBuffer& frameBuffer () const;
    IMF_EXPORT
    const IMATH_NAMESPACE::Box2i& displayWindow () const;
    IMF_EXPORT
    const IMATH_NAMESPACE::Box2i& dataWindow () const;
    IMF_EXPORT
    float pixelAspectRatio () const;
    IMF_EXPORT
    const IMATH_NAMESPACE::V2f screenWindowCenter () const;
    IMF_EXPORT
    float screenWindowWidth () const;
    IMF_EXPORT
    LineOrder lineOrder () const;
    IMF_EXPORT
    Compression compression () const;
    IMF_EXPORT
    RgbaChannels channels () const;

    // --------------------------------------------------------------------
    // Update the preview image (see Imf::OutputFile::updatePreviewImage())
    // --------------------------------------------------------------------

    IMF_EXPORT
    void updatePreviewImage (const PreviewRgba[]);

    //-----------------------------------------------------------------------
    // Rounding control for luminance/chroma images:
    //
    // If the output file contains luminance and chroma channels (WRITE_YC
    // or WRITE_YCA), then the significands of the luminance and
    // chroma values are rounded to roundY and roundC bits respectively (see
    // function half::round()).  Rounding improves compression with minimal
    // image degradation, usually much less than the degradation caused by
    // chroma subsampling.  By default, roundY is 7, and roundC is 5.
    //
    // If the output file contains RGB channels or a luminance channel,
    // without chroma, then no rounding is performed.
    //-----------------------------------------------------------------------

    IMF_EXPORT
    void setYCRounding (unsigned int roundY, unsigned int roundC);

    //----------------------------------------------------
    // Break a scan line -- for testing and debugging only
    // (see Imf::OutputFile::breakScanLine())
    //
    // Warning: Calling this function usually results in a
    // broken image file.  The file or parts of it may not
    // be readable, or the file may contain bad data.
    //
    //----------------------------------------------------

    IMF_EXPORT
    void breakScanLine (int y, int offset, int length, char c);

  private:
    // Not copyable or movable.
    RgbaOutputFile (const RgbaOutputFile&) = delete;
    RgbaOutputFile& operator= (const RgbaOutputFile&) = delete;
    RgbaOutputFile (RgbaOutputFile&&) = delete;
    RgbaOutputFile& operator= (RgbaOutputFile&&) = delete;

    // Helper that converts RGBA to luminance/chroma on the fly
    // (defined in the implementation file).
    class IMF_HIDDEN ToYca;

    OutputFile* _outputFile; // underlying generic scan-line writer
    ToYca*      _toYca;      // non-null only for WRITE_YC / WRITE_YCA files
};
//
// RGBA input file
//
class IMF_EXPORT_TYPE RgbaInputFile
{
  public:
    //-------------------------------------------------------
    // Constructor -- opens the file with the specified name,
    // destructor will automatically close the file.
    //-------------------------------------------------------

    IMF_EXPORT
    RgbaInputFile (const char name[], int numThreads = globalThreadCount ());

    //-----------------------------------------------------------
    // Constructor -- attaches the new RgbaInputFile object to a
    // file that has already been opened by the caller.
    // Destroying the RgbaInputFile object will not automatically
    // close the file.
    //-----------------------------------------------------------

    IMF_EXPORT
    RgbaInputFile (
        OPENEXR_IMF_INTERNAL_NAMESPACE::IStream& is,
        int numThreads = globalThreadCount ());

    //--------------------------------------------------------------
    // Constructors -- the same as the previous two, but the names
    // of the red, green, blue, alpha, luminance and chroma channels
    // are expected to be layerName.R, layerName.G, etc.
    //--------------------------------------------------------------

    IMF_EXPORT
    RgbaInputFile (
        const char name[],
        const std::string& layerName,
        int numThreads = globalThreadCount ());
    IMF_EXPORT
    RgbaInputFile (
        OPENEXR_IMF_INTERNAL_NAMESPACE::IStream& is,
        const std::string& layerName,
        int numThreads = globalThreadCount ());

    //--------------------------------------------------------------
    // Constructors -- the same as the previous, but the specified
    // part is opened instead of the first (or only) part within the file
    //--------------------------------------------------------------

    IMF_EXPORT
    RgbaInputFile (
        int partNumber,
        const char name[],
        int numThreads = globalThreadCount ());
    IMF_EXPORT
    RgbaInputFile (
        int partNumber,
        const char name[],
        const std::string& layerName,
        int numThreads = globalThreadCount ());
    IMF_EXPORT
    RgbaInputFile (
        int partNumber,
        OPENEXR_IMF_INTERNAL_NAMESPACE::IStream& is,
        int numThreads = globalThreadCount ());
    IMF_EXPORT
    RgbaInputFile (
        int partNumber,
        OPENEXR_IMF_INTERNAL_NAMESPACE::IStream& is,
        const std::string& layerName,
        int numThreads = globalThreadCount ());

    //-----------
    // Destructor
    //-----------

    IMF_EXPORT
    virtual ~RgbaInputFile ();

    //-----------------------------------------------------
    // Define a frame buffer as the pixel data destination:
    // Pixel (x, y) is at address
    //
    //  base + x * xStride + y * yStride
    //
    //-----------------------------------------------------

    IMF_EXPORT
    void setFrameBuffer (Rgba* base, size_t xStride, size_t yStride);

    //----------------------------------------------------------------
    // Switch to a different layer within the current part
    //
    // subsequent calls to readPixels()
    // will read channels layerName.R, layerName.G, etc.
    // After each call to setLayerName(), setFrameBuffer() must be
    // called at least once before the next call to readPixels().
    //----------------------------------------------------------------

    IMF_EXPORT
    void setLayerName (const std::string& layerName);

    //-------------------------------
    // Return number of parts in file
    //-------------------------------

    IMF_EXPORT
    int parts () const;

    //----------------------------------------------------------------
    // Switch to a different part -- subsequent calls to readPixels()
    // will read channels from given part
    // After each call to setPart() or setPartAndLayer(), setFrameBuffer() must be
    // called at least once before the next call to readPixels().
    //----------------------------------------------------------------

    IMF_EXPORT
    void setPart (int part);

    //----------------------------------------------------------
    // Equivalent to 'setPart(part); setLayerName(layerName);'
    //----------------------------------------------------------

    IMF_EXPORT
    void setPartAndLayer (int part, const std::string& layerName);

    //-------------------------------------------
    // Read pixel data (see class Imf::InputFile)
    //-------------------------------------------

    IMF_EXPORT
    void readPixels (int scanLine1, int scanLine2);
    IMF_EXPORT
    void readPixels (int scanLine);

    //--------------------------
    // Access to the file header
    //--------------------------

    IMF_EXPORT
    const Header& header () const;
    IMF_EXPORT
    const FrameBuffer& frameBuffer () const;
    IMF_EXPORT
    const IMATH_NAMESPACE::Box2i& displayWindow () const;
    IMF_EXPORT
    const IMATH_NAMESPACE::Box2i& dataWindow () const;
    IMF_EXPORT
    float pixelAspectRatio () const;
    IMF_EXPORT
    const IMATH_NAMESPACE::V2f screenWindowCenter () const;
    IMF_EXPORT
    float screenWindowWidth () const;
    IMF_EXPORT
    LineOrder lineOrder () const;
    IMF_EXPORT
    Compression compression () const;
    IMF_EXPORT
    RgbaChannels channels () const;
    IMF_EXPORT
    const char* fileName () const;
    IMF_EXPORT
    bool isComplete () const;

    //----------------------------------
    // Access to the file format version
    //----------------------------------

    IMF_EXPORT
    int version () const;

  private:
    // Not copyable or movable.
    RgbaInputFile (const RgbaInputFile&) = delete;
    RgbaInputFile& operator= (const RgbaInputFile&) = delete;
    RgbaInputFile (RgbaInputFile&&) = delete;
    RgbaInputFile& operator= (RgbaInputFile&&) = delete;

    // Helper that converts luminance/chroma files back to RGBA
    // (defined in the implementation file).
    class IMF_HIDDEN FromYca;

    MultiPartInputFile* _multiPartFile;
    InputPart*          _inputPart;
    FromYca*            _fromYca;           // non-null only for Y/C files
    std::string         _channelNamePrefix; // "layerName." or empty
};
OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_EXIT
#endif
| 5,506 |
435 | <reponame>luigidcsoares/psychec<filename>cnippet/wrapper/Python/Singleton.py
# -----------------------------------------------------------------------------
# Copyright (c) 2017 <NAME> (<EMAIL>)
#
# All rights reserved. Unauthorized copying of this file, through any
# medium, is strictly prohibited.
#
# This software is provided on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, explicit or implicit. In no event shall the
# author be liable for any claim or damages.
# -----------------------------------------------------------------------------
class Singleton(type):
    """Metaclass that gives each class exactly one shared instance.

    The first construction call builds the instance through the normal
    ``type.__call__`` path and caches it; every subsequent call returns
    the cached object instead of building a new one.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
| 242 |
1,080 | <gh_stars>1000+
#------------------------------------------------------------------------------
# Copyright (c) 2019, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
"""Test the button group widget.
"""
import pytest
from utils import is_qt_available, compile_source, wait_for_window_displayed
SOURCE ="""
from enaml.widgets.api import RadioButton, ButtonGroup, Container, Window
enamldef Main(Window):
alias rad1: rd1
alias rad2: rd2
alias rad3: rd3
alias rad4: rd4
alias rad5: rd5
alias rad6: rd6
alias group1: gr1
alias group2: gr2
ButtonGroup: gr1:
exclusive = True
ButtonGroup: gr2:
exclusive = False
Container:
Container:
RadioButton: rd1:
group = gr1
checked = True
RadioButton: rd2:
group = gr2
checked = True
Container:
RadioButton: rd3:
group = gr1
RadioButton: rd4:
group = gr2
Container:
RadioButton: rd5:
checked = False
RadioButton: rd6:
checked = False
"""
def test_tracking_group_members():
    """Test that we properly track which buttons belong to a group.

    Re-assigning a button's ``group`` must update both the old and the
    new group's member sets; clearing it removes the button entirely.
    """
    win = compile_source(SOURCE, 'Main')()
    # Initial membership as declared in the enaml source
    assert win.group1.group_members == set((win.rad1, win.rad3))
    assert win.group2.group_members == set((win.rad2, win.rad4))

    # Move an ungrouped button into group1, then over to group2
    win.rad5.group = win.group1
    assert win.group1.group_members == set((win.rad1, win.rad3, win.rad5))
    win.rad5.group = win.group2
    assert win.group1.group_members == set((win.rad1, win.rad3))
    assert win.group2.group_members == set((win.rad2, win.rad4, win.rad5))

    # Clearing the group removes the button from its former group
    win.rad5.group = None
    assert win.group2.group_members == set((win.rad2, win.rad4))
@pytest.mark.skipif(not is_qt_available(), reason='Requires a Qt binding')
def test_group_exclusivity(enaml_qtbot, enaml_sleep):
    """Test that we properly enforce exclusivity within a group.

    Checking a button in an exclusive group must uncheck its siblings;
    a non-exclusive group allows multiple checked buttons. Buttons added
    to a group at runtime must follow that group's exclusivity rule.
    """
    win = compile_source(SOURCE, 'Main')()
    win.show()
    wait_for_window_displayed(enaml_qtbot, win)

    # Check that group 1 is exclusive
    win.rad3.checked = True
    enaml_qtbot.wait(enaml_sleep)
    assert win.rad3.checked is True
    assert win.rad1.checked is False

    # Check that group 2 is non-exclusive
    win.rad4.checked = True
    enaml_qtbot.wait(enaml_sleep)
    assert win.rad2.checked is True
    assert win.rad4.checked is True

    # Check that dynamically added members are part of the right group
    win.rad5.group = win.group1
    assert win.rad3.checked is True
    assert win.rad1.checked is False
    assert win.rad5.checked is False
    win.rad5.checked = True
    enaml_qtbot.wait(enaml_sleep)
    assert win.rad3.checked is False
    assert win.rad1.checked is False
    assert win.rad5.checked is True
| 1,216 |
454 | package io.vertx.tp.modular.id;
import cn.vertxup.atom.domain.tables.pojos.MJoin;
import io.vertx.tp.atom.modeling.element.DataMatrix;
import io.vertx.tp.atom.refine.Ao;
import io.vertx.up.commune.Record;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
abstract class AbstractId implements AoId {

    @Override
    public void connect(final Record record,
                        final ConcurrentMap<String, DataMatrix> keys,
                        final ConcurrentMap<String, DataMatrix> matrix,
                        final Set<MJoin> joins) {
        /* Set primary keys: collect the entity key of every join
         * definition, then delegate the wiring to Ao. */
        final Set<String> entityKeys = joins.stream()
                .map(MJoin::getEntityKey)
                .collect(Collectors.toSet());
        Ao.connect(record, keys, matrix, entityKeys);
    }
}
| 336 |
1,847 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
#include <windows.h>

#include <string>
namespace krabs {
    /** <summary>
     * Converts std::wstring argument to std::string using UTF-8 codepage
     * Returns empty string if translation fails or input string is empty
     * </summary>
     */
    inline std::string from_wstring(const std::wstring& wstr, UINT codePage = CP_UTF8)
    {
        if (wstr.empty())
            return {};

        // First pass: ask Windows for the required byte count.
        // A zero return means the conversion itself failed.
        const auto requiredLen = WideCharToMultiByte(codePage, 0, wstr.data(), static_cast<int>(wstr.size()),
            nullptr, 0, nullptr, nullptr);
        if (0 == requiredLen)
            return {};

        // Second pass: convert directly into a pre-sized buffer.
        std::string result(requiredLen, 0);
        const auto convertedLen = WideCharToMultiByte(codePage, 0, wstr.data(), static_cast<int>(wstr.size()),
            &result[0], requiredLen, nullptr, nullptr);
        if (0 == convertedLen)
            return {};

        return result;
    }
}
| 413 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.spring.servicebus.stream.binder;
import com.azure.messaging.servicebus.models.ServiceBusReceiveMode;
import com.azure.spring.integration.servicebus.ServiceBusClientConfig;
import com.azure.spring.servicebus.stream.binder.config.ServiceBusQueueBinderConfiguration;
import com.azure.spring.servicebus.stream.binder.properties.ServiceBusConsumerProperties;
import com.azure.spring.servicebus.stream.binder.properties.ServiceBusQueueExtendedBindingProperties;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Tests that session-related Service Bus consumer binding properties are
 * translated into the correct {@link ServiceBusClientConfig} values.
 *
 * <p>Note: the original assertions passed arguments as (actual, expected);
 * JUnit's {@code assertEquals} contract is (expected, actual), so the
 * arguments have been swapped to produce correct failure messages.
 */
public class ServiceBusQueueSessionBinderConfigTest {

    ApplicationContextRunner contextRunner = new ApplicationContextRunner()
        .withUserConfiguration(ServiceBusQueueBinderConfiguration.class)
        .withPropertyValues(
            "spring.cloud.azure.servicebus.connection-string=Endpoint=sb://test;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=test",
            "spring.cloud.stream.function.definition=consume;supply",
            "spring.cloud.stream.bindings.consume-in-0.destination=test",
            "spring.cloud.stream.bindings.supply-out-0.destination=test");

    /**
     * All explicit consumer properties should flow through to the client config.
     */
    @Test
    public void testServiceBusExtendedConsumerProperties() {
        contextRunner.withPropertyValues(
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.sessionsEnabled:true",
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.maxConcurrentCalls:10",
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.maxConcurrentSessions:20",
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.disableAutoComplete:true",
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.serviceBusReceiveMode:RECEIVE_AND_DELETE")
                              .run(context -> {
                                  ServiceBusConsumerProperties properties =
                                      context.getBean(ServiceBusQueueExtendedBindingProperties.class).getBindings().get("consume-in-0").getConsumer();
                                  ExtendedConsumerProperties<ServiceBusConsumerProperties> serviceBusProperties = new ExtendedConsumerProperties<>(properties);
                                  ServiceBusClientConfig config = context.getBean(ServiceBusQueueMessageChannelBinder.class).buildClientConfig(serviceBusProperties);

                                  assertEquals(10, config.getMaxConcurrentCalls());
                                  assertEquals(20, config.getMaxConcurrentSessions());
                                  assertEquals(ServiceBusReceiveMode.RECEIVE_AND_DELETE, config.getServiceBusReceiveMode());
                                  assertFalse(config.isEnableAutoComplete());
                                  assertTrue(config.isSessionsEnabled());
                              });
    }

    /**
     * With sessions enabled, the generic 'concurrency' setting should map to
     * concurrent sessions, leaving concurrent calls at its default of 1.
     */
    @Test
    public void testServiceBusExtendedConsumerPropertiesSessionEnabledWithConcurrency() {
        contextRunner.withPropertyValues(
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.sessionsEnabled:true",
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.concurrency:20")
                              .run(context -> {
                                  ServiceBusConsumerProperties properties =
                                      context.getBean(ServiceBusQueueExtendedBindingProperties.class).getBindings().get("consume-in-0").getConsumer();
                                  ExtendedConsumerProperties<ServiceBusConsumerProperties> serviceBusProperties = new ExtendedConsumerProperties<>(properties);
                                  ServiceBusClientConfig config = context.getBean(ServiceBusQueueMessageChannelBinder.class).buildClientConfig(serviceBusProperties);

                                  assertEquals(20, config.getMaxConcurrentSessions());
                                  assertEquals(1, config.getMaxConcurrentCalls());
                              });
    }

    /**
     * With sessions disabled, 'concurrency' should map to concurrent calls
     * instead, leaving concurrent sessions at its default of 1.
     */
    @Test
    public void testServiceBusExtendedConsumerPropertiesSessionDisabledWithConcurrency() {
        contextRunner.withPropertyValues(
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.sessionsEnabled:false",
            "spring.cloud.stream.servicebus.queue.bindings.consume-in-0.consumer.concurrency:20")
                              .run(context -> {
                                  ServiceBusConsumerProperties properties =
                                      context.getBean(ServiceBusQueueExtendedBindingProperties.class).getBindings().get("consume-in-0").getConsumer();
                                  ExtendedConsumerProperties<ServiceBusConsumerProperties> serviceBusProperties = new ExtendedConsumerProperties<>(properties);
                                  ServiceBusClientConfig config = context.getBean(ServiceBusQueueMessageChannelBinder.class).buildClientConfig(serviceBusProperties);

                                  assertEquals(20, config.getMaxConcurrentCalls());
                                  assertEquals(1, config.getMaxConcurrentSessions());
                              });
    }
}
| 1,891 |
21,274 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .vertex_direct_embedder import VertexDirectEmbedder
from .vertex_feature_embedder import VertexFeatureEmbedder
from .embedder import Embedder
| 61 |
743 | """
Export module
"""
import os
import os.path
import sqlite3
import sys
import regex as re
# pylint: disable=E0611
# Defined at runtime
from .index import Index
class Export:
    """
    Exports database rows into a text file line-by-line.
    """

    @staticmethod
    def stream(dbfile, output):
        """
        Iterates over each row in dbfile and writes text to output file

        Only rows whose section name is empty or does NOT match
        Index.SECTION_FILTER are exported.

        Args:
            dbfile: SQLite file to read
            output: output file to store text
        """

        with open(output, "w", encoding="utf-8") as out:
            # Connection to database file
            db = sqlite3.connect(dbfile)
            cur = db.cursor()

            # Get all indexed text
            cur.execute(Index.SECTION_QUERY)

            # count tracks rows that pass the section filter, not every
            # row iterated (the final message is slightly misleading)
            count = 0
            for _, name, text in cur:
                if not name or not re.search(Index.SECTION_FILTER, name.lower()):
                    count += 1
                    if count % 1000 == 0:
                        print(f"Streamed {count} documents", end="\r")

                    # Write row (skip rows with empty text)
                    if text:
                        out.write(text + "\n")

            print(f"Iterated over {count} total rows")

            # Free database resources
            db.close()

    @staticmethod
    def run(output, path):
        """
        Exports data from database to text file, line by line.

        Args:
            output: output file path
            path: model path, if None uses default path
        """
        # NOTE(review): despite the docstring, a None path is not handled
        # here — os.path.join(None, ...) raises TypeError. Confirm whether
        # a default path should be resolved before this call.

        # Derive path to dbfile
        dbfile = os.path.join(path, "articles.sqlite")

        # Stream text from database to file
        Export.stream(dbfile, output)
if __name__ == "__main__":
    # CLI entry point: argv[1] = output file, argv[2] = optional model path
    Export.run(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else None)
| 854 |
6,215 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import logging
from pprint import pprint

# %%
import requests

logging.basicConfig(level=logging.DEBUG)

# %%
# Sandbox connector configuration; client_id/secret are placeholders
# that must be replaced with real PayPal REST app credentials.
specification = {
    "client_id": "REPLACE_ME",
    "secret": "REPLACE_ME",
    "start_date": "2021-06-01T00:00:00+00:00",
    "end_date": "2021-06-30T00:00:00+00:00",
    "is_sandbox": True,
}

# %% READ <client_id> and <secret>
client_id = specification.get("client_id")
secret = specification.get("secret")

# %% GET API_TOKEN
# OAuth2 client-credentials flow; the app credentials are sent via
# HTTP basic auth and the response contains a bearer access token.
token_refresh_endpoint = "https://api-m.sandbox.paypal.com/v1/oauth2/token"
data = "grant_type=client_credentials"
headers = {
    "Accept": "application/json",
    "Accept-Language": "en_US",
}
response = requests.request(
    method="POST",
    url=token_refresh_endpoint,
    data=data,
    headers=headers,
    auth=(client_id, secret),
)
response_json = response.json()
print(response_json)
API_TOKEN = response_json["access_token"]

# CREATE TRANSACTIONS
# for i in range(1000):
#     create_response = requests.post(
#         "https://api-m.sandbox.paypal.com/v2/checkout/orders",
#         headers={'content-type': 'application/json', 'authorization': f'Bearer {API_TOKEN}', "prefer": "return=representation"},
#         json={
#             "intent": "CAPTURE",
#             "purchase_units": [
#                 {
#                     "amount": {
#                         "currency_code": "USD",
#                         "value": f"{float(i)}"
#                     }
#                 }
#             ]
#         }
#     )
#
#     print(create_response.json())

# %% LIST TRANSACTIONS
# NOTE(review): this query window differs from the start/end dates in
# `specification` above — confirm which range is intended.
url = "https://api-m.sandbox.paypal.com/v1/reporting/transactions"
params = {
    "start_date": "2021-06-20T00:00:00+00:00",
    "end_date": "2021-07-10T07:19:45Z",
    "fields": "all",
    "page_size": "100",
    "page": "1",
}
headers = {
    "Authorization": f"Bearer {API_TOKEN}",
    "Content-Type": "application/json",
}
response = requests.get(
    url,
    headers=headers,
    params=params,
)
pprint(response.json())
| 953 |
582 | <reponame>martinChenZ/spring-boot-demo<filename>leetcode/src/main/java/com/easy/leetcode/Sub96.java
package com.easy.leetcode;
/*
96. 不同的二叉搜索树
给定一个整数 n,求以 1 ... n 为节点组成的二叉搜索树有多少种?
示例:
输入: 3
输出: 5
解释:
给定 n = 3, 一共有 5 种不同结构的二叉搜索树:
1 3 3 2 1
\ / / / \ \
3 2 1 1 3 2
/ / \ \
2 1 2 3
*/
/**
 * Driver for LeetCode 96: prints the number of distinct BSTs on 3 nodes.
 */
public class Sub96 {
    public static void main(String[] args) {
        Solution_96_2 impl = new Solution_96_2();
        System.out.println("输出:" + impl.numTrees(3));
    }
}
/**
 * Closed-form solution via Catalan numbers.
 *
 * C(0) = 1 and C(k+1) = C(k) * 2 * (2k+1) / (k+2); the answer for n
 * nodes is C(n). Multiplication happens before division so each
 * intermediate value stays an exact integer.
 */
class Solution_96_1 {
    public int numTrees(int n) {
        long catalan = 1;
        for (int k = 0; k < n; ++k) {
            catalan = catalan * 2 * (2 * k + 1) / (k + 2);
        }
        return (int) catalan;
    }
}
/**
 * Dynamic programming over the number of nodes.
 *
 * g[k] = number of distinct BSTs with k nodes. Choosing value r as the
 * root of a k-node tree leaves r-1 values for the left subtree and k-r
 * for the right, giving g[k] = sum over r of g[r-1] * g[k-r].
 */
class Solution_96_2 {
    public int numTrees(int n) {
        // g[k] holds the answer for k nodes; g[0] = 1 (the empty tree).
        int[] g = new int[n + 1];
        g[0] = 1;
        // Build up from 1 node so n == 0 and n == 1 need no special
        // casing (the previous version wrote g[1] unconditionally,
        // which overflowed the array when n == 0).
        for (int nodes = 1; nodes <= n; nodes++) {
            for (int root = 1; root <= nodes; root++) {
                g[nodes] += g[root - 1] * g[nodes - root];
            }
        }
        return g[n];
    }
}
| 962 |
5,169 | {
"name": "Graphs",
"version": "0.1.1",
"summary": "Charts view generater",
"description": "Light weight charts view generater for iOS. Written in Swift.",
"homepage": "https://github.com/recruit-mtl/Graphs",
"license": {
"type": "MIT",
"file": "LICENSE.md"
},
"authors": {
"kokoro": "<EMAIL>"
},
"social_media_url": "https://twitter.com/kokoron",
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/recruit-mtl/Graphs.git",
"tag": "0.1.1"
},
"source_files": "Graphs/*.swift",
"requires_arc": true
}
| 245 |
/* FLE.D instruction semantics: requires the D extension and an enabled
   FPU; writes 1 to rd when the double in frs1 is less than or equal to
   the double in frs2 (softfloat f64_le), else 0, then latches any
   accrued floating-point exception flags. */
require_extension('D');
require_fp;
WRITE_RD(f64_le(f64(FRS1), f64(FRS2)));
set_fp_exceptions;
| 47 |
9,136 | /* Copyright (c) 2011 <NAME> (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#ifndef HACD_GRAPH_H
#define HACD_GRAPH_H
#include "hacdVersion.h"
#include "hacdVector.h"
#include "hacdICHull.h"
#include <map>
#include <vector>
#include <set>
namespace HACD
{
class GraphVertex;
class GraphEdge;
class Graph;
class HACD;
class GraphVertex
{
public:
    // Records the edge with id `name` as incident to this vertex.
    // Inserting an already-known edge id is a no-op; always reports success.
    bool AddEdge(long name)
    {
        m_edges.insert(name);
        return true;
    }
    // Detaches the edge with id `name` from this vertex (defined out of line).
    bool DeleteEdge(long name);
    GraphVertex();
    // The vertex owns its convex hull and releases it on destruction.
    ~GraphVertex() { delete m_convexHull; };
private:
    long m_name;                                 // vertex id within the owning Graph
    long m_cc;                                   // connected-component id — presumably assigned by Graph::ExtractCCs; confirm
    std::set<long> m_edges;                      // ids of incident edges
    bool m_deleted;                              // soft-delete flag (vertices are flagged rather than erased)
    std::vector<long> m_ancestors;               // ids of related vertices — NOTE(review): exact semantics defined elsewhere; confirm
    std::map<long, DPoint> m_distPoints;         // cached distance points, keyed by id
    Real m_error;                                // approximation error associated with this vertex
    double m_surf;                               // surface area
    double m_volume;                             // volume
    double m_perimeter;                          // perimeter
    double m_concavity;                          // concavity measure
    ICHull* m_convexHull;                        // incremental convex hull (owned; deleted in dtor)
    std::set<unsigned long long> m_boudaryEdges; // boundary edge keys (identifier kept as-is, incl. historical typo)
    friend class GraphEdge;
    friend class Graph;
    friend class HACD;
};
class GraphEdge
{
public:
    GraphEdge();
    // The edge owns its convex hull and releases it on destruction.
    ~GraphEdge() { delete m_convexHull; };
private:
    long m_name;                                 // edge id within the owning Graph
    long m_v1;                                   // id of first endpoint vertex
    long m_v2;                                   // id of second endpoint vertex
    std::map<long, DPoint> m_distPoints;         // cached distance points, keyed by id
    Real m_error;                                // approximation error associated with this edge
    double m_surf;                               // surface area
    double m_volume;                             // volume
    double m_perimeter;                          // perimeter
    double m_concavity;                          // concavity measure
    ICHull* m_convexHull;                        // incremental convex hull (owned; deleted in dtor)
    std::set<unsigned long long> m_boudaryEdges; // boundary edge keys (identifier kept as-is, incl. historical typo)
    bool m_deleted;                              // soft-delete flag (edges are flagged rather than erased)
    friend class GraphVertex;
    friend class Graph;
    friend class HACD;
};
class Graph
{
public:
    size_t GetNEdges() const { return m_nE; }
    size_t GetNVertices() const { return m_nV; }
    // Collapses the edge joining v1 and v2 — presumably merging the endpoint
    // vertices; implementation is out of line, confirm details there.
    bool EdgeCollapse(long v1, long v2);
    // Adds a vertex/edge and returns its id.
    long AddVertex();
    long AddEdge(long v1, long v2);
    bool DeleteEdge(long name);
    bool DeleteVertex(long name);
    // Returns the id of the edge joining v1 and v2.
    long GetEdgeID(long v1, long v2) const;
    void Clear();
    void Print() const;
    // Extracts connected components — TODO confirm return value (count or last id).
    long ExtractCCs();
    Graph();
    virtual ~Graph();
    // Pre-sizes internal storage for nV vertices and nE edges.
    void Allocate(size_t nV, size_t nE);
private:
    size_t m_nCCs;                       // number of connected components
    size_t m_nV;                         // number of vertices
    size_t m_nE;                         // number of edges
    std::vector<GraphEdge> m_edges;      // edge storage, indexed by edge id
    std::vector<GraphVertex> m_vertices; // vertex storage, indexed by vertex id
    friend class HACD;
};
} // namespace HACD
#endif
| 1,168 |
852 | <filename>RecoJets/JetPlusTracks/python/ZSPJetCorrections332_cff.py
import FWCore.ParameterSet.Config as cms
# Modules
#
# Define the producers of corrected jet collections for each algorithm.
# Each producer applies a zero-suppression (ZSP) correction tag to its input
# calo-jet collection.  All three producers share the same tag,
# 'ZSP_CMSSW332_Iterative_Cone_05_PU0' — presumably intentional for this
# release; confirm if per-algorithm tags become available.
#
ZSPJetCorJetIcone5 = cms.EDProducer("CaloJetProducer",
    src = cms.InputTag("iterativeCone5CaloJets"),      # iterative cone R=0.5 jets
    tagName = cms.vstring('ZSP_CMSSW332_Iterative_Cone_05_PU0'),
    tagNameOffset = cms.vstring(),                     # no offset-correction tags
    PU = cms.int32(-1),                                # pile-up mode; -1 — TODO confirm semantics
    FixedPU = cms.int32(0),
    alias = cms.untracked.string('ZSPJetCorJetIcone5')
)
ZSPJetCorJetSiscone5 = cms.EDProducer("CaloJetProducer",
    src = cms.InputTag("sisCone5CaloJets"),            # SISCone R=0.5 jets
    tagName = cms.vstring('ZSP_CMSSW332_Iterative_Cone_05_PU0'),
    tagNameOffset = cms.vstring(),
    PU = cms.int32(-1),
    FixedPU = cms.int32(0),
    alias = cms.untracked.string('ZSPJetCorJetSiscone5')
)
ZSPJetCorJetAntiKt5 = cms.EDProducer("CaloJetProducer",
    src = cms.InputTag("ak5CaloJets"),                 # anti-kT R=0.5 jets
    tagName = cms.vstring('ZSP_CMSSW332_Iterative_Cone_05_PU0'),
    tagNameOffset = cms.vstring(),
    PU = cms.int32(-1),
    FixedPU = cms.int32(0),
    alias = cms.untracked.string('ZSPJetCorJetAntiKt5')
)
#
# Define a sequence to make all corrected jet collections at once.
#
ZSPJetCorrectionsIcone5 = cms.Sequence(ZSPJetCorJetIcone5)
ZSPJetCorrectionsSisCone5 = cms.Sequence(ZSPJetCorJetSiscone5)
ZSPJetCorrectionsAntiKt5 = cms.Sequence(ZSPJetCorJetAntiKt5)
# For backward-compatibility (but to be deprecated!)
ZSPJetCorrections = ZSPJetCorrectionsIcone5
| 636 |
12,366 | // Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
#include "tink/hybrid/ecies_aead_hkdf_hybrid_decrypt.h"
#include <utility>
#include "absl/memory/memory.h"
#include "tink/hybrid/ecies_aead_hkdf_dem_helper.h"
#include "tink/hybrid_decrypt.h"
#include "tink/subtle/ec_util.h"
#include "tink/subtle/ecies_hkdf_recipient_kem_boringssl.h"
#include "tink/util/enums.h"
#include "tink/util/secret_data.h"
#include "tink/util/status.h"
#include "proto/ecies_aead_hkdf.pb.h"
using ::google::crypto::tink::EciesAeadHkdfPrivateKey;
using ::google::crypto::tink::EllipticCurveType;
namespace crypto {
namespace tink {
namespace {
// Checks that `key` carries every field required for decryption: public key,
// params, public-point x coordinate, and the private scalar.  The y coordinate
// must be empty for CURVE25519 keys and non-empty for all other curves.
util::Status Validate(const EciesAeadHkdfPrivateKey& key) {
  if (!key.has_public_key() || !key.public_key().has_params() ||
      key.public_key().x().empty() || key.key_value().empty()) {
    return util::Status(
        util::error::INVALID_ARGUMENT,
        "Invalid EciesAeadHkdfPublicKey: missing required fields.");
  }
  if (key.public_key().params().has_kem_params() &&
      key.public_key().params().kem_params().curve_type() ==
          EllipticCurveType::CURVE25519) {
    // CURVE25519 public keys are represented by the x coordinate alone.
    if (!key.public_key().y().empty()) {
      return util::Status(
          util::error::INVALID_ARGUMENT,
          "Invalid EciesAeadHkdfPublicKey: has unexpected field.");
    }
  } else if (key.public_key().y().empty()) {
    return util::Status(
        util::error::INVALID_ARGUMENT,
        "Invalid EciesAeadHkdfPublicKey: missing required fields.");
  }
  return util::Status::OK;
}
} // namespace
// static
// Validates `recipient_key`, then assembles the KEM (from the private scalar
// and curve type) and the DEM helper (from the AEAD key template), returning
// a ready-to-use HybridDecrypt primitive.
util::StatusOr<std::unique_ptr<HybridDecrypt>> EciesAeadHkdfHybridDecrypt::New(
    const EciesAeadHkdfPrivateKey& recipient_key) {
  util::Status status = Validate(recipient_key);
  if (!status.ok()) return status;
  // Recipient-side KEM over the key's curve, keyed by the private scalar.
  auto kem_result = subtle::EciesHkdfRecipientKemBoringSsl::New(
      util::Enums::ProtoToSubtle(
          recipient_key.public_key().params().kem_params().curve_type()),
      util::SecretDataFromStringView(recipient_key.key_value()));
  if (!kem_result.ok()) return kem_result.status();
  // Helper that later turns the derived symmetric key into an AEAD/DAEAD.
  auto dem_result = EciesAeadHkdfDemHelper::New(
      recipient_key.public_key().params().dem_params().aead_dem());
  if (!dem_result.ok()) return dem_result.status();
  return {absl::WrapUnique(new EciesAeadHkdfHybridDecrypt(
      recipient_key.public_key().params(), std::move(kem_result).ValueOrDie(),
      std::move(dem_result).ValueOrDie()))};
}
// Decrypts `ciphertext`, which is laid out as: KEM header (encoded ephemeral
// EC point) followed by the DEM payload.  `context_info` feeds the HKDF step
// and must match the value used at encryption time.
util::StatusOr<std::string> EciesAeadHkdfHybridDecrypt::Decrypt(
    absl::string_view ciphertext, absl::string_view context_info) const {
  // Extract KEM-bytes from the ciphertext: the header length is fixed by the
  // curve and point format.
  auto header_size_result = subtle::EcUtil::EncodingSizeInBytes(
      util::Enums::ProtoToSubtle(
          recipient_key_params_.kem_params().curve_type()),
      util::Enums::ProtoToSubtle(recipient_key_params_.ec_point_format()));
  if (!header_size_result.ok()) return header_size_result.status();
  auto header_size = header_size_result.ValueOrDie();
  if (ciphertext.size() < header_size) {
    return util::Status(util::error::INVALID_ARGUMENT, "ciphertext too short");
  }
  // Use KEM to get a symmetric key (ECDH + HKDF over the header bytes).
  auto symmetric_key_result = recipient_kem_->GenerateKey(
      absl::string_view(ciphertext).substr(0, header_size),
      util::Enums::ProtoToSubtle(
          recipient_key_params_.kem_params().hkdf_hash_type()),
      recipient_key_params_.kem_params().hkdf_salt(), context_info,
      dem_helper_->dem_key_size_in_bytes(),
      util::Enums::ProtoToSubtle(recipient_key_params_.ec_point_format()));
  if (!symmetric_key_result.ok()) return symmetric_key_result.status();
  auto symmetric_key = std::move(symmetric_key_result.ValueOrDie());
  // Use the symmetric key to get an AEAD-primitive.
  auto aead_or_daead_result = dem_helper_->GetAeadOrDaead(symmetric_key);
  if (!aead_or_daead_result.ok()) return aead_or_daead_result.status();
  auto aead_or_daead = std::move(aead_or_daead_result.ValueOrDie());
  // Do the actual decryption using the AEAD-primitive.
  auto decrypt_result =
      aead_or_daead->Decrypt(ciphertext.substr(header_size), "");  // empty aad
  if (!decrypt_result.ok()) return decrypt_result.status();
  return decrypt_result.ValueOrDie();
}
} // namespace tink
} // namespace crypto
| 1,811 |
485 | /*
* Copyright 2016 Quora, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <vector>
#include <qmf/Types.h>
namespace qmf {
// Minimal dense vector of Double values used by the factorization code.
// No bounds checking is performed by any accessor.
class Vector {
 public:
  // Constructs a vector of n elements (definition out of line).
  explicit Vector(const size_t n);

  // Read access to element i.
  Double operator()(const size_t i) const {
    return data_[i];
  }

  // Write access to element i.
  Double& operator()(const size_t i) {
    return data_[i];
  }

  // Number of elements.
  size_t size() const {
    return data_.size();
  }

  // Raw pointer to the underlying contiguous storage.
  // Fix: the return type was `Double* const`; a top-level const on a return
  // value is meaningless (the language ignores it), so it is dropped.
  Double* data() {
    return data_.data();
  }

  // Const overload so read-only callers can also reach the storage
  // (backward-compatible addition).
  const Double* data() const {
    return data_.data();
  }

 private:
  std::vector<Double> data_;
};
}
| 320 |
605 | <reponame>yuriykoch/llvm<filename>libcxx/test/std/utilities/memory/specialized.algorithms/uninitialized.fill.n/ranges_uninitialized_fill_n.pass.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: libcpp-no-concepts, libcpp-has-no-incomplete-ranges
// <memory>
// template <nothrow-forward-iterator ForwardIterator, class T>
// requires constructible_from<iter_value_t<ForwardIterator>, const T&>
// ForwardIterator ranges::uninitialized_fill_n(ForwardIterator first, iter_difference_t<ForwardIterator> n);
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <ranges>
#include <type_traits>
#include "../buffer.h"
#include "../counted.h"
#include "test_macros.h"
#include "test_iterators.h"
// Because this is a variable and not a function, it's guaranteed that ADL won't be used. However,
// implementations are allowed to use a different mechanism to achieve this effect, so this check is
// libc++-specific.
LIBCPP_STATIC_ASSERT(std::is_class_v<decltype(std::ranges::uninitialized_fill_n)>);
// uninitialized_fill_n requires the element type to be constructible from the
// fill value; this type is not constructible from int, so the call must not
// participate in overload resolution.
struct NotConvertibleFromInt {};
static_assert(!std::is_invocable_v<decltype(std::ranges::uninitialized_fill_n), NotConvertibleFromInt*,
                                   NotConvertibleFromInt*, int>);
int main(int, char**) {
  constexpr int value = 42;
  Counted x(value);
  Counted::reset();
  // Every successfully filled element must hold the fill value.
  auto pred = [](const Counted& e) { return e.value == value; };

  // An empty range -- no default constructors should be invoked.
  {
    Buffer<Counted, 1> buf;
    std::ranges::uninitialized_fill_n(buf.begin(), 0, x);
    assert(Counted::current_objects == 0);
    assert(Counted::total_objects == 0);
  }

  // A range containing several objects.
  {
    constexpr int N = 5;
    Buffer<Counted, N> buf;
    std::ranges::uninitialized_fill_n(buf.begin(), N, x);
    assert(Counted::current_objects == N);
    assert(Counted::total_objects == N);
    assert(std::all_of(buf.begin(), buf.end(), pred));
    // Buffer holds raw storage, so destroy the elements explicitly.
    std::destroy(buf.begin(), buf.end());
    Counted::reset();
  }

  // Any existing values should be overwritten by value constructors.
  {
    constexpr int N = 5;
    int buffer[N] = {value, value, value, value, value};
    std::ranges::uninitialized_fill_n(buffer, 1, 0);
    assert(buffer[0] == 0);
    assert(buffer[1] == value);

    std::ranges::uninitialized_fill_n(buffer, N, 0);
    assert(buffer[0] == 0);
    assert(buffer[1] == 0);
    assert(buffer[2] == 0);
    assert(buffer[3] == 0);
    assert(buffer[4] == 0);
  }

  // An exception is thrown while objects are being created -- the existing objects should stay
  // valid. (iterator, sentinel) overload.
#ifndef TEST_HAS_NO_EXCEPTIONS
  {
    constexpr int N = 5;
    Buffer<Counted, N> buf;
    Counted::throw_on = 3; // When constructing the fourth object.
    try {
      std::ranges::uninitialized_fill_n(buf.begin(), N, x);
    } catch (...) {
    }
    // The algorithm cleans up what it constructed (current drops to 0) while
    // total records the three successful constructions before the throw.
    assert(Counted::current_objects == 0);
    assert(Counted::total_objects == 3);
    std::destroy(buf.begin(), buf.begin() + 3);
    Counted::reset();
  }
#endif // TEST_HAS_NO_EXCEPTIONS

  // Works with const iterators.
  {
    constexpr int N = 5;
    Buffer<Counted, N> buf;
    std::ranges::uninitialized_fill_n(buf.cbegin(), N, x);
    assert(Counted::current_objects == N);
    assert(Counted::total_objects == N);
    assert(std::all_of(buf.begin(), buf.end(), pred));
    std::destroy(buf.begin(), buf.end());
    Counted::reset();
  }

  return 0;
}
| 1,367 |
624 | import pytest
import six
import os
import shutil
import tempfile
import time
from verta._internal_utils.importer import get_tensorflow_major_version
@pytest.mark.tensorflow
class TestKeras:
    """Checks that VertaCallback logs Keras hyperparameters and observations."""

    def test_sequential_api(self, experiment_run):
        """Sequential-API model: layer topology and training config are logged."""
        verta_integrations_keras = pytest.importorskip("verta.integrations.keras")
        keras = verta_integrations_keras.keras  # use same Keras imported by Verta
        np = pytest.importorskip("numpy")
        # adapted from https://keras.io/getting-started/sequential-model-guide/
        ## define hyperparameters
        samples = 1000
        num_classes = 10
        num_hidden = 64
        fc_activation = "relu"
        dropout_rate = .5
        batch_size = 128
        epochs = 3
        loss = "CategoricalCrossentropy"
        optimizer = "Adam"
        ## create dummy data
        x_train = np.random.random((samples, 20))
        y_train = keras.utils.to_categorical(np.random.randint(num_classes, size=(samples, 1)), num_classes=num_classes)
        ## build model
        model = keras.models.Sequential()
        model.add(keras.layers.Dense(num_hidden, activation=fc_activation, input_dim=20))
        model.add(keras.layers.Dropout(dropout_rate))
        model.add(keras.layers.Dense(num_hidden, activation=fc_activation))
        model.add(keras.layers.Dropout(dropout_rate))
        model.add(keras.layers.Dense(num_classes, activation="softmax"))
        ## train model
        model.compile(loss=getattr(keras.losses, loss)(),
                      optimizer=optimizer,
                      metrics=["accuracy"])
        model.fit(x_train, y_train,
                  epochs=epochs,
                  batch_size=batch_size,
                  callbacks=[verta_integrations_keras.VertaCallback(experiment_run)])
        logged_hyperparams = experiment_run.get_hyperparameters()
        if get_tensorflow_major_version() == 1:
            # not exposed in TF 2.X
            assert logged_hyperparams['batch_size'] == batch_size
            assert logged_hyperparams['samples'] == samples
        assert logged_hyperparams['epochs'] == epochs
        assert logged_hyperparams['loss'] == loss
        assert logged_hyperparams['optimizer'] == optimizer
        # layer-by-layer topology: dense/dropout alternation, sizes, activations
        assert "dense" in logged_hyperparams['layer_0_name']
        assert logged_hyperparams['layer_0_size'] == num_hidden
        assert logged_hyperparams['layer_0_activation'] == fc_activation
        assert "dropout" in logged_hyperparams['layer_1_name']
        assert logged_hyperparams['layer_1_dropoutrate'] == dropout_rate
        assert "dense" in logged_hyperparams['layer_2_name']
        assert logged_hyperparams['layer_2_size'] == num_hidden
        assert logged_hyperparams['layer_2_activation'] == fc_activation
        assert "dropout" in logged_hyperparams['layer_3_name']
        assert logged_hyperparams['layer_3_dropoutrate'] == dropout_rate
        assert "dense" in logged_hyperparams['layer_4_name']
        assert logged_hyperparams['layer_4_size'] == num_classes
        assert logged_hyperparams['layer_4_activation'] == "softmax"
        logged_observations = experiment_run.get_observations()
        # metric key name differs across Keras versions ('acc' vs 'accuracy')
        assert 'acc' in logged_observations or 'accuracy' in logged_observations
        assert 'loss' in logged_observations

    def test_functional_api(self, experiment_run):
        """Functional-API model: layer topology (incl. Input layer) is logged."""
        verta_integrations_keras = pytest.importorskip("verta.integrations.keras")
        keras = verta_integrations_keras.keras  # use same Keras imported by Verta
        np = pytest.importorskip("numpy")
        # also adapted from https://keras.io/getting-started/sequential-model-guide/
        ## define hyperparameters
        samples = 1000
        num_classes = 10
        num_hidden = 64
        fc_activation = "relu"
        dropout_rate = .5
        batch_size = 128
        epochs = 3
        loss = "categorical_crossentropy"
        optimizer = "Adam"
        ## create dummy data
        x_train = np.random.random((samples, 20))
        y_train = keras.utils.to_categorical(np.random.randint(num_classes, size=(samples, 1)), num_classes=num_classes)
        ## build model
        inputs = keras.layers.Input(shape=(20,))
        output_1 = keras.layers.Dense(num_hidden, activation="relu", input_dim=20)(inputs)
        dropout_1 = keras.layers.Dropout(dropout_rate)(output_1)
        output_2 = keras.layers.Dense(num_hidden, activation="relu")(dropout_1)
        dropout_2 = keras.layers.Dropout(dropout_rate)(output_2)
        predictions = keras.layers.Dense(num_classes, activation="softmax")(dropout_2)
        model = keras.models.Model(inputs=inputs, outputs=predictions)
        ## train model
        model.compile(loss=getattr(keras.losses, loss),
                      optimizer=optimizer,
                      metrics=["accuracy"])
        model.fit(x_train, y_train,
                  epochs=epochs,
                  batch_size=batch_size,
                  callbacks=[verta_integrations_keras.VertaCallback(experiment_run)])
        logged_hyperparams = experiment_run.get_hyperparameters()
        if get_tensorflow_major_version() == 1:
            # not exposed in TF 2.X
            assert logged_hyperparams['batch_size'] == batch_size
            assert logged_hyperparams['samples'] == samples
        assert logged_hyperparams['epochs'] == epochs
        assert logged_hyperparams['loss'] == loss
        assert logged_hyperparams['optimizer'] == optimizer
        # layer 0 is the explicit Input layer of the functional API
        assert "input" in logged_hyperparams['layer_0_name']
        assert "dense" in logged_hyperparams['layer_1_name']
        assert logged_hyperparams['layer_1_size'] == num_hidden
        assert logged_hyperparams['layer_1_activation'] == fc_activation
        assert "dropout" in logged_hyperparams['layer_2_name']
        assert logged_hyperparams['layer_2_dropoutrate'] == dropout_rate
        assert "dense" in logged_hyperparams['layer_3_name']
        assert logged_hyperparams['layer_3_size'] == num_hidden
        assert logged_hyperparams['layer_3_activation'] == fc_activation
        assert "dropout" in logged_hyperparams['layer_4_name']
        assert logged_hyperparams['layer_4_dropoutrate'] == dropout_rate
        assert "dense" in logged_hyperparams['layer_5_name']
        assert logged_hyperparams['layer_5_size'] == num_classes
        assert logged_hyperparams['layer_5_activation'] == "softmax"
        logged_observations = experiment_run.get_observations()
        assert 'acc' in logged_observations or 'accuracy' in logged_observations
        assert 'loss' in logged_observations
class TestScikitLearn:
    """Checks verta's sklearn monkey-patches on the patched estimators."""

    def test_patch_overwrite(self, experiment_run):
        """Patches add `run` parameter."""
        verta_integrations_sklearn = pytest.importorskip("verta.integrations.sklearn")
        np = pytest.importorskip("numpy")
        for cls in verta_integrations_sklearn.classes:
            with pytest.raises(TypeError) as excinfo:
                cls().fit(run=experiment_run)
            # The TypeError must come from fit()'s own argument validation,
            # not from the patch failing to accept `run`.
            assert str(excinfo.value).strip() != "fit() got an unexpected keyword argument 'run'"

    def test_patch_log(self, client):
        """Patches log things."""
        client.set_project()
        client.set_experiment()
        verta_integrations_sklearn = pytest.importorskip("verta.integrations.sklearn")
        linear_model = pytest.importorskip("sklearn.linear_model")
        tree = pytest.importorskip("sklearn.tree")
        svm = pytest.importorskip("sklearn.svm")
        ensemble = pytest.importorskip("sklearn.ensemble")
        neural_network = pytest.importorskip("sklearn.neural_network")
        np = pytest.importorskip("numpy")
        samples = 5
        num_features = 3
        num_classes = 10
        X = np.random.randint(0, 17, size=(samples, num_features))
        y = np.random.randint(0, num_classes, size=(samples,))
        # one representative estimator per sklearn family
        models = [
            linear_model.Ridge(),
            tree.DecisionTreeClassifier(),
            svm.SVC(),
            ensemble.GradientBoostingClassifier(),
            neural_network.MLPClassifier(),
        ]
        for model in models:
            run = client.set_experiment_run()
            model.fit(X, y, run=run)
            # fitting through the patch must log the estimator hyperparameters
            assert run.get_hyperparameters()
@pytest.mark.tensorflow
class TestTensorFlow:
    """Checks VertaHook and TensorBoard-event ingestion for TF 1.X and 2.X."""

    def test_estimator_hook(self, experiment_run):
        """VertaHook attached to an Estimator logs the training loss."""
        verta_integrations_tensorflow = pytest.importorskip("verta.integrations.tensorflow")
        VertaHook = verta_integrations_tensorflow.VertaHook
        np = pytest.importorskip("numpy")
        pd = pytest.importorskip("pandas")
        tf = pytest.importorskip("tensorflow")
        # adapted from https://www.tensorflow.org/tutorials/estimator/linear
        samples = 5
        num_features = 3
        data_df = pd.DataFrame(
            data=np.random.random(size=(samples, num_features))*100,
            columns=map(str, range(num_features))
        )
        label_series = pd.Series(np.random.randint(0, 2, size=samples))
        feature_columns = []
        for feature_name in data_df.columns:
            feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))

        def train_input_fn():
            # fresh Dataset per call, as the Estimator API requires
            return tf.data.Dataset.from_tensor_slices((dict(data_df), label_series)).batch(32)

        linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns)
        linear_est.train(train_input_fn, hooks=[VertaHook(experiment_run, every_n_steps=1)])
        assert 'loss' in experiment_run.get_observations()

    def test_tensorboard_with_keras(self, experiment_run):
        """TensorBoard logs written by a Keras fit() can be re-logged to Verta."""
        verta_integrations_tensorflow = pytest.importorskip("verta.integrations.tensorflow")
        log_tensorboard_events = verta_integrations_tensorflow.log_tensorboard_events
        np = pytest.importorskip("numpy")
        tf = pytest.importorskip("tensorflow")
        samples = 5
        num_classes = 10
        X_train = np.random.random((samples, samples, samples))
        y_train = np.random.randint(num_classes, size=(samples,))
        model = tf.keras.models.Sequential([
            tf.keras.layers.Flatten(input_shape=(samples, samples)),
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(10, activation='softmax')
        ])
        model.compile(
            optimizer='adam',
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'],
        )
        log_dir = tempfile.mkdtemp()
        try:
            model.fit(
                X_train, y_train,
                epochs=5,
                validation_data=(X_train, y_train),
                callbacks=[tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)],
            )
            log_tensorboard_events(experiment_run, log_dir)
            assert experiment_run.get_observations()
        finally:
            shutil.rmtree(log_dir)
            # NOTE(review): indentation was lost in this copy; this reset may
            # originally have followed the try/finally rather than being inside
            # it — behavior is equivalent either way.
            tf.compat.v1.reset_default_graph()

    def test_tensorboard_with_tf1X(self, experiment_run):
        """TF 1.X summary-writer events can be re-logged to Verta."""
        verta_integrations_tensorflow = pytest.importorskip("verta.integrations.tensorflow")
        log_tensorboard_events = verta_integrations_tensorflow.log_tensorboard_events
        tf = pytest.importorskip("tensorflow.compat.v1")
        np = pytest.importorskip("numpy")
        with tf.Graph().as_default():
            shape = (5, 5)
            x = tf.placeholder(tf.float64, shape=shape)
            mean = tf.reduce_mean(x)
            tf.summary.scalar("mean", mean)
            merged_summary_op = tf.summary.merge_all()
            init = tf.global_variables_initializer()
            log_dir = tempfile.mkdtemp()
            try:
                with tf.Session() as sess:
                    sess.run(init)
                    summary_writer = tf.summary.FileWriter(log_dir, graph=sess.graph)
                    for i in range(5):
                        data = np.random.random(shape)
                        _ = sess.run(mean, feed_dict={x: data})
                        summary = sess.run(merged_summary_op, feed_dict={x: data})
                        summary_writer.add_summary(summary, i)
                        # distinct wall-clock timestamps per event
                        time.sleep(.1)
                    summary_writer.flush()
                    summary_writer.close()
                log_tensorboard_events(experiment_run, log_dir)
                assert experiment_run.get_observations()
            finally:
                shutil.rmtree(log_dir)

    def test_tensorboard_with_tf2X(self, experiment_run):
        """TF 2.X summary-writer events can be re-logged to Verta."""
        verta_integrations_tensorflow = pytest.importorskip("verta.integrations.tensorflow")
        log_tensorboard_events = verta_integrations_tensorflow.log_tensorboard_events
        tf = pytest.importorskip("tensorflow", minversion="2.0.0", reason="only applicable to TF 2.X")
        np = pytest.importorskip("numpy")
        log_dir = tempfile.mkdtemp()
        try:
            writer = tf.summary.create_file_writer(log_dir)
            with writer.as_default():
                for step in range(5):
                    tf.summary.scalar("my_metric", np.random.random(), step=step)
                    # distinct wall-clock timestamps per event
                    time.sleep(.1)
            writer.flush()
            writer.close()
            log_tensorboard_events(experiment_run, log_dir)
            assert experiment_run.get_observations()
        finally:
            shutil.rmtree(log_dir)
class TestXGBoost:
    """Checks that verta_callback logs eval metrics as observations."""

    # TODO: re-enable with VR-11963
    # Fix: `pytest.mark.skip` accepts only a reason, but the previous code
    # passed `six.PY2` as a positional argument, which is invalid usage.
    # Per the TODO above the intent is an unconditional skip, so the
    # condition is dropped (it was `@pytest.mark.skipif(six.PY2, ...)`).
    @pytest.mark.skip(reason="XGBoost causes a segfault in Python 2")
    def test_callback(self, experiment_run):
        """Each '<dataset>-<metric>' pair from evals appears in observations."""
        verta_integrations_xgboost = pytest.importorskip("verta.integrations.xgboost")
        verta_callback = verta_integrations_xgboost.verta_callback
        xgb = pytest.importorskip("xgboost")
        np = pytest.importorskip("numpy")
        samples = 5
        num_features = 3
        X = np.random.random(size=(samples, num_features))*1000
        y = np.random.randint(0, 10, size=(samples,))
        train_dataset_name = "train"
        dtrain = xgb.DMatrix(X, label=y)
        params = {
            'eta': 0.5,
            'max_depth': 3,
            'num_class': 10,
            'eval_metric': ["merror", "mlogloss"],
        }
        num_rounds = 3
        bst = xgb.train(
            params, dtrain,
            num_boost_round=num_rounds,
            evals=[(dtrain, train_dataset_name)],
            callbacks=[verta_callback(experiment_run)],
        )
        observations = experiment_run.get_observations()
        for eval_metric in params['eval_metric']:
            assert '{}-{}'.format(train_dataset_name, eval_metric) in observations
class TestPyTorch:
    """Checks that verta_hook logs a torch module's layer hyperparameters."""

    def test_hook(self, experiment_run):
        """Forward hook records each layer's name and configuration."""
        verta_integrations_torch = pytest.importorskip("verta.integrations.torch")
        verta_hook = verta_integrations_torch.verta_hook
        np = pytest.importorskip("numpy")
        pd = pytest.importorskip("pandas")
        torch = pytest.importorskip("torch")
        samples = 5
        num_features = 3
        num_classes = 10
        X = np.random.randint(0, 17, size=(samples, num_features))
        y = np.random.randint(0, num_classes, size=(samples,))
        X = torch.tensor(X, dtype=torch.float)
        y = torch.tensor(y, dtype=torch.long)
        hidden_size = 512
        dropout = 0.2

        class Net(torch.nn.Module):
            # minimal fully connected classifier: Linear -> Dropout -> Linear
            def __init__(self, hidden_size, dropout):
                super(Net, self).__init__()
                self.fc = torch.nn.Linear(num_features, hidden_size)
                self.dropout = torch.nn.Dropout(dropout)
                self.output = torch.nn.Linear(hidden_size, num_classes)

            def forward(self, x):
                x = x.view(x.shape[0], -1)  # flatten non-batch dimensions
                x = torch.nn.functional.relu(self.fc(x))
                x = self.dropout(x)
                x = torch.nn.functional.softmax(self.output(x), dim=-1)
                return x

        model = Net(hidden_size, dropout)
        model.register_forward_hook(verta_hook(experiment_run))
        # one forward pass triggers the hook and the logging
        output = model(X)
        logged_hyperparams = experiment_run.get_hyperparameters()
        assert logged_hyperparams['layer_0_name'] == "Linear"
        assert logged_hyperparams['layer_0_in_features'] == num_features
        assert logged_hyperparams['layer_0_out_features'] == hidden_size
        assert logged_hyperparams['layer_1_name'] == "Dropout"
        assert logged_hyperparams['layer_1_p'] == dropout
        assert logged_hyperparams['layer_2_name'] == "Linear"
        assert logged_hyperparams['layer_2_in_features'] == hidden_size
        assert logged_hyperparams['layer_2_out_features'] == num_classes
| 7,644 |
6,278 | from pythonforandroid.recipe import CompiledComponentsPythonRecipe
class UJsonRecipe(CompiledComponentsPythonRecipe):
    """python-for-android recipe building the ujson C extension from PyPI."""
    # ujson release to build
    version = '1.35'
    # source tarball; {version} is substituted by the recipe framework
    url = 'https://pypi.python.org/packages/source/u/ujson/ujson-{version}.tar.gz'
    # no extra recipe dependencies beyond the Python toolchain
    depends = []


recipe = UJsonRecipe()
| 94 |
4,054 | // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.clustercontroller.core.status;
import com.yahoo.vespa.clustercontroller.core.ContentCluster;
import com.yahoo.vespa.clustercontroller.core.FleetControllerOptions;
/**
* @author <NAME>
*/
public interface RunDataExtractor {

    /** @return the fleet controller options currently in effect */
    FleetControllerOptions getOptions();

    /** @return the generation number of the active configuration */
    long getConfigGeneration();

    /** @return the content cluster this run data is extracted from */
    ContentCluster getCluster();

}
| 142 |
518 | package io.sdb.common.entity.kuaidi100;
import com.thoughtworks.xstream.XStream;
/**
 * Response message for a Kuaidi100 task subscription, serialized to and from
 * XML via a shared XStream instance (root element: {@code orderResponse}).
 */
public class TaskResponse {

    /** Lazily created, shared XStream instance; guarded by the class lock. */
    private static XStream xstream;

    private Boolean result;
    private String returnCode;
    private String message;

    /** @return whether the request succeeded */
    public Boolean getResult() {
        return result;
    }

    public void setResult(Boolean result) {
        this.result = result;
    }

    /** @return the provider's status code */
    public String getReturnCode() {
        return returnCode;
    }

    public void setReturnCode(String returnCode) {
        this.returnCode = returnCode;
    }

    /** @return the human-readable status message */
    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    /**
     * Returns the shared XStream instance, creating and configuring it on first
     * use. Fix: the previous unsynchronized lazy initialization could let
     * concurrent callers observe a partially configured instance; the method is
     * now synchronized and only publishes the instance after configuration.
     */
    private static synchronized XStream getXStream() {
        if (xstream == null) {
            XStream configured = new XStream();
            configured.autodetectAnnotations(true);
            configured.alias("orderResponse", TaskResponse.class);
            xstream = configured;
        }
        return xstream;
    }

    /** @return this response as an XML document with declaration header */
    public String toXml(){
        return "<?xml version='1.0' encoding='UTF-8'?>\r\n" + getXStream().toXML(this);
    }

    /** Parses a {@code TaskResponse} from its XML representation. */
    public static TaskResponse fromXml(String sXml){
        return (TaskResponse)getXStream().fromXML(sXml);
    }

    /** Ad-hoc smoke test: prints a sample response as XML. */
    public static void main(String[] args){
        TaskResponse req = new TaskResponse();
        req.setMessage("订阅成功");
        req.setResult(true);
        req.setReturnCode("200");
        System.out.print(req.toXml());
    }
}
| 440 |
401 | <filename>app/src/main/java/com/zwh/mvparms/eyepetizer/di/component/AuthorDetailComponent.java<gh_stars>100-1000
package com.zwh.mvparms.eyepetizer.di.component;
import com.jess.arms.di.scope.ActivityScope;
import dagger.Component;
import com.jess.arms.di.component.AppComponent;
import com.zwh.mvparms.eyepetizer.di.module.AuthorDetailModule;
import com.zwh.mvparms.eyepetizer.mvp.ui.activity.AuthorDetailActivity;
/**
 * Dagger component scoped to a single activity; wires {@link AuthorDetailModule}
 * on top of the application-wide {@link AppComponent}.
 */
@ActivityScope
@Component(modules = AuthorDetailModule.class, dependencies = AppComponent.class)
public interface AuthorDetailComponent {

    /** Injects this component's dependencies into the given activity. */
    void inject(AuthorDetailActivity activity);
}
2,945 | // Copyright 2012,2013 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.saasovation.identityaccess.resource;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;
import org.jboss.resteasy.annotations.cache.Cache;
import com.saasovation.common.media.OvationsMediaType;
import com.saasovation.common.serializer.ObjectSerializer;
import com.saasovation.identityaccess.application.ApplicationServiceRegistry;
import com.saasovation.identityaccess.application.IdentityApplicationService;
import com.saasovation.identityaccess.domain.model.identity.Tenant;
@Path("/tenants")
public class TenantResource {

    public TenantResource() {
        super();
    }

    /**
     * Returns the tenant with the given id as a serialized representation.
     * Responds 404 (Not Found) if no such tenant exists; successful responses
     * are cacheable for one hour.
     *
     * @param aTenantId the id of the tenant to fetch
     */
    @GET
    @Path("{tenantId}")
    @Produces({ OvationsMediaType.ID_OVATION_TYPE })
    @Cache(maxAge=3600)
    public Response getTenant(
            @PathParam("tenantId") String aTenantId) {
        Tenant tenant = this.identityApplicationService().tenant(aTenantId);
        if (tenant == null) {
            throw new WebApplicationException(Response.Status.NOT_FOUND);
        }
        String tenantRepresentation = ObjectSerializer.instance().serialize(tenant);
        Response response = Response.ok(tenantRepresentation).build();
        return response;
    }

    /** @return the application-service singleton used to look up tenants */
    private IdentityApplicationService identityApplicationService() {
        return ApplicationServiceRegistry.identityApplicationService();
    }
}
| 690 |
638 | <filename>examples/unixdomainsock/server.py
from __future__ import print_function
import os
import Pyro4
@Pyro4.expose
class Thingy(object):
    """Minimal remote object served over a Unix domain socket."""

    def message(self, arg):
        # Echo handler: log the incoming message and acknowledge it.
        print("Message received:", arg)
        return "Roger!"


# Remove a stale socket file left over from a previous run, if any.
if os.path.exists("example_unix.sock"):
    os.remove("example_unix.sock")

# Serve the object on a Unix domain socket instead of a TCP port.
with Pyro4.Daemon(unixsocket="example_unix.sock") as d:
    uri = d.register(Thingy, "example.unixsock")
    print("Server running, uri=", uri)
    d.requestLoop()
| 195 |
513 | #include <u.h>
#include <ds/ds.h>
#include <cc/cc.h>
#include <mem/mem.h>
static void expr(Node *);
static void stmt(Node *);
static void store(CTy *);
char *intargregs[] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};
int stackoffset;
Vec *pendingdata;
static FILE *o;
/* Initializes the code emitter: remembers the output stream and resets the
   list of data definitions pending emission. */
void
emitinit(FILE *out)
{
	o = out;
	pendingdata = vec();
}
/* Queues a data definition (label, type, optional initializer, linkage) for
   later emission in the data section. */
void
penddata(char *label, CTy *ty, Node *init, int isglobal)
{
	Data *d;

	d = xmalloc(sizeof(Data));
	d->label = label;
	d->type = ty;
	d->init = init;
	d->isglobal = isglobal;
	vecappend(pendingdata, d);
}
/* printf-style write to the output stream; aborts via errorf on failure. */
static void
out(char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	if(vfprintf(o, fmt, va) < 0)
		errorf("Error printing\n");
	va_end(va);
}
/* Like out(), but prefixes the line with leading whitespace so that emitted
   instructions are indented relative to labels/directives. */
static void
outi(char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	fprintf(o, " ");
	if(vfprintf(o, fmt, va) < 0)
		errorf("Error printing\n");
	va_end(va);
}
/* Emits code for every statement of a block node, in source order. */
static void
block(Node *n)
{
	int idx;
	Vec *stmts;

	stmts = n->Block.stmts;
	for(idx = 0; idx < stmts->len; idx++)
		stmt(vecget(stmts, idx));
}
/* Assigns a negative rbp-relative offset to every stack slot of function f and
   records the total local-area size (rounded up to 16 bytes) in
   f->Func.localsz.  Slots smaller than 8 bytes are widened to 8.  For vararg
   functions every slot is pushed a further 176 bytes down, leaving room for
   the register save area that func() spills in the prologue. */
static void
calcslotoffsets(Node *f)
{
	int i, tsz, curoffset;
	StkSlot *s;

	curoffset = 0;
	for(i = 0; i < f->Func.stkslots->len; i++) {
		s = vecget(f->Func.stkslots, i);
		tsz = s->size;
		if(tsz <= 8)
			tsz = 8;
		curoffset += tsz;
		/* round up so the slot start honors the slot's alignment */
		if(curoffset % s->align)
			curoffset = (curoffset - (curoffset % s->align) + s->align);
		s->offset = -curoffset;
		if(f->type->Func.isvararg)
			s->offset -= 176;
	}
	/* keep the local area 16-byte aligned for the ABI */
	if(curoffset % 16)
		curoffset = (curoffset - (curoffset % 16) + 16);
	f->Func.localsz = curoffset;
}
/* Emits a pushq of the named register and tracks the simulated stack depth. */
static void
pushq(char *r)
{
	outi("pushq %%%s\n", r);
	stackoffset += 8;
}
/* Pop into a register and keep the emitter's stack-depth counter in sync. */
static void
popq(char *reg)
{
	stackoffset -= 8;
	outi("popq %%%s\n", reg);
}
/* Emit a complete function: prologue, vararg register save area,
   local frame allocation, parameter spill to stack slots, body,
   and a fallback epilogue. */
static void
func(Node *f, char *label, int isglobal)
{
	Vec *v;
	Sym *sym;
	int i;
	calcslotoffsets(f);
	out("\n");
	out(".text\n");
	out("# function %s\n", f->Func.name);
	if(isglobal)
		out(".globl %s\n", label);
	out("%s:\n", label);
	pushq("rbp");
	outi("movq %%rsp, %%rbp\n");
	if(f->type->Func.isvararg) {
		/* Save the six integer argument registers so va_arg can
		   walk them; the save area sits just below %rbp. */
		stackoffset += 176;
		outi("sub $176, %%rsp\n");
		outi("movq %%rdi, (%%rsp)\n");
		outi("movq %%rsi, 8(%%rsp)\n");
		outi("movq %%rdx, 16(%%rsp)\n");
		outi("movq %%rcx, 24(%%rsp)\n");
		outi("movq %%r8, 32(%%rsp)\n");
		outi("movq %%r9, 40(%%rsp)\n");
	}
	if(f->Func.localsz) {
		outi("sub $%d, %%rsp\n", f->Func.localsz);
		stackoffset += f->Func.localsz;
	}
	v = f->Func.params;
	for(i = 0; i < v->len; i++) {
		sym = vecget(v, i);
		if(!isitype(sym->type) && !isptr(sym->type) && !isarray(sym->type))
			errorposf(&f->pos, "unimplemented arg type");
		if(i < 6) {
			/* First six integer args arrive in registers. */
			outi("movq %%%s, %d(%%rbp)\n", intargregs[i], sym->Local.slot->offset);
		} else {
			/* Remaining args were pushed by the caller: they live at
			   16(%rbp) and up (past saved %rbp and return address). */
			outi("movq %d(%%rbp), %%rcx\n", 16 + 8 * (i - 6));
			outi("leaq %d(%%rbp), %%rax\n", sym->Local.slot->offset);
			store(sym->type);
		}
	}
	block(f->Func.body);
	/* Fallback epilogue for functions that fall off the end. */
	outi("leave\n");
	outi("ret\n");
}
/* Emit a function call: evaluate args right-to-left onto the stack,
   pop the first six into the integer arg registers, call indirectly
   through %rax, then drop any stack-passed args. */
static void
call(Node *n)
{
	int i, nargs, nintargs, cleanup;
	Vec *args;
	Node *arg;
	args = n->Call.args;
	i = nargs = args->len;
	/* Push args in reverse order */
	while(i-- != 0) {
		arg = vecget(args, i);
		if(!isitype(arg->type) && !isptr(arg->type) && !isarray(arg->type) && !isfunc(arg->type))
			errorposf(&arg->pos, "unimplemented arg type\n");
		expr(arg);
		pushq("rax");
	}
	nintargs = nargs;
	if(nintargs > 6)
		nintargs = 6;
	for(i = 0; i < nintargs; i++)
		popq(intargregs[i]);
	/* The callee expression leaves the target address in %rax. */
	expr(n->Call.funclike);
	outi("call *%%rax\n");
	/* Caller cleans up the stack-passed (7th and later) arguments. */
	cleanup = 8 * (nargs - nintargs);
	if(cleanup) {
		outi("add $%d, %%rsp\n", cleanup);
		stackoffset -= cleanup;
	}
}
/* Emit a return statement: evaluate the value (if any) into %rax,
   then restore the frame and return. */
static void
ereturn(Node *r)
{
	CTy *ty;
	if(r->Return.expr) {
		ty = r->Return.expr->type;
		if(!isitype(ty) && !isptr(ty))
			errorposf(&r->pos, "unimplemented return type");
		expr(r->Return.expr);
	}
	/* No need to cleanup with leave */
	outi("leave\n");
	outi("ret\n");
}
/* Load the value at the address in %rax into %rax, sign-extending
   sub-quad integers. Aggregates, arrays and functions are represented
   by their address, so for those the address is left as-is. */
static void
load(CTy *t)
{
	if(isitype(t) || isptr(t)) {
		switch(t->size) {
		case 8:
			outi("movq (%%rax), %%rax\n");
			break;
		case 4:
			outi("movslq (%%rax), %%rax\n");
			break;
		case 2:
			outi("movswq (%%rax), %%rax\n");
			break;
		case 1:
			outi("movsbq (%%rax), %%rax\n");
			break;
		default:
			panic("internal error\n");
		}
		return;
	}
	if(isstruct(t)) {
		return;
	}
	if(isarray(t)) {
		return;
	}
	if(isfunc(t)) {
		return;
	}
	errorf("unimplemented load %d\n", t->t);
}
/* Store the value in %rcx to the address in %rax, using a width-matched
   move for scalars and a rep movsb byte copy for structs. */
static void
store(CTy *t)
{
	if(isitype(t) || isptr(t)) {
		switch(t->size) {
		case 8:
			outi("movq %%rcx, (%%rax)\n");
			break;
		case 4:
			outi("movl %%ecx, (%%rax)\n");
			break;
		case 2:
			outi("movw %%cx, (%%rax)\n");
			break;
		case 1:
			outi("movb %%cl, (%%rax)\n");
			break;
		default:
			panic("internal error\n");
		}
		return;
	}
	if(isstruct(t)) {
		/* %rcx holds the source address for a struct copy.
		   Preserve the registers rep movsb clobbers. */
		pushq("rdi");
		pushq("rsi");
		pushq("rcx");
		outi("movq %%rcx, %%rsi\n");
		outi("movq %%rax, %%rdi\n");
		outi("movq $%d, %%rcx\n", t->size);
		outi("rep movsb\n");
		popq("rcx");
		popq("rsi");
		popq("rdi");
		return;
	}
	errorf("unimplemented store\n");
}
/* Emit code/data for every symbol introduced by a declaration. */
static void
decl(Node *n)
{
	int i;
	Sym *sym;
	for(i = 0; i < n->Decl.syms->len; i++) {
		sym = vecget(n->Decl.syms, i);
		emitsym(sym);
	}
}
/* Emit code that leaves the ADDRESS of an lvalue expression in %rax. */
static void
addr(Node *n)
{
	int sz;
	int offset;
	Sym *sym;
	switch(n->t) {
	case NUNOP:
		/* Dereference (*p): the operand's value IS the address. */
		expr(n->Unop.operand);
		break;
	case NSEL:
		/* Member select: base address plus the member's struct offset. */
		expr(n->Sel.operand);
		if(isptr(n->Sel.operand->type))
			offset = structoffsetfromname(n->Sel.operand->type->Ptr.subty, n->Sel.name);
		else if(isstruct(n->Sel.operand->type))
			offset = structoffsetfromname(n->Sel.operand->type, n->Sel.name);
		else
			panic("internal error");
		if(offset < 0)
			panic("internal error");
		outi("addq $%d, %%rax\n", offset);
		break;
	case NIDENT:
		sym = n->Ident.sym;
		switch(sym->k) {
		case SYMGLOBAL:
			/* RIP-relative addressing for globals. */
			outi("leaq %s(%%rip), %%rax\n", sym->Global.label);
			break;
		case SYMLOCAL:
			outi("leaq %d(%%rbp), %%rax\n", sym->Local.slot->offset);
			break;
		default:
			panic("internal error");
		}
		break;
	case NIDX:
		/* Array index: base + index * element size. */
		expr(n->Idx.idx);
		sz = n->type->size;
		if(sz != 1) {
			outi("imul $%d, %%rax\n", sz);
		}
		pushq("rax");
		expr(n->Idx.operand);
		popq("rcx");
		outi("addq %%rcx, %%rax\n");
		break;
	default:
		errorf("unimplemented addr\n");
	}
}
/* Emit a binary operator with the left operand in %rax and the right
   operand in %rcx; the result is left in %rax. Comparisons materialize
   0 or 1 via a conditional jump over the two constants. */
static void
obinop(int op, CTy *t)
{
	char *lset;
	char *lafter;
	char *opc;
	if(!isitype(t) && !isptr(t))
		panic("unimplemented binary operator type\n");
	switch(op) {
	case '+':
		outi("addq %%rcx, %%rax\n");
		break;
	case '-':
		outi("subq %%rcx, %%rax\n");
		break;
	case '*':
		outi("imul %%rcx, %%rax\n");
		break;
	case '/':
		/* cqto sign-extends %rax into %rdx:%rax for idiv. */
		outi("cqto\n");
		outi("idiv %%rcx\n");
		break;
	case '%':
		/* idiv leaves the remainder in %rdx. */
		outi("cqto\n");
		outi("idiv %%rcx\n");
		outi("mov %%rdx, %%rax\n");
		break;
	case '|':
		outi("or %%rcx, %%rax\n");
		break;
	case '&':
		outi("and %%rcx, %%rax\n");
		break;
	case '^':
		outi("xor %%rcx, %%rax\n");
		break;
	case TOKSHR:
		/* Arithmetic shift: shift count comes from %cl. */
		outi("sar %%cl, %%rax\n");
		break;
	case TOKSHL:
		outi("sal %%cl, %%rax\n");
		break;
	case TOKEQL:
	case TOKNEQ:
	case TOKGEQ:
	case TOKLEQ:
	case '>':
	case '<':
		lset = newlabel();
		lafter = newlabel();
		switch(op) {
		case TOKEQL:
			opc = "jz";
			break;
		case TOKNEQ:
			opc = "jnz";
			break;
		case '<':
			opc = "jl";
			break;
		case '>':
			opc = "jg";
			break;
		case TOKGEQ:
			opc = "jge";
			break;
		case TOKLEQ:
			opc = "jle";
			break;
		}
		outi("cmp %%rcx, %%rax\n");
		outi("%s %s\n", opc, lset);
		outi("movq $0, %%rax\n");
		outi("jmp %s\n", lafter);
		out("%s:\n", lset);
		outi("movq $1, %%rax\n");
		out("%s:\n", lafter);
		break;
	default:
		errorf("unimplemented binop %d\n", op);
	}
}
/* Emit an assignment. Plain '=' stores the RHS value; compound
   assignments (+=, etc.) load the current value, apply the operator,
   and store back. In both cases the assigned value ends up in %rax
   (the value of the assignment expression). */
static void
assign(Node *n)
{
	Node *l, *r;
	int op;
	op = n->Assign.op;
	l = n->Assign.l;
	r = n->Assign.r;
	if(op == '=') {
		expr(r);
		pushq("rax");
		addr(l);
		popq("rcx");
		if(!isptr(l->type) && !isitype(l->type) && !isstruct(l->type))
			errorf("unimplemented assign\n");
		store(l->type);
		outi("movq %%rcx, %%rax\n");
		return;
	}
	/* Compound assignment: keep the lvalue address on the stack while
	   evaluating the RHS, then recombine and store. */
	addr(l);
	pushq("rax");
	load(l->type);
	pushq("rax");
	expr(r);
	outi("movq %%rax, %%rcx\n");
	popq("rax");
	/* XXX this type is not correct for comparison ops works anyway, but should be changed*/
	obinop(op, n->type);
	outi("movq %%rax, %%rcx\n");
	popq("rax");
	store(l->type);
	outi("movq %%rcx, %%rax\n");
}
/* Emit short-circuit && / ||: jump to the true/false label as soon as
   the outcome is known, then materialize 1 or 0 in %rax. */
static void
shortcircuit(Node *n)
{
	char *t, *f, *e;
	t = newlabel();
	f = newlabel();
	e = newlabel();
	expr(n->Binop.l);
	if(n->Binop.op == TOKLAND) {
		/* && : a false LHS decides the result without evaluating RHS. */
		outi("testq %%rax, %%rax\n");
		outi("jz %s\n", f);
	} else if(n->Binop.op == TOKLOR) {
		/* || : a true LHS decides the result without evaluating RHS. */
		outi("testq %%rax, %%rax\n");
		outi("jnz %s\n", t);
	} else {
		panic("internal error");
	}
	expr(n->Binop.r);
	if(n->Binop.op == TOKLAND) {
		outi("testq %%rax, %%rax\n");
		outi("jz %s\n", f);
		outi("jmp %s\n", t);
	} else if(n->Binop.op == TOKLOR) {
		outi("testq %%rax, %%rax\n");
		outi("jnz %s\n", t);
		outi("jmp %s\n", f);
	} else {
		panic("internal error");
	}
	out("%s:\n", t);
	outi("mov $1, %%rax\n");
	outi("jmp %s\n", e);
	out("%s:\n", f);
	outi("xor %%rax, %%rax\n");
	outi("jmp %s\n", e);
	out("%s:\n", e);
}
/* Emit a binary expression: short-circuit ops take a special path;
   everything else evaluates LHS (saved on the stack), then RHS, and
   combines them via obinop with LHS in %rax and RHS in %rcx. */
static void
binop(Node *n)
{
	if(n->Binop.op == TOKLAND || n->Binop.op == TOKLOR) {
		shortcircuit(n);
		return;
	}
	expr(n->Binop.l);
	pushq("rax");
	expr(n->Binop.r);
	outi("movq %%rax, %%rcx\n");
	popq("rax");
	obinop(n->Binop.op, n->type);
}
/* Emit a unary operator; the result is left in %rax. */
static void
unop(Node *n)
{
	switch(n->Unop.op) {
	case '*':
		/* Dereference: operand yields an address, load through it. */
		expr(n->Unop.operand);
		load(n->type);
		break;
	case '&':
		/* Address-of: compute the operand's lvalue address. */
		addr(n->Unop.operand);
		break;
	case '~':
		expr(n->Unop.operand);
		/* Fix: was out(), which skipped the instruction indentation
		   every other emitted instruction gets via outi(). */
		outi("notq %%rax\n");
		break;
	case '!':
		/* Logical not: %rax = (%rax == 0) ? 1 : 0 via setz. */
		expr(n->Unop.operand);
		outi("xorq %%rcx, %%rcx\n");
		outi("testq %%rax, %%rax\n");
		outi("setz %%cl\n");
		outi("movq %%rcx, %%rax\n");
		break;
	case '-':
		expr(n->Unop.operand);
		outi("neg %%rax\n");
		break;
	case TOKINC:
	default:
		errorf("unimplemented unop %d\n", n->Unop.op);
	}
}
/* Emit ++/-- (prefix and postfix). The updated value is stored back,
   and for postfix the pre-update value is reconstructed in %rax by
   applying the inverse operation. Pointers step by the pointee size. */
static void
incdec(Node *n)
{
	if(!isitype(n->type) && !isptr(n->type))
		panic("unimplemented incdec");
	addr(n->Incdec.operand);
	pushq("rax");
	load(n->type);
	if(isptr(n->type)) {
		if(n->Incdec.op == TOKINC)
			outi("add $%d, %%rax\n", n->type->Ptr.subty->size);
		else
			outi("add $%d, %%rax\n", -n->type->Ptr.subty->size);
	} else {
		if(n->Incdec.op == TOKINC)
			outi("inc %%rax\n");
		else
			outi("dec %%rax\n");
	}
	outi("movq %%rax, %%rcx\n");
	popq("rax");
	store(n->type);
	outi("movq %%rcx, %%rax\n")<
</document_contents>

Wait — transcription slip above; emitting corrected block below is not possible inside this body, so the corrected doc_update body is:

	if(n->Incdec.post == 1) {
		if(n->Incdec.op == TOKINC)
			outi("dec %%rax\n");
		else
			outi("inc %%rax\n");
	}
}
/* Emit an identifier reference: enum constants become immediates,
   everything else is address + load. Array parameters are passed as
   pointers, so they need one extra dereference to get the array base. */
static void
ident(Node *n)
{
	Sym *sym;
	sym = n->Ident.sym;
	if(sym->k == SYMENUM) {
		outi("movq $%d, %%rax\n", sym->Enum.v);
		return;
	}
	addr(n);
	if(sym->k == SYMLOCAL)
		if(sym->Local.isparam)
			if(isarray(sym->type))
				outi("movq (%%rax), %%rax\n");
	load(n->type);
}
/* Emit an if/else: jump to the (pre-assigned) else label when the
   condition is zero; the true branch jumps over the else body. */
static void
eif(Node *n)
{
	char *end;
	end = newlabel();
	expr(n->If.expr);
	outi("test %%rax, %%rax\n");
	outi("jz %s\n", n->If.lelse);
	stmt(n->If.iftrue);
	outi("jmp %s\n", end);
	out("%s:\n", n->If.lelse);
	if(n->If.iffalse)
		stmt(n->If.iffalse);
	out("%s:\n", end);
}
/* Emit a for loop. The start/step/end labels were assigned earlier;
   'continue' targets lstep and 'break' targets lend. */
static void
efor(Node *n)
{
	if(n->For.init)
		expr(n->For.init);
	out("%s:\n", n->For.lstart);
	if(n->For.cond) {
		expr(n->For.cond);
		outi("test %%rax, %%rax\n");
		outi("jz %s\n", n->For.lend);
	}
	stmt(n->For.stmt);
	/* Fix: the step label was emitted with outi(), indenting it unlike
	   every other label in the file; use out() for consistency. */
	out("%s:\n", n->For.lstep);
	if(n->For.step)
		expr(n->For.step);
	outi("jmp %s\n", n->For.lstart);
	out("%s:\n", n->For.lend);
}
/* Emit a while loop: test at the top, exit to lend on zero. */
static void
ewhile(Node *n)
{
	out("%s:\n", n->While.lstart);
	expr(n->While.expr);
	outi("test %%rax, %%rax\n");
	outi("jz %s\n", n->While.lend);
	stmt(n->While.stmt);
	outi("jmp %s\n", n->While.lstart);
	out("%s:\n", n->While.lend);
}
/* Emit a do-while loop: the body runs once before the condition is
   tested at lcond ('continue' presumably targets lcond). */
static void
dowhile(Node *n)
{
	out("%s:\n", n->DoWhile.lstart);
	stmt(n->DoWhile.stmt);
	out("%s:\n", n->DoWhile.lcond);
	expr(n->DoWhile.expr);
	outi("test %%rax, %%rax\n");
	outi("jz %s\n", n->DoWhile.lend);
	outi("jmp %s\n", n->DoWhile.lstart);
	out("%s:\n", n->DoWhile.lend);
}
/* Emit a switch as a linear compare-and-jump chain over the case
   constants, falling back to the default label (or the end) when
   nothing matches. Case labels are emitted inside the body by stmt(). */
static void
eswitch(Node *n)
{
	int i;
	Node *c;
	expr(n->Switch.expr);
	for(i = 0; i < n->Switch.cases->len; i++) {
		c = vecget(n->Switch.cases, i);
		outi("mov $%lld, %%rcx\n", c->Case.cond);
		outi("cmp %%rax, %%rcx\n");
		outi("je %s\n", c->Case.l);
	}
	if(n->Switch.ldefault) {
		outi("jmp %s\n", n->Switch.ldefault);
	} else {
		outi("jmp %s\n", n->Switch.lend);
	}
	stmt(n->Switch.stmt);
	out("%s:\n", n->Switch.lend);
}
/* Emit the ternary conditional ?: with an ordinary branch; only one
   arm is evaluated, and the chosen arm's value stays in %rax. */
static void
cond(Node *n)
{
	char *lfalse, *lend;
	if(!isitype(n->type) && !isptr(n->type))
		panic("unimplemented emit cond");
	expr(n->Cond.cond);
	lfalse = newlabel();
	lend = newlabel();
	outi("test %%rax, %%rax\n");
	outi("jz %s\n", lfalse);
	expr(n->Cond.iftrue);
	outi("jmp %s\n", lend);
	out("%s:\n", lfalse);
	expr(n->Cond.iffalse);
	out("%s:\n", lend);
}
/* Emit a cast. All supported conversions are representation-preserving
   at the 64-bit register level, so no instructions are generated;
   anything outside the listed pairs is rejected.
   NOTE(review): integer narrowing/widening relies on loads already
   sign-extending to 64 bits -- confirm truncation semantics. */
static void
cast(Node *n)
{
	CTy *from;
	CTy *to;
	expr(n->Cast.operand);
	from = n->Cast.operand->type;
	to = n->type;
	if(isptr(from) && isptr(to))
		return;
	if(isptr(to) && isitype(from))
		return;
	if(isptr(from) && isitype(to))
		return;
	if(isitype(from) && isitype(to))
		return;
	if(isfunc(from) && isptr(to))
		return;
	if(isarray(from) && isptr(to))
		return;
	errorf("unimplemented cast %d %d\n", from->t, to->t);
}
/* Emit member selection (a.b / p->b): base address plus member offset,
   then load the member's value. */
static void
sel(Node *n)
{
	CTy *t;
	int offset;
	expr(n->Sel.operand);
	t = n->Sel.operand->type;
	if(isptr(t))
		offset = structoffsetfromname(t->Ptr.subty, n->Sel.name);
	else if(isstruct(t))
		offset = structoffsetfromname(t, n->Sel.name);
	else
		panic("internal error");
	if(offset < 0)
		panic("internal error");
	if(offset != 0)
		outi("add $%d, %%rax\n", offset);
	load(n->type);
}
/* Emit an index expression: base + index * element size, then load.
   NOTE(review): uses raw push/pop instead of pushq()/popq(), so
   stackoffset is not tracked across the inner expr() -- confirm this
   is intentional. */
static void
idx(Node *n)
{
	int sz;
	expr(n->Idx.idx);
	sz = n->type->size;
	if(sz != 1)
		outi("imul $%d, %%rax\n", sz);
	outi("push %%rax\n");
	expr(n->Idx.operand);
	outi("pop %%rcx\n");
	outi("addq %%rcx, %%rax\n");
	load(n->type);
}
/* Emit pointer arithmetic: pointer + offset * pointee size.
   NOTE(review): like idx(), bypasses pushq()/popq() stack tracking. */
static void
ptradd(Node *n)
{
	int sz;
	sz = n->type->Ptr.subty->size;
	expr(n->Ptradd.offset);
	if(sz != 1)
		outi("imul $%d, %%rax\n", sz);
	outi("push %%rax\n");
	expr(n->Ptradd.ptr);
	outi("pop %%rcx\n");
	outi("addq %%rcx, %%rax\n");
}
/* Emit a comma expression: evaluate all sub-expressions left to right;
   the last one's value remains in %rax. */
static void
comma(Node *n)
{
	int i;
	for(i = 0; i < n->Comma.exprs->len; i++) {
		expr(vecget(n->Comma.exprs, i));
	}
}
/* Emit a string literal: defer the string bytes (plus a pointer quad)
   to the data section under a fresh label, then load that pointer.
   The label's quad holds the address of the string data (see data()),
   so the movq dereference yields the char* value. */
static void
str(Node *n)
{
	char *l;
	l = newlabel();
	penddata(l, n->type, n, 0);
	outi("leaq %s(%%rip), %%rax\n", l);
	/* Fix: a stray 'l' argument was passed with no matching format
	   specifier in the outi() call below. */
	outi("movq (%%rax), %%rax\n");
}
/* Emit __builtin_va_start: fill in the va_list pointed to by %rax.
   Writes gp_offset, fp_offset and reg_save_area; the register save
   area was spilled at -176(%rbp) by func() for vararg functions. */
static void
vastart(Node *n)
{
	int argend;
	expr(n->Builtin.Vastart.valist);
	/* XXX currently only support int args */
	argend = (n->Builtin.Vastart.param->Ident.sym->Local.paramidx + 1) * 8;
	pushq("rcx");
	/* gp_offset: first unconsumed integer register slot. */
	outi("movl $%d, (%%rax)\n", argend);
	/* fp_offset: 48 marks all 8 SSE regs as consumed -- presumably
	   because float varargs are unsupported; confirm. */
	outi("movl $%d, 4(%%rax)\n", 48 + 0 * 16);
	outi("leaq %d(%%rbp), %%rcx\n", -176);
	outi("movq %%rcx, 16(%%rax)\n");
	popq("rcx");
}
/* Dispatch on expression node kind; every case leaves its value
   (or, for aggregates, its address) in %rax. */
static void
expr(Node *n)
{
	switch(n->t){
	case NCOMMA:
		comma(n);
		break;
	case NCAST:
		cast(n);
		break;
	case NSTR:
		str(n);
		break;
	case NSIZEOF:
		outi("movq $%lld, %%rax\n", n->Sizeof.type->size);
		break;
	case NNUM:
		outi("movq $%lld, %%rax\n", n->Num.v);
		break;
	case NIDENT:
		ident(n);
		break;
	case NUNOP:
		unop(n);
		break;
	case NASSIGN:
		assign(n);
		break;
	case NBINOP:
		binop(n);
		break;
	case NIDX:
		idx(n);
		break;
	case NSEL:
		sel(n);
		break;
	case NCOND:
		cond(n);
		break;
	case NCALL:
		call(n);
		break;
	case NPTRADD:
		ptradd(n);
		break;
	case NINCDEC:
		incdec(n);
		break;
	case NBUILTIN:
		switch(n->Builtin.t) {
		case BUILTIN_VASTART:
			vastart(n);
			break;
		default:
			errorposf(&n->pos, "unimplemented builtin");
		}
		break;
	default:
		errorf("unimplemented emit expr %d\n", n->t);
	}
}
/* Dispatch on statement node kind and emit the corresponding code. */
static void
stmt(Node *n)
{
	switch(n->t){
	case NDECL:
		decl(n);
		/* Declarations may have queued data; switch back to .text. */
		out(".text\n");
		break;
	case NRETURN:
		ereturn(n);
		break;
	case NIF:
		eif(n);
		break;
	case NWHILE:
		ewhile(n);
		break;
	case NFOR:
		efor(n);
		break;
	case NDOWHILE:
		dowhile(n);
		break;
	case NBLOCK:
		block(n);
		break;
	case NSWITCH:
		eswitch(n);
		break;
	case NGOTO:
		outi("jmp %s\n", n->Goto.l);
		break;
	case NCASE:
		out("%s:\n", n->Case.l);
		stmt(n->Case.stmt);
		break;
	case NLABELED:
		out("%s:\n", n->Labeled.l);
		stmt(n->Labeled.stmt);
		break;
	case NEXPRSTMT:
		if(n->ExprStmt.expr)
			expr(n->ExprStmt.expr);
		break;
	default:
		errorf("unimplemented emit stmt %d\n", n->t);
	}
}
/* Emit an assembler data directive (.quad/.long/.short/.byte) for a
   constant scalar initializer, constant-folding the expression first.
   Symbolic constants emit "label + offset" relocations.
   NOTE(review): c->v is printed with %d; if Const.v is wider than int
   the format should be widened -- confirm against the Const type. */
static void
itypedata(Node *prim)
{
	Const *c;
	if(!isitype(prim->type) && !isptr(prim->type))
		/* Fix: the message contained a %d with no matching argument,
		   which is undefined behavior in a printf-style panic. */
		panic("internal error");
	c = foldexpr(prim);
	if(!c)
		errorposf(&prim->pos, "not a constant expression");
	if(c->p) {
		/* Address constant: symbol plus offset. */
		switch(prim->type->size) {
		case 8:
			out(".quad %s + %d\n", c->p, c->v);
			return;
		case 4:
			out(".long %s + %d\n", c->p, c->v);
			return;
		case 2:
			out(".short %s + %d\n", c->p, c->v);
			return;
		case 1:
			out(".byte %s + %d\n", c->p, c->v);
			return;
		default:
			panic("unimplemented");
		}
	}
	/* Plain integer constant. */
	switch(prim->type->size) {
	case 8:
		out(".quad %d\n", c->v);
		return;
	case 4:
		out(".long %d\n", c->v);
		return;
	case 2:
		out(".short %d\n", c->v);
		return;
	case 1:
		out(".byte %d\n", c->v);
		return;
	default:
		panic("unimplemented");
	}
	panic("internal error");
}
/* Emit one deferred data definition. Uninitialized objects become
   .comm; char arrays/pointers initialized from string literals get
   .string; scalars go through itypedata; arrays and structs walk the
   '{...}' initializer, zero-filling gaps between members. */
static void
data(Data *d)
{
	InitMember *initmemb;
	int i, offset;
	char *l;
	if(!d->init) {
		out(".comm %s, %d, %d\n", d->label, d->type->size, d->type->align);
		return;
	}
	if(d->isglobal)
		out(".globl %s\n", d->label);
	out("%s:\n", d->label);
	if(ischararray(d->type))
		if(d->init->t == NSTR) {
			out(".string %s\n", d->init->Str.v);
			return;
		}
	if(ischarptr(d->type))
		if(d->init->t == NSTR) {
			/* Pointer initialized to a string: emit a quad pointing
			   at a fresh label followed by the string bytes. */
			l = newlabel();
			out(".quad %s\n", l);
			out("%s:\n", l);
			out(".string %s\n", d->init->Str.v);
			return;
		}
	if(isitype(d->type) || isptr(d->type)) {
		itypedata(d->init);
		return;
	}
	if(isarray(d->type) || isstruct(d->type)) {
		if(d->init->t != NINIT)
			errorposf(&d->init->pos, "array/struct expects a '{' style initializer");
		offset = 0;
		for(i = 0; i < d->init->Init.inits->len ; i++) {
			initmemb = vecget(d->init->Init.inits, i);
			/* Zero-fill any padding before this member. */
			if(initmemb->offset != offset)
				out(".fill %d, 1, 0\n", initmemb->offset - offset);
			itypedata(initmemb->n);
			offset = initmemb->offset + initmemb->n->type->size;
		}
		/* Zero-fill the tail of the object. */
		if(offset < d->type->size)
			out(".fill %d, 1, 0\n", d->type->size - offset);
		return;
	}
	panic("internal error");
}
/* Emit code or data for a symbol. Global functions are emitted
   immediately; global objects are queued for emitend(); locals with
   initializers get inline store code. extern symbols emit nothing. */
void
emitsym(Sym *sym)
{
	out("# emit sym %s\n", sym->name);
	switch(sym->k){
	case SYMGLOBAL:
		if(sym->Global.sclass == SCEXTERN)
			break;
		if(isfunc(sym->type)) {
			func(sym->init, sym->Global.label, sym->Global.sclass == SCGLOBAL);
			break;
		}
		penddata(sym->Global.label, sym->type, sym->init, sym->Global.sclass == SCGLOBAL);
		break;
	case SYMLOCAL:
		if(sym->init) {
			/* Evaluate the initializer, then store it into the slot. */
			expr(sym->init);
			pushq("rax");
			outi("leaq %d(%%rbp), %%rax\n", sym->Local.slot->offset);
			popq("rcx");
			if(!isptr(sym->type) && !isitype(sym->type) && !isstruct(sym->type))
				errorf("unimplemented init\n");
			store(sym->type);
		}
		break;
	case SYMENUM:
	case SYMTYPE:
		panic("internal error");
	}
	out("\n");
}
/* Flush all queued data definitions into the .data section; called
   once after all symbols have been emitted. */
void
emitend()
{
	int i;
	out(".data\n\n");
	for(i = 0; i < pendingdata->len; i++)
		data(vecget(pendingdata, i));
}
| 9,742 |
528 | <reponame>SisMaker/jiffy<filename>c_src/doubles.cc<gh_stars>100-1000
#include "double-conversion/double-conversion.h"
#define BEGIN_C extern "C" {
#define END_C }
namespace dc = double_conversion;
BEGIN_C
/* Render val into buf (capacity size) as the shortest decimal string
 * that round-trips back to the same double, writing the length to *len.
 * Returns 1 on success, 0 if conversion fails (e.g. buffer too small).
 * The flags force "1.0"-style output (trailing point and zero) and a
 * '+' sign on positive exponents. */
int
double_to_shortest(unsigned char* buf, size_t size, size_t* len, double val)
{
    int flags = dc::DoubleToStringConverter::UNIQUE_ZERO |
            dc::DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN |
            dc::DoubleToStringConverter::EMIT_TRAILING_DECIMAL_POINT |
            dc::DoubleToStringConverter::EMIT_TRAILING_ZERO_AFTER_POINT;

    dc::StringBuilder builder(reinterpret_cast<char*>(buf), size);
    /* 'e' exponent char; decimal notation for exponents in [-6, 21);
     * last two args: max leading/trailing padding zeroes. */
    dc::DoubleToStringConverter conv(flags, NULL, NULL, 'e', -6, 21, 6, 0);

    if(!conv.ToShortest(val, &builder)) {
        return 0;
    }

    *len = (size_t) builder.position();
    builder.Finalize();

    return 1;
}
END_C
| 390 |
3,710 | <reponame>rozhuk-im/opentoonz
#pragma once
#include <traster.h>
namespace TScannerUtil {

/* Copies a rectangle from rin to rout, mirroring it horizontally when
 * mirror is odd, then rotating it counter-clockwise by the multiple of
 * ninety degrees given by ninety.
 * (Translated from the original Italian comment; it describes the
 * mirror/ninety parameters, which appear on copy90BWBufferToRasGR8
 * below -- confirm which function it was meant for.)
 */

void copyRGBBufferToTRaster32(unsigned char *rgbBuffer, int rgbLx, int rgbLy,
                              const TRaster32P &rout, bool internal);

void copyRGBBufferToTRasterGR8(unsigned char *rgbBuffer, int rgbLx, int rgbLy,
                               int rgbWrap, const TRasterGR8P &rout);

void copyGR8BufferToTRasterGR8(unsigned char *gr8Buffer, int rgbLx, int rgbLy,
                               const TRasterGR8P &rout, bool internal);

void copyGR8BufferToTRasterBW(unsigned char *gr8Buffer, int rgbLx, int rgbLy,
                              const TRasterGR8P &rout, bool internal,
                              float thres);

void copyBWBufferToTRasterGR8(const unsigned char *buffer, int rgbLx, int rgbLy,
                              const TRasterGR8P &rout, bool isBW,
                              bool internal);

void copy90BWBufferToRasGR8(unsigned char *bwBuffer, int bwLx, int bwLy,
                            int bwWrap, bool isBW, TRasterGR8P &rout,
                            int mirror, int ninety);
};
| 650 |
2,690 | <gh_stars>1000+
# Copyright 2016 ClusterHQ Inc. See LICENSE file for details.
from zope.interface import implementer
from twisted.internet.defer import succeed
from twisted.internet.task import Clock
from flocker.testtools import TestCase
from benchmark._interfaces import IRequest
from benchmark.scenarios._request_load import RequestLoadScenario
@implementer(IRequest)
class TestRequest:
    """
    A very simple request that does nothing but always succeeds.
    """

    def run_setup(self):
        # Setup phase: no work needed; report immediate success.
        return succeed(None)

    def make_request(self):
        # A single benchmark request: no work; report immediate success.
        return succeed(None)

    def run_cleanup(self):
        # Cleanup phase: no work needed; report immediate success.
        return succeed(None)
class RequestMeasureTests(TestCase):
    """
    Tests for ``_request_and_measure``.
    """

    @staticmethod
    def _ok_count(scenario):
        # Number of successful requests recorded by the scenario so far.
        return scenario.rate_measurer.get_metrics()['ok_count']

    def _make_scenario(self, request_rate):
        # A scenario driven by a deterministic clock and a request that
        # always succeeds immediately.
        return RequestLoadScenario(
            Clock(), TestRequest(), request_rate=request_rate
        )

    def test_single_count(self):
        """
        Adds ``request_rate`` samples per call.
        """
        calls_per_second = 10
        scenario = self._make_scenario(calls_per_second)
        scenario._request_and_measure(1)
        self.assertEqual(self._ok_count(scenario), calls_per_second)

    def test_multiple_count(self):
        """
        The count controls how many requests are made.
        """
        calls_per_second = 10
        seconds = 2
        scenario = self._make_scenario(calls_per_second)
        scenario._request_and_measure(seconds)
        self.assertEqual(
            self._ok_count(scenario), calls_per_second * seconds
        )
| 699 |
305 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference entrance."""
# pylint: disable=g-multiple-import
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
# pylint: disable=unused-variable
import argparse
import collections
import itertools
import numpy as np
import os
import shutil
import cv2
import tqdm
import deepdish as dd
import json
import tensorflow as tf
from tensorflow.python.framework import test_util
import tensorpack.utils.viz as tpviz
from tensorpack.predict import MultiTowerOfflinePredictor, OfflinePredictor, PredictConfig
from tensorpack.tfutils import SmartInit, get_tf_version_tuple
from tensorpack.tfutils.export import ModelExporter
from tensorpack.utils import fs, logger
from dataset import register_coco, register_voc
from config import config as cfg
from config import finalize_configs
from data import get_eval_unlabeled_dataflow
# from third_party
from FasterRCNN.dataset import DatasetRegistry
from FasterRCNN.data import get_eval_dataflow
from FasterRCNN.eval import DetectionResult, multithread_predict_dataflow, predict_image
from FasterRCNN.modeling.generalized_rcnn import ResNetFPNModel, ResNetC4Model
from FasterRCNN.viz import draw_final_outputs, draw_predictions
from FasterRCNN.utils import custom
from FasterRCNN.predict import do_evaluate, do_predict, do_visualize
def predict_unlabeled(model,
                      model_path,
                      nr_visualize=100,
                      output_dir='output_patch_samples'):
  """Predict the pseudo label information of unlabeled data.

  Visualizes the first ``nr_visualize`` samples into ``output_dir/vis``,
  then runs multi-GPU inference over the whole unlabeled dataflow and
  saves per-image proposals/boxes/scores/labels (float16) to
  ``output_dir/pseudo_data.npy`` via deepdish.

  Args:
    model: detection model definition (FPN or C4 variant).
    model_path: checkpoint to load.
    nr_visualize: number of samples to render for inspection.
    output_dir: directory for visualizations and the pseudo-label dump.
  """
  assert cfg.EVAL.PSEUDO_INFERENCE, 'set cfg.EVAL.PSEUDO_INFERENCE=True'
  # NOTE(review): dataset_size is unused below.
  df, dataset_size = get_eval_unlabeled_dataflow(
      cfg.DATA.TRAIN, return_size=True)
  df.reset_state()
  predcfg = PredictConfig(
      model=model,
      session_init=SmartInit(model_path),
      input_names=['image'],  # ['image', 'gt_boxes', 'gt_labels'],
      output_names=[
          'generate_{}_proposals/boxes'.format(
              'fpn' if cfg.MODE_FPN else 'rpn'),
          'generate_{}_proposals/scores'.format(
              'fpn' if cfg.MODE_FPN else 'rpn'),
          'fastrcnn_all_scores',
          'output/boxes',
          'output/scores',  # score of the labels
          'output/labels',
      ])
  pred = OfflinePredictor(predcfg)

  # Reset the output directory: drop any previous dump and vis images.
  if os.path.isdir(output_dir):
    if os.path.isfile(os.path.join(output_dir, 'pseudo_data.npy')):
      os.remove(os.path.join(output_dir, 'pseudo_data.npy'))
    if not os.path.isdir(os.path.join(output_dir, 'vis')):
      os.makedirs(os.path.join(output_dir, 'vis'))
    else:
      shutil.rmtree(os.path.join(output_dir, 'vis'))
      fs.mkdir_p(output_dir + '/vis')
  else:
    fs.mkdir_p(output_dir)
    fs.mkdir_p(output_dir + '/vis')
  logger.warning('-' * 100)
  logger.warning('Write to {}'.format(output_dir))
  logger.warning('-' * 100)

  # Single-predictor pass over the first nr_visualize samples, writing
  # background/foreground proposal and final-detection panels.
  with tqdm.tqdm(total=nr_visualize) as pbar:
    for idx, dp in itertools.islice(enumerate(df), nr_visualize):
      img, img_id = dp  # dp['image'], dp['img_id']
      rpn_boxes, rpn_scores, all_scores, \
          final_boxes, final_scores, final_labels = pred(img)
      # NOTE(review): this outs dict is never used in the vis loop.
      outs = {
          'proposals_boxes': rpn_boxes,  # (?,4)
          'proposals_scores': rpn_scores,  # (?,)
          'boxes': final_boxes,
          'scores': final_scores,
          'labels': final_labels
      }
      ratios = [10, 10]  # [top 20% as background, bottom 20% as background]
      bg_ind, fg_ind = custom.find_bg_and_fg_proposals(
          all_scores, ratios=ratios)

      bg_viz = draw_predictions(img, rpn_boxes[bg_ind], all_scores[bg_ind])

      fg_viz = draw_predictions(img, rpn_boxes[fg_ind], all_scores[fg_ind])

      results = [
          DetectionResult(*args)
          for args in zip(final_boxes, final_scores, final_labels, [None] *
                          len(final_labels))
      ]
      final_viz = draw_final_outputs(img, results)

      viz = tpviz.stack_patches([bg_viz, fg_viz, final_viz], 2, 2)

      if os.environ.get('DISPLAY', None):
        tpviz.interactive_imshow(viz)
      assert cv2.imwrite('{}/vis/{:03d}.png'.format(output_dir, idx), viz)
      pbar.update()
  logger.info('Write {} samples to {}'.format(nr_visualize, output_dir))

  ## Parallel inference the whole unlabled data
  pseudo_preds = collections.defaultdict(list)

  num_tower = max(cfg.TRAIN.NUM_GPUS, 1)
  graph_funcs = MultiTowerOfflinePredictor(predcfg, list(
      range(num_tower))).get_predictors()
  dataflows = [
      get_eval_unlabeled_dataflow(
          cfg.DATA.TRAIN, shard=k, num_shards=num_tower)
      for k in range(num_tower)
  ]

  all_results = multithread_predict_dataflow(dataflows, graph_funcs)

  # Collect per-image predictions, stored as float16 to keep the dump small.
  # NOTE(review): loop variable 'id' shadows the builtin.
  for id, result in tqdm.tqdm(enumerate(all_results)):
    img_id = result['image_id']
    outs = {
        'proposals_boxes': result['proposal_box'].astype(np.float16),  # (?,4)
        'proposals_scores': result['proposal_score'].astype(np.float16),  # (?,)
        # 'frcnn_all_scores': result['frcnn_score'].astype(np.float16),
        'boxes': result['bbox'].astype(np.float16),  # (?,4)
        'scores': result['score'].astype(np.float16),  # (?,)
        'labels':
            result['category_id'].astype(np.float16)  # (?,)
    }
    pseudo_preds[img_id] = outs
  # NOTE(review): logger.warn is deprecated in favor of logger.warning.
  logger.warn('Writing to {}'.format(
      os.path.join(output_dir, 'pseudo_data.npy')))
  try:
    dd.io.save(os.path.join(output_dir, 'pseudo_data.npy'), pseudo_preds)
  except RuntimeError:
    logger.error('Save failed. Check reasons manually...')
def do_evaluate_unlabeled(pred_config, output_file, reuse=True):
  """Evaluate unlabled data.

  For each dataset in cfg.DATA.VAL, runs multi-GPU inference (or, when
  ``reuse`` is set and the per-dataset output file already exists, loads
  the cached results), computes COCO-style metrics with a confidence
  threshold, and writes them to ``<output>_cocometric.json``.
  """
  for i, dataset in enumerate(cfg.DATA.VAL):
    output = output_file + '-' + dataset
    if not os.path.isfile(output) or not reuse:
      if i == 0:
        # Predictors are built once, on the first dataset that needs them.
        num_tower = max(cfg.TRAIN.NUM_GPUS, 1)
        graph_funcs = MultiTowerOfflinePredictor(
            pred_config, list(range(num_tower))).get_predictors()
      logger.info('Evaluating {} ...'.format(dataset))
      dataflows = [
          get_eval_dataflow(dataset, shard=k, num_shards=num_tower)
          for k in range(num_tower)
      ]
      all_results = multithread_predict_dataflow(dataflows, graph_funcs)
      eval_metrics = DatasetRegistry.get(dataset).eval_inference_results2(
          all_results, output, threshold=cfg.TRAIN.CONFIDENCE)
    else:
      # Cached path: reuse stored detections, only recompute the metrics.
      all_results = json.load(open(output, 'r'))
      eval_metrics = DatasetRegistry.get(dataset).eval_inference_results2(
          all_results, output, threshold=cfg.TRAIN.CONFIDENCE, metric_only=True)
    with open(output + '_cocometric.json', 'w') as f:
      json.dump(eval_metrics, f)
if __name__ == '__main__':
  # CLI entry point: parse flags, register datasets, build the model,
  # then dispatch to pseudo-labeling, visualization, export, prediction,
  # evaluation, or benchmarking depending on the flags given.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--load', help='load a model for evaluation.', required=True)
  parser.add_argument(
      '--visualize', action='store_true', help='visualize intermediate results')
  parser.add_argument(
      '--predict_unlabeled', help='visualize intermediate results')
  parser.add_argument('--eval_unlabeled', help='visualize intermediate results')
  parser.add_argument(
      '--evaluate',
      help='Run evaluation. '
      'This argument is the path to the output json evaluation file')
  parser.add_argument(
      '--predict',
      help='Run prediction on a given image. '
      'This argument is the path to the input image file',
      nargs='+')
  parser.add_argument(
      '--benchmark',
      action='store_true',
      help='Benchmark the speed of the model + postprocessing')
  parser.add_argument(
      '--config',
      help='A list of KEY=VALUE to overwrite those defined in config.py',
      nargs='+')
  parser.add_argument('--output-pb', help='Save a model to .pb')
  parser.add_argument('--output-serving', help='Save a model to serving file')

  args = parser.parse_args()
  if args.config:
    cfg.update_args(args.config)
  try:
    register_voc(cfg.DATA.BASEDIR)  # add VOC datasets to the registry
  except NotImplementedError:
    logger.warning('VOC does not find!')
  register_coco(cfg.DATA.BASEDIR)  # add COCO datasets to the registry
  MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()

  if not tf.test.is_gpu_available():
    # CPU inference requires MKL-enabled TF 1.7+.
    assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \
        'Inference requires either GPU support or MKL support!'
  assert args.load
  finalize_configs(is_training=False)

  if args.predict or args.visualize:
    cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS

  # let the output has the same path logic as checkpoint
  if args.predict_unlabeled:
    output_dir = args.predict_unlabeled
    predict_unlabeled(MODEL, args.load, output_dir=output_dir)

  if args.visualize:
    # NOTE(review): output_dir is only defined when --predict_unlabeled
    # was also given; --visualize alone would raise NameError -- confirm.
    do_visualize(MODEL, args.load, output_dir=output_dir)
  else:
    predcfg = PredictConfig(
        model=MODEL,
        session_init=SmartInit(args.load),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1])

    if args.output_pb:
      ModelExporter(predcfg).export_compact(args.output_pb, optimize=False)
    elif args.output_serving:
      ModelExporter(predcfg).export_serving(args.output_serving, optimize=False)

    if args.predict:
      predictor = OfflinePredictor(predcfg)
      for image_file in args.predict:
        do_predict(predictor, image_file)
    elif args.evaluate:
      assert args.evaluate.endswith('.json'), args.evaluate
      do_evaluate(predcfg, args.evaluate)
    elif args.eval_unlabeled:
      assert args.eval_unlabeled.endswith('.json'), args.eval_unlabeled
      do_evaluate_unlabeled(predcfg, args.eval_unlabeled)
    elif args.benchmark:
      df = get_eval_dataflow(cfg.DATA.VAL[0])
      df.reset_state()
      predictor = OfflinePredictor(predcfg)
      for _, img in enumerate(tqdm.tqdm(df, total=len(df), smoothing=0.5)):
        predict_image(img[0], predictor)
| 4,240 |
1,522 | <reponame>akung0324/mtools<filename>mtools/mloginfo/sections/rs_state_section.py
from .base_section import BaseSection
from mtools.util import OrderedDict
from mtools.util.print_table import print_table
class RsStateSection(BaseSection):
    """
    RsStateSection class.

    Detects replica set state changes in the log file and prints a table
    with the time and details of each change that was found.
    """

    name = "rsstate"

    def __init__(self, mloginfo):
        BaseSection.__init__(self, mloginfo)
        # register the --rsstate flag on the shared argument parser
        self.mloginfo.argparser_sectiongroup.add_argument(
            '--rsstate',
            action='store_true',
            help='outputs information about every detected RS state change')

    @property
    def active(self):
        """Return boolean if this section is active."""
        return self.mloginfo.args['rsstate']

    def run(self):
        """Run this section and print out information."""
        titles = ['date', 'host', 'state/message']
        rows = []
        for host, state, logevent in self.mloginfo.logfile.rs_state:
            row = OrderedDict()
            row['date'] = logevent.datetime.strftime("%b %d %H:%M:%S")
            row['host'] = host
            row['state/message'] = state
            rows.append(row)
        print_table(rows, titles, uppercase_headers=False)
        if len(self.mloginfo.logfile.rs_state) == 0:
            print(" no rs state changes found")
| 713 |
1,975 | <filename>pycls/models/blocks.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Common model blocks."""
import numpy as np
import torch
import torch.nn as nn
from pycls.core.config import cfg
from torch.nn import Module
# ----------------------- Shortcuts for common torch.nn layers ----------------------- #
def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False):
    """Build an ``nn.Conv2d`` with "same"-style padding for an odd kernel.

    Args:
        w_in: number of input channels.
        w_out: number of output channels.
        k: square kernel size; must be odd so padding preserves spatial dims.
        stride, groups, bias: forwarded to ``nn.Conv2d``.
    """
    assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
    padding = (k - 1) // 2
    return nn.Conv2d(
        w_in, w_out, k, stride=stride, padding=padding, groups=groups, bias=bias
    )
def patchify2d(w_in, w_out, k, *, bias=True):
    """Build a non-overlapping ``k x k`` patch-embedding conv (ViT-style stem)."""
    patchifier = nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias)
    return patchifier
def norm2d(w_in):
    """Build a ``BatchNorm2d`` over ``w_in`` channels, configured from cfg."""
    return nn.BatchNorm2d(w_in, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
def pool2d(_w_in, k, *, stride=1):
    """Build a max-pool layer with "same"-style padding for an odd kernel.

    ``_w_in`` is accepted for signature uniformity with the other helpers
    but is unused (pooling is channel-count agnostic).
    """
    assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
    padding = (k - 1) // 2
    return nn.MaxPool2d(k, stride=stride, padding=padding)
def gap2d(_w_in):
    """Build a global average pooling layer (output spatial size 1x1).

    ``_w_in`` is accepted for signature uniformity but is unused.
    """
    return nn.AdaptiveAvgPool2d((1, 1))
def layernorm(w_in):
    """Build a ``LayerNorm`` over the last ``w_in`` features, eps from cfg."""
    return nn.LayerNorm(w_in, eps=cfg.LN.EPS)
def linear(w_in, w_out, *, bias=False):
    """Build a fully-connected layer mapping ``w_in`` -> ``w_out`` features."""
    fc = nn.Linear(w_in, w_out, bias=bias)
    return fc
def activation(activation_fun=None):
    """Helper for building an activation layer.

    Uses ``activation_fun`` when given, otherwise falls back to
    cfg.MODEL.ACTIVATION_FUN. Accepts "relu", "silu"/"swish", "gelu"
    (case-insensitive); anything else raises AssertionError.
    """
    activation_fun = (activation_fun or cfg.MODEL.ACTIVATION_FUN).lower()
    if activation_fun == "relu":
        return nn.ReLU(inplace=cfg.MODEL.ACTIVATION_INPLACE)
    elif activation_fun == "silu" or activation_fun == "swish":
        try:
            # torch.nn.SiLU exists from PyTorch 1.7 on.
            return torch.nn.SiLU()
        except AttributeError:
            # Older torch: fall back to the file-local SiLU implementation.
            return SiLU()
    elif activation_fun == "gelu":
        return torch.nn.GELU()
    else:
        raise AssertionError("Unknown MODEL.ACTIVATION_FUN: " + activation_fun)
# --------------------------- Complexity (cx) calculations --------------------------- #
def conv2d_cx(cx, w_in, w_out, k, *, stride=1, groups=1, bias=False):
    """Accumulate the complexity of a conv2d layer into cx.

    cx is a dict with keys h, w (spatial size), flops, params, and acts; an
    updated dict is returned (the input dict is not mutated).
    """
    assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
    h = (cx["h"] - 1) // stride + 1
    w = (cx["w"] - 1) // stride + 1
    bias_flops = w_out * h * w if bias else 0
    bias_params = w_out if bias else 0
    return {
        "h": h,
        "w": w,
        "flops": cx["flops"] + k * k * w_in * w_out * h * w // groups + bias_flops,
        "params": cx["params"] + k * k * w_in * w_out // groups + bias_params,
        "acts": cx["acts"] + w_out * h * w,
    }
def patchify2d_cx(cx, w_in, w_out, k, *, bias=True):
    """Accumulate the complexity of a patchify2d layer into cx = (h, w, flops, params, acts)."""
    err_str = "Only kernel sizes divisible by the input size are supported."
    assert cx["h"] % k == 0 and cx["w"] % k == 0, err_str
    # The stride equals the kernel size, so each spatial dim shrinks by a factor of k.
    h, w = cx["h"] // k, cx["w"] // k
    bias_flops = w_out * h * w if bias else 0
    bias_params = w_out if bias else 0
    return {
        "h": h,
        "w": w,
        "flops": cx["flops"] + k * k * w_in * w_out * h * w + bias_flops,
        "params": cx["params"] + k * k * w_in * w_out + bias_params,
        "acts": cx["acts"] + w_out * h * w,
    }
def norm2d_cx(cx, w_in):
    """Accumulate the complexity of a norm2d layer into cx = (h, w, flops, params, acts)."""
    # Batchnorm adds 2 * w_in affine parameters (scale, shift); flops/acts are not counted.
    return {
        "h": cx["h"],
        "w": cx["w"],
        "flops": cx["flops"],
        "params": cx["params"] + 2 * w_in,
        "acts": cx["acts"],
    }
def pool2d_cx(cx, w_in, k, *, stride=1):
    """Accumulate the complexity of a pool2d layer into cx = (h, w, flops, params, acts)."""
    assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
    h = (cx["h"] - 1) // stride + 1
    w = (cx["w"] - 1) // stride + 1
    # Pooling has no parameters and its flops are not counted; only acts grow.
    return {
        "h": h,
        "w": w,
        "flops": cx["flops"],
        "params": cx["params"],
        "acts": cx["acts"] + w_in * h * w,
    }
def gap2d_cx(cx, _w_in):
    """Accumulate the complexity of a gap2d layer; spatial size collapses to 1 x 1."""
    # Global average pooling adds no params/flops/acts in this accounting scheme.
    return {"h": 1, "w": 1, "flops": cx["flops"], "params": cx["params"], "acts": cx["acts"]}
def layernorm_cx(cx, w_in):
    """Accumulate the complexity of a layernorm layer into cx = (h, w, flops, params, acts)."""
    # Layernorm adds 2 * w_in affine parameters (scale, shift); flops/acts are not counted.
    return {
        "h": cx["h"],
        "w": cx["w"],
        "flops": cx["flops"],
        "params": cx["params"] + 2 * w_in,
        "acts": cx["acts"],
    }
def linear_cx(cx, w_in, w_out, *, bias=False, num_locations=1):
    """Accumulate the complexity of a linear layer into cx = (h, w, flops, params, acts).

    num_locations scales flops/acts when the same weights are applied at
    multiple positions (e.g. token positions in a sequence).
    """
    bias_flops = w_out * num_locations if bias else 0
    bias_params = w_out if bias else 0
    return {
        "h": cx["h"],
        "w": cx["w"],
        "flops": cx["flops"] + w_in * w_out * num_locations + bias_flops,
        "params": cx["params"] + w_in * w_out + bias_params,
        "acts": cx["acts"] + w_out * num_locations,
    }
# ---------------------------------- Shared blocks ----------------------------------- #
class SiLU(Module):
    """SiLU (a.k.a. Swish) activation: x * sigmoid(x).

    Fallback for torch versions that predate the builtin nn.SiLU (added in 1.7).
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.sigmoid(x) * x
class SE(Module):
    """Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid."""

    def __init__(self, w_in, w_se):
        super().__init__()
        # Attribute names (avg_pool, f_ex) are part of the checkpoint state_dict
        # format; do not rename them.
        self.avg_pool = gap2d(w_in)
        layers = [
            conv2d(w_in, w_se, 1, bias=True),
            activation(),
            conv2d(w_se, w_in, 1, bias=True),
            nn.Sigmoid(),
        ]
        self.f_ex = nn.Sequential(*layers)

    def forward(self, x):
        # Channel-wise gating: squeeze spatially, excite, then rescale x.
        scale = self.f_ex(self.avg_pool(x))
        return x * scale

    @staticmethod
    def complexity(cx, w_in, w_se):
        # The SE branch operates on the pooled 1x1 map; restore the original
        # spatial size afterwards since the block's output keeps x's shape.
        h, w = cx["h"], cx["w"]
        cx = gap2d_cx(cx, w_in)
        cx = conv2d_cx(cx, w_in, w_se, 1, bias=True)
        cx = conv2d_cx(cx, w_se, w_in, 1, bias=True)
        cx["h"], cx["w"] = h, w
        return cx
class MultiheadAttention(Module):
    """Multi-head Attention block from Transformer models."""

    def __init__(self, hidden_d, n_heads):
        super().__init__()
        # Attribute name (block) is part of the checkpoint state_dict format.
        self.block = nn.MultiheadAttention(hidden_d, n_heads)

    def forward(self, query, key, value, need_weights=False):
        return self.block(query=query, key=key, value=value, need_weights=need_weights)

    @staticmethod
    def complexity(cx, hidden_d, n_heads, seq_len):
        """Accumulate attention complexity into cx = (h, w, flops, params, acts)."""
        # Op breakdown mirrors torch/nn/functional.py's multi_head_attention_forward.
        flops, params, acts = cx["flops"], cx["params"], cx["acts"]
        head_d = hidden_d // n_heads
        # Input projection: q, k, v = linear(input).chunk(3).
        flops += seq_len * (3 * hidden_d * hidden_d + 3 * hidden_d)
        params += 3 * hidden_d * hidden_d + 3 * hidden_d
        acts += 3 * hidden_d * seq_len
        # Attention weights: torch.bmm(q, k.transpose(...)).
        flops += n_heads * seq_len * head_d * seq_len
        acts += n_heads * seq_len * seq_len
        # Weighted values: torch.bmm(attn_output_weights, v).
        flops += n_heads * seq_len * seq_len * head_d
        acts += n_heads * seq_len * head_d
        # Output projection: linear(attn_output).
        flops += seq_len * (hidden_d * hidden_d + hidden_d)
        params += hidden_d * hidden_d + hidden_d
        acts += hidden_d * seq_len
        return {"h": cx["h"], "w": cx["w"], "flops": flops, "params": params, "acts": acts}
# ---------------------------------- Miscellaneous ----------------------------------- #
def adjust_block_compatibility(ws, bs, gs):
    """Adjust widths (ws), bottlenecks (bs), and groups (gs) for compatibility.

    Per stage, rounds the bottleneck width w * b to a multiple of the group
    count so grouped convs divide evenly; returns the updated (ws, bs, gs).
    """
    assert len(ws) == len(bs) == len(gs)
    assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs))
    assert all(b < 1 or b % 1 == 0 for b in bs)
    ws_out, gs_out = [], []
    for w, b, g in zip(ws, bs, gs):
        # Bottleneck width, with the group count capped to it.
        v = int(max(1, w * b))
        g = int(min(g, v))
        # Round v to the nearest (nonzero) multiple of m = lcm(g, b).
        m = np.lcm(g, int(b)) if b > 1 else g
        v = max(m, int(round(v / m) * m))
        ws_out.append(int(v / b))
        gs_out.append(g)
    ws, gs = ws_out, gs_out
    assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs))
    return ws, bs, gs
def init_weights(m):
    """Perform ResNet-style weight initialization on module m (use via model.apply)."""
    if isinstance(m, nn.Conv2d):
        # Note that there is no bias due to BN.
        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
    elif isinstance(m, nn.BatchNorm2d):
        # Optionally zero-init the gamma of a block's final BN (cfg-controlled).
        zero_init = cfg.BN.ZERO_INIT_FINAL_GAMMA and hasattr(m, "final_bn") and m.final_bn
        m.weight.data.fill_(0.0 if zero_init else 1.0)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(mean=0.0, std=0.01)
        m.bias.data.zero_()
def drop_connect(x, drop_ratio):
    """Drop connect (adapted from DARTS): randomly zero whole samples of x.

    Surviving samples are scaled by 1 / keep_ratio to preserve the expectation.
    Note: x is modified in-place and also returned.
    """
    keep_ratio = 1.0 - drop_ratio
    # One Bernoulli draw per sample, broadcast over the C, H, W dims.
    mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
    mask.bernoulli_(keep_ratio)
    x = x.div_(keep_ratio).mul_(mask)
    return x
| 4,165 |
2,151 | <gh_stars>1000+
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package com.android.systemui.statusbar;
import android.content.Context;
import android.util.AttributeSet;
import android.view.View;
import android.view.animation.Interpolator;
import com.android.systemui.Interpolators;
/**
* A common base class for all views in the notification stack scroller which don't have a
* background.
*/
public abstract class StackScrollerDecorView extends ExpandableView {
    // The single content child (obtained via findContentView()) whose alpha is animated.
    protected View mContent;
    // Last requested visibility state (the target of the most recent animation).
    private boolean mIsVisible;
    // True while a fade animation is in flight; the view still counts as visible then.
    private boolean mAnimating;
    public StackScrollerDecorView(Context context, AttributeSet attrs) {
        super(context, attrs);
    }
    @Override
    protected void onFinishInflate() {
        super.onFinishInflate();
        mContent = findContentView();
        // Start hidden; callers fade the content in explicitly when needed.
        setInvisible();
    }
    @Override
    protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
        super.onLayout(changed, left, top, right, bottom);
        // Decor views have no background, so suppress any outline from the base class.
        setOutlineProvider(null);
    }
    @Override
    public boolean isTransparent() {
        return true;
    }
    /** Fades the content to {@code nowVisible} with no completion callback. */
    public void performVisibilityAnimation(boolean nowVisible) {
        animateText(nowVisible, null /* onFinishedRunnable */);
    }
    /** Fades the content to {@code nowVisible}, running the runnable when done. */
    public void performVisibilityAnimation(boolean nowVisible, Runnable onFinishedRunnable) {
        animateText(nowVisible, onFinishedRunnable);
    }
    /** @return true if the content is visible or a fade animation is still running. */
    public boolean isVisible() {
        return mIsVisible || mAnimating;
    }
    /**
     * Animate the text to a new visibility.
     *
     * @param nowVisible should it now be visible
     * @param onFinishedRunnable A runnable which should be run when the animation is
     *                           finished; it is invoked immediately if no animation
     *                           is needed.
     */
    private void animateText(boolean nowVisible, final Runnable onFinishedRunnable) {
        if (nowVisible != mIsVisible) {
            // Animate text
            float endValue = nowVisible ? 1.0f : 0.0f;
            Interpolator interpolator;
            if (nowVisible) {
                interpolator = Interpolators.ALPHA_IN;
            } else {
                interpolator = Interpolators.ALPHA_OUT;
            }
            mAnimating = true;
            mContent.animate()
                    .alpha(endValue)
                    .setInterpolator(interpolator)
                    .setDuration(260)
                    .withEndAction(new Runnable() {
                        @Override
                        public void run() {
                            mAnimating = false;
                            if (onFinishedRunnable != null) {
                                onFinishedRunnable.run();
                            }
                        }
                    });
            // Record the target state up front; isVisible() covers the animation window
            // via mAnimating.
            mIsVisible = nowVisible;
        } else {
            // Already in the requested state; still honor the callback contract.
            if (onFinishedRunnable != null) {
                onFinishedRunnable.run();
            }
        }
    }
    /** Hides the content immediately, without animating. */
    public void setInvisible() {
        mContent.setAlpha(0.0f);
        mIsVisible = false;
    }
    @Override
    public void performRemoveAnimation(long duration, float translationDirection,
            Runnable onFinishedRunnable) {
        // TODO: Use duration
        performVisibilityAnimation(false);
    }
    @Override
    public void performAddAnimation(long delay, long duration) {
        // TODO: use delay and duration
        performVisibilityAnimation(true);
    }
    @Override
    public boolean hasOverlappingRendering() {
        return false;
    }
    /** Cancels any in-flight content alpha animation. */
    public void cancelAnimation() {
        mContent.animate().cancel();
    }
    /** @return the single child view whose visibility this decor view manages. */
    protected abstract View findContentView();
}
| 1,737 |
2,151 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package com.android.webview.chromium;
import android.annotation.SuppressLint;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.webkit.TracingConfig;
import android.webkit.TracingController;
import org.chromium.android_webview.AwTracingController;
import org.chromium.base.ThreadUtils;
import org.chromium.base.TraceRecordMode;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.Callable;
import java.util.concurrent.Executor;
/**
* Chromium implementation of TracingController -- forwards calls to
* the chromium internal implementation and makes sure the calls happen on the
* UI thread. Translates predefined categories and posts callbacks.
*/
@SuppressLint({"NewApi", // TracingController is new in API level 28.
        "Override"}) // Remove this once lint is targeting API level 28.
public class TracingControllerAdapter extends TracingController {
    // Chromium-side implementation; its methods must be invoked on the UI thread.
    private final AwTracingController mAwTracingController;
    // Provides runOnUiThreadBlocking() so calls from other threads can be posted.
    private final WebViewChromiumFactoryProvider mFactory;
    public TracingControllerAdapter(
            WebViewChromiumFactoryProvider factory, AwTracingController controller) {
        mFactory = factory;
        mAwTracingController = controller;
    }
    /**
     * Starts tracing with the given config, hopping to the UI thread if needed.
     *
     * @throws IllegalStateException if tracing is already enabled
     * @throws IllegalArgumentException if the config has invalid categories or mode
     */
    @Override
    public void start(@NonNull TracingConfig tracingConfig) {
        if (tracingConfig == null) {
            throw new IllegalArgumentException("tracingConfig cannot be null");
        }
        int result = 0;
        if (checkNeedsPost()) {
            result = mFactory.runOnUiThreadBlocking(new Callable<Integer>() {
                @Override
                public Integer call() {
                    return startOnUI(tracingConfig);
                }
            });
        } else {
            result = startOnUI(tracingConfig);
        }
        if (result != AwTracingController.RESULT_SUCCESS) {
            // make sure to throw on the original calling thread.
            switch (result) {
                case AwTracingController.RESULT_ALREADY_TRACING:
                    throw new IllegalStateException(
                            "cannot start tracing: tracing is already enabled");
                case AwTracingController.RESULT_INVALID_CATEGORIES:
                    throw new IllegalArgumentException(
                            "category patterns starting with '-' or containing ','"
                            + " are not allowed");
                case AwTracingController.RESULT_INVALID_MODE:
                    throw new IllegalArgumentException("invalid tracing mode");
            }
        }
    }
    /** Stops tracing; trace data is delivered to {@code outputStream} via {@code executor}. */
    @Override
    public boolean stop(@Nullable OutputStream outputStream, @NonNull Executor executor) {
        if (checkNeedsPost()) {
            return mFactory.runOnUiThreadBlocking(new Callable<Boolean>() {
                @Override
                public Boolean call() {
                    return stopOnUI(outputStream, executor);
                }
            });
        }
        return stopOnUI(outputStream, executor);
    }
    @Override
    public boolean isTracing() {
        if (checkNeedsPost()) {
            return mFactory.runOnUiThreadBlocking(new Callable<Boolean>() {
                @Override
                public Boolean call() {
                    return mAwTracingController.isTracing();
                }
            });
        }
        return mAwTracingController.isTracing();
    }
    // Maps android.webkit record-mode constants onto chromium's TraceRecordMode.
    // NOTE(review): unrecognized modes silently fall back to RECORD_CONTINUOUSLY.
    private int convertAndroidTracingMode(int tracingMode) {
        switch (tracingMode) {
            case TracingConfig.RECORD_UNTIL_FULL:
                return TraceRecordMode.RECORD_UNTIL_FULL;
            case TracingConfig.RECORD_CONTINUOUSLY:
                return TraceRecordMode.RECORD_CONTINUOUSLY;
        }
        return TraceRecordMode.RECORD_CONTINUOUSLY;
    }
    // True when every bit of categoryMask is set in bitmask.
    private boolean categoryIsSet(int bitmask, int categoryMask) {
        return (bitmask & categoryMask) == categoryMask;
    }
    // Translates the android.webkit predefined-category bitmask into the list of
    // chromium category indices expected by AwTracingController.
    private Collection<Integer> collectPredefinedCategories(int bitmask) {
        ArrayList<Integer> predefinedIndices = new ArrayList<Integer>();
        // CATEGORIES_NONE is skipped on purpose.
        if (categoryIsSet(bitmask, TracingConfig.CATEGORIES_ALL)) {
            predefinedIndices.add(AwTracingController.CATEGORIES_ALL);
        }
        if (categoryIsSet(bitmask, TracingConfig.CATEGORIES_ANDROID_WEBVIEW)) {
            predefinedIndices.add(AwTracingController.CATEGORIES_ANDROID_WEBVIEW);
        }
        if (categoryIsSet(bitmask, TracingConfig.CATEGORIES_WEB_DEVELOPER)) {
            predefinedIndices.add(AwTracingController.CATEGORIES_WEB_DEVELOPER);
        }
        if (categoryIsSet(bitmask, TracingConfig.CATEGORIES_INPUT_LATENCY)) {
            predefinedIndices.add(AwTracingController.CATEGORIES_INPUT_LATENCY);
        }
        if (categoryIsSet(bitmask, TracingConfig.CATEGORIES_RENDERING)) {
            predefinedIndices.add(AwTracingController.CATEGORIES_RENDERING);
        }
        if (categoryIsSet(bitmask, TracingConfig.CATEGORIES_JAVASCRIPT_AND_RENDERING)) {
            predefinedIndices.add(AwTracingController.CATEGORIES_JAVASCRIPT_AND_RENDERING);
        }
        if (categoryIsSet(bitmask, TracingConfig.CATEGORIES_FRAME_VIEWER)) {
            predefinedIndices.add(AwTracingController.CATEGORIES_FRAME_VIEWER);
        }
        return predefinedIndices;
    }
    // Must run on the UI thread; returns an AwTracingController.RESULT_* code.
    private int startOnUI(TracingConfig tracingConfig) {
        return mAwTracingController.start(
                collectPredefinedCategories(tracingConfig.getPredefinedCategories()),
                tracingConfig.getCustomIncludedCategories(),
                convertAndroidTracingMode(tracingConfig.getTracingMode()));
    }
    // Must run on the UI thread. Wraps the caller's stream so all writes/closes are
    // dispatched through the caller-supplied executor, per the TracingController API.
    public boolean stopOnUI(@Nullable OutputStream outputStream, @NonNull Executor executor) {
        if (outputStream == null) {
            return mAwTracingController.stopAndFlush((OutputStream) null);
        }
        final OutputStream localOutputStream = outputStream;
        return mAwTracingController.stopAndFlush(new OutputStream() {
            @Override
            public void write(byte[] chunk) {
                executor.execute(() -> {
                    try {
                        localOutputStream.write(chunk);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                });
            }
            @Override
            public void close() {
                executor.execute(() -> {
                    try {
                        localOutputStream.close();
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                });
            }
            @Override
            public void write(int b) { /* should not be called */
            }
            @Override
            public void flush() { /* should not be called */
            }
            @Override
            public void write(byte[] b, int off, int len) { /* should not be called */
            }
        });
    }
    // True when we are not on the UI thread and must post to it.
    private static boolean checkNeedsPost() {
        return !ThreadUtils.runningOnUiThread();
    }
}
| 3,298 |
3,189 | //
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "NSObject.h"
#import "DTXBlockCompressor.h"
@class NSString;
// Class-dump reconstruction of a DTXBlockCompressor implementation; the name and
// ivar names suggest it is backed by Apple's libcompression (LZFSE / LZ4 scratch
// buffers) — TODO confirm against the original binary.
@interface DTXBlockCompressorLibCompression : NSObject <DTXBlockCompressor>
{
    // Error parsing type: AQ, name: _lzfseScratchBuffer
    // Error parsing type: AQ, name: _lz4ScratchBuffer
}
// Decompresses arg1 (arg2 bytes) into arg3 using compression type arg5; the
// uncompressed length (arg4) must be known up front. Returns success as _Bool.
- (_Bool)uncompressBuffer:(const char *)arg1 ofLength:(unsigned long long)arg2 toBuffer:(char *)arg3 withKnownUncompressedLength:(unsigned long long)arg4 usingCompressionType:(int)arg5;
// Compresses arg1 (arg2 bytes) into arg3 (capacity arg4) with requested type arg5;
// the type actually used is written through arg6. Presumably returns the number of
// bytes written — signature only, behavior not visible here.
- (unsigned long long)compressBuffer:(const char *)arg1 ofLength:(unsigned long long)arg2 toBuffer:(char *)arg3 ofLength:(unsigned long long)arg4 usingCompressionType:(int)arg5 withFinalCompressionType:(int *)arg6;
- (void)dealloc;
// Remaining properties
@property(readonly, copy) NSString *debugDescription;
@property(readonly, copy) NSString *description;
@property(readonly) unsigned long long hash;
@property(readonly) Class superclass;
@end
| 338 |
9,425 | import pytest
import salt.config
import salt.output.highstate as highstate
@pytest.fixture
def configure_loader_modules():
    """Run the highstate outputter with color disabled and pct summaries enabled."""
    opts = dict(salt.config.DEFAULT_MINION_OPTS)
    opts["color"] = False
    opts["state_output_pct"] = True
    return {highstate: {"__opts__": opts}}
@pytest.mark.parametrize("data", [None, {"return": None}, {"return": {"data": None}}])
def test_when_data_result_is_None_output_should_be_string_None(data):
    """Missing/None results should render as the literal string "None"."""
    assert highstate.output(data=data) == "None"
def test_when_data_is_dict_with_return_key_and_return_value_has_data_key_and_data_dict_has_one_dict_element_with_jid_and_fun_keys_and_return_value_is_None_then_output_should_return_literal_None_string():
    """A nested job whose inner return data is None renders as the string "None"."""
    inner_job = {
        "jid": "fnordy fnordy fnordy",
        "fun": "fnordy fnordy fnord",
        "return": {"data": None},
    }
    data = {"return": {"data": {"foo bar quux fnord": inner_job}}}
    assert highstate.output(data=data) == "None"
@pytest.mark.parametrize(
    "return_value",
    [42, "fnord"],
)
def test_when_data_is_dict_with_return_key_and_return_value_has_data_key_and_data_dict_has_one_dict_element_with_jid_and_fun_keys_and_return_value_is_int_or_str_that_value_should_be_returned(
    return_value,
):
    """Scalar (int/str) inner return data should be passed through unchanged."""
    inner_job = {
        "jid": "fnordy fnordy fnordy",
        "fun": "fnordy fnordy fnord",
        "return": {"data": return_value},
    }
    data = {"return": {"data": {"foo bar quux fnord": inner_job}}}
    assert highstate.output(data=data) == return_value
def test_when_orchestrator_output_retcode_in_data_the_retcode_should_be_removed():
    """highstate.output should strip the orchestrator 'retcode' key from data.

    The returned text is irrelevant here, so the call's result is discarded
    (the previous version bound it to an unused local).
    """
    data = {"something_master": None, "retcode": 42}
    highstate.output(data)
    assert "retcode" not in data
def test_when_more_than_one_local_master_retcode_should_not_be_removed():
    """With multiple master entries the 'retcode' key must be left in place.

    The returned text is irrelevant here, so the call's result is discarded
    (the previous version bound it to an unused local).
    """
    expected_retcode = 42
    data = {
        "something_master": None,
        "another_master": None,
        "retcode": expected_retcode,
    }
    highstate.output(data)
    assert data["retcode"] == expected_retcode
def test_pct_summary_output():
    """With state_output_pct set (see fixture), the summary must include Success/Failure percentages."""
    # Orchestration result: one successful (changed) salt state run plus a
    # cmd.run function; only the state contributes to the states summary.
    data = {
        "data": {
            "master": {
                "salt_|-call_sleep_state_|-call_sleep_state_|-state": {
                    "__id__": "call_sleep_state",
                    "__jid__": "20170418153529810135",
                    "__run_num__": 0,
                    "__sls__": "orch.simple",
                    "changes": {
                        "out": "highstate",
                        "ret": {
                            "minion": {
                                "module_|-simple-ping_|-test.ping_|-run": {
                                    "__id__": "simple-ping",
                                    "__run_num__": 0,
                                    "__sls__": "simple-ping",
                                    "changes": {"ret": True},
                                    "comment": "Module function test.ping executed",
                                    "duration": 56.179,
                                    "name": "test.ping",
                                    "result": True,
                                    "start_time": "15:35:31.282099",
                                }
                            },
                            "sub_minion": {
                                "module_|-simple-ping_|-test.ping_|-run": {
                                    "__id__": "simple-ping",
                                    "__run_num__": 0,
                                    "__sls__": "simple-ping",
                                    "changes": {"ret": True},
                                    "comment": "Module function test.ping executed",
                                    "duration": 54.103,
                                    "name": "test.ping",
                                    "result": True,
                                    "start_time": "15:35:31.005606",
                                }
                            },
                        },
                    },
                    "comment": (
                        "States ran successfully. Updating sub_minion, minion."
                    ),
                    "duration": 1638.047,
                    "name": "call_sleep_state",
                    "result": True,
                    "start_time": "15:35:29.762657",
                },
                "salt_|-cmd_run_example_|-cmd.run_|-function": {
                    "__id__": "cmd_run_example",
                    "__jid__": "20200411195112288850",
                    "__run_num__": 1,
                    "__sls__": "orch.simple",
                    "changes": {
                        "out": "highstate",
                        "ret": {"minion": "file1\nfile2\nfile3"},
                    },
                    "comment": (
                        "Function ran successfully. Function cmd.run ran on minion."
                    ),
                    "duration": 412.397,
                    "name": "cmd.run",
                    "result": True,
                    "start_time": "21:51:12.185868",
                },
            }
        },
        "outputter": "highstate",
        "retcode": 0,
    }
    actual_output = highstate.output(data)
    # Summary lines; the pct lines come from the state_output_pct option.
    assert "Succeeded: 1 (changed=1)" in actual_output
    assert "Failed: 0" in actual_output
    assert "Success %: 100.0" in actual_output
    assert "Failure %: 0.0" in actual_output
    assert "Total states run: 1" in actual_output
    # The cmd.run changes payload should be rendered line by line.
    assert " file2" in actual_output
| 3,476 |
5,169 | {
"name": "SwiftEvent",
"version": "1.0.1",
  "summary": "An Event of Swift.",
  "description": "It is a marquee view used on iOS, implemented in Swift.",
"homepage": "https://github.com/Dada7357/SwiftEvent",
"license": "MIT",
"authors": {
"dada7357": "<EMAIL>"
},
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/dada7357/SwiftEvent.git",
"tag": "1.0.1"
},
"source_files": "SwiftEvent/**/*.{h,swift}",
"exclude_files": "SwiftEventTest/**/*.*",
"public_header_files": "SwiftEvent/**/*.h",
"requires_arc": true
}
| 244 |
373 | <gh_stars>100-1000
/** @file
Register names for PCH PCI-E root port devices
Conventions:
- Register definition format:
Prefix_[GenerationName]_[ComponentName]_SubsystemName_RegisterSpace_RegisterName
- Prefix:
Definitions beginning with "R_" are registers
Definitions beginning with "B_" are bits within registers
Definitions beginning with "V_" are meaningful values within the bits
Definitions beginning with "S_" are register size
Definitions beginning with "N_" are the bit position
- [GenerationName]:
Three letter acronym of the generation is used .
Register name without GenerationName applies to all generations.
- [ComponentName]:
This field indicates the component name that the register belongs to (e.g. PCH, SA etc.)
Register name without ComponentName applies to all components.
Register that is specific to -H denoted by "_PCH_H_" in component name.
Register that is specific to -LP denoted by "_PCH_LP_" in component name.
- SubsystemName:
This field indicates the subsystem name of the component that the register belongs to
(e.g. PCIE, USB, SATA, GPIO, PMC etc.).
- RegisterSpace:
MEM - MMIO space register of subsystem.
IO - IO space register of subsystem.
PCR - Private configuration register of subsystem.
CFG - PCI configuration space register of subsystem.
- RegisterName:
Full register name.
Copyright (c) 2019 Intel Corporation. All rights reserved. <BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#ifndef _PCH_REGS_PCIE_H_
#define _PCH_REGS_PCIE_H_
//
// Number of PCIe ports per PCIe controller
//
#define PCH_PCIE_CONTROLLER_PORTS 4u
//
// PCH PCI Express Root Ports (D28:F0..7, D29:F0..7, D27:F0..7)
//
#define PCI_DEVICE_NUMBER_PCH_PCIE_DEVICE_1 28
#define PCI_DEVICE_NUMBER_PCH_PCIE_DEVICE_2 29
#define PCI_DEVICE_NUMBER_PCH_PCIE_DEVICE_3 27
#define PCI_DEVICE_NUMBER_PCH_PCIE_ROOT_PORTS 28
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_1 0
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_2 1
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_3 2
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_4 3
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_5 4
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_6 5
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_7 6
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_8 7
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_9 0
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_10 1
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_11 2
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_12 3
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_13 4
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_14 5
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_15 6
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_16 7
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_17 0
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_18 1
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_19 2
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_20 3
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_21 4
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_22 5
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_23 6
#define PCI_FUNCTION_NUMBER_PCH_PCIE_ROOT_PORT_24 7
#define V_PCH_PCIE_CFG_VENDOR_ID V_PCH_INTEL_VENDOR_ID
#define R_PCH_PCIE_CFG_CLIST 0x40
#define R_PCH_PCIE_CFG_XCAP (R_PCH_PCIE_CFG_CLIST + R_PCIE_XCAP_OFFSET)
#define R_PCH_PCIE_CFG_DCAP (R_PCH_PCIE_CFG_CLIST + R_PCIE_DCAP_OFFSET)
#define R_PCH_PCIE_CFG_DCTL (R_PCH_PCIE_CFG_CLIST + R_PCIE_DCTL_OFFSET)
#define R_PCH_PCIE_CFG_DSTS (R_PCH_PCIE_CFG_CLIST + R_PCIE_DSTS_OFFSET)
#define R_PCH_PCIE_CFG_LCAP (R_PCH_PCIE_CFG_CLIST + R_PCIE_LCAP_OFFSET)
#define B_PCH_PCIE_CFG_LCAP_PN 0xFF000000
#define N_PCH_PCIE_CFG_LCAP_PN 24
#define R_PCH_PCIE_CFG_LCTL (R_PCH_PCIE_CFG_CLIST + R_PCIE_LCTL_OFFSET)
#define R_PCH_PCIE_CFG_LSTS (R_PCH_PCIE_CFG_CLIST + R_PCIE_LSTS_OFFSET)
#define R_PCH_PCIE_CFG_SLCAP (R_PCH_PCIE_CFG_CLIST + R_PCIE_SLCAP_OFFSET)
#define R_PCH_PCIE_CFG_SLCTL (R_PCH_PCIE_CFG_CLIST + R_PCIE_SLCTL_OFFSET)
#define R_PCH_PCIE_CFG_SLSTS (R_PCH_PCIE_CFG_CLIST + R_PCIE_SLSTS_OFFSET)
#define R_PCH_PCIE_CFG_RCTL (R_PCH_PCIE_CFG_CLIST + R_PCIE_RCTL_OFFSET)
#define R_PCH_PCIE_CFG_RSTS (R_PCH_PCIE_CFG_CLIST + R_PCIE_RSTS_OFFSET)
#define R_PCH_PCIE_CFG_DCAP2 (R_PCH_PCIE_CFG_CLIST + R_PCIE_DCAP2_OFFSET)
#define R_PCH_PCIE_CFG_DCTL2 (R_PCH_PCIE_CFG_CLIST + R_PCIE_DCTL2_OFFSET)
#define R_PCH_PCIE_CFG_LCTL2 (R_PCH_PCIE_CFG_CLIST + R_PCIE_LCTL2_OFFSET)
#define R_PCH_PCIE_CFG_LSTS2 (R_PCH_PCIE_CFG_CLIST + R_PCIE_LSTS2_OFFSET)
#define R_PCH_PCIE_CFG_MID 0x80
#define S_PCH_PCIE_CFG_MID 2
#define R_PCH_PCIE_CFG_MC 0x82
#define S_PCH_PCIE_CFG_MC 2
#define R_PCH_PCIE_CFG_MA 0x84
#define S_PCH_PCIE_CFG_MA 4
#define R_PCH_PCIE_CFG_MD 0x88
#define S_PCH_PCIE_CFG_MD 2
#define R_PCH_PCIE_CFG_SVCAP 0x90
#define S_PCH_PCIE_CFG_SVCAP 2
#define R_PCH_PCIE_CFG_SVID 0x94
#define S_PCH_PCIE_CFG_SVID 4
#define R_PCH_PCIE_CFG_PMCAP 0xA0
#define R_PCH_PCIE_CFG_PMCS (R_PCH_PCIE_CFG_PMCAP + R_PCIE_PMCS_OFFST)
#define R_PCH_PCIE_CFG_CCFG 0xD0
#define B_PCH_PCIE_CFG_CCFG_UNRS (BIT6 | BIT5 | BIT4)
#define N_PCH_PCIE_CFG_CCFG_UNRS 4
#define R_PCH_PCIE_CFG_MPC2 0xD4
#define S_PCH_PCIE_CFG_MPC2 4
#define B_PCH_PCIE_CFG_MPC2_PTNFAE BIT12
#define B_PCH_PCIE_CFG_MPC2_LSTP BIT6
#define B_PCH_PCIE_CFG_MPC2_IEIME BIT5
#define B_PCH_PCIE_CFG_MPC2_ASPMCOEN BIT4
#define B_PCH_PCIE_CFG_MPC2_ASPMCO (BIT3 | BIT2)
#define V_PCH_PCIE_CFG_MPC2_ASPMCO_DISABLED 0
#define V_PCH_PCIE_CFG_MPC2_ASPMCO_L0S (1 << 2)
#define V_PCH_PCIE_CFG_MPC2_ASPMCO_L1 (2 << 2)
#define V_PCH_PCIE_CFG_MPC2_ASPMCO_L0S_L1 (3 << 2)
#define B_PCH_PCIE_CFG_MPC2_EOIFD BIT1
#define R_PCH_PCIE_CFG_MPC 0xD8
#define S_PCH_PCIE_CFG_MPC 4
#define B_PCH_PCIE_CFG_MPC_PMCE BIT31
#define B_PCH_PCIE_CFG_MPC_HPCE BIT30
#define B_PCH_PCIE_CFG_MPC_MMBNCE BIT27
#define B_PCH_PCIE_CFG_MPC_P8XDE BIT26
#define B_PCH_PCIE_CFG_MPC_IRRCE BIT25
#define B_PCH_PCIE_CFG_MPC_SRL BIT23
#define B_PCH_PCIE_CFG_MPC_UCEL (BIT20 | BIT19 | BIT18)
#define N_PCH_PCIE_CFG_MPC_UCEL 18
#define B_PCH_PCIE_CFG_MPC_CCEL (BIT17 | BIT16 | BIT15)
#define N_PCH_PCIE_CFG_MPC_CCEL 15
#define B_PCH_PCIE_CFG_MPC_PCIESD (BIT14 | BIT13)
#define N_PCH_PCIE_CFG_MPC_PCIESD 13
#define V_PCH_PCIE_CFG_MPC_PCIESD_GEN1 1
#define V_PCH_PCIE_CFG_MPC_PCIESD_GEN2 2
#define B_PCH_PCIE_CFG_MPC_MCTPSE BIT3
#define B_PCH_PCIE_CFG_MPC_HPME BIT1
#define N_PCH_PCIE_CFG_MPC_HPME 1
#define B_PCH_PCIE_CFG_MPC_PMME BIT0
#define R_PCH_PCIE_CFG_SMSCS 0xDC
#define S_PCH_PCIE_CFG_SMSCS 4
#define B_PCH_PCIE_CFG_SMSCS_PMCS BIT31
#define N_PCH_PCIE_CFG_SMSCS_LERSMIS 5
#define N_PCH_PCIE_CFG_SMSCS_HPLAS 4
#define N_PCH_PCIE_CFG_SMSCS_HPPDM 1
#define R_PCH_PCIE_CFG_RPDCGEN 0xE1
#define S_PCH_PCIE_CFG_RPDCGEN 1
#define B_PCH_PCIE_CFG_RPDCGEN_RPSCGEN BIT7
#define B_PCH_PCIE_CFG_RPDCGEN_PTOCGE BIT6
#define B_PCH_PCIE_CFG_RPDCGEN_LCLKREQEN BIT5
#define B_PCH_PCIE_CFG_RPDCGEN_BBCLKREQEN BIT4
#define B_PCH_PCIE_CFG_RPDCGEN_SRDBCGEN BIT2
#define B_PCH_PCIE_CFG_RPDCGEN_RPDLCGEN BIT1
#define B_PCH_PCIE_CFG_RPDCGEN_RPDBCGEN BIT0
#define R_PCH_PCIE_CFG_PWRCTL 0xE8
#define B_PCH_PCIE_CFG_PWRCTL_LTSSMRTC BIT20
#define B_PCH_PCIE_CFG_PWRCTL_WPDMPGEP BIT17
#define B_PCH_PCIE_CFG_PWRCTL_DBUPI BIT15
#define B_PCH_PCIE_CFG_PWRCTL_TXSWING BIT13
#define B_PCH_PCIE_CFG_PWRCTL_RPL1SQPOL BIT1
#define B_PCH_PCIE_CFG_PWRCTL_RPDTSQPOL BIT0
#define R_PCH_PCIE_CFG_DC 0xEC
#define B_PCH_PCIE_CFG_DC_PCIBEM BIT2
#define R_PCH_PCIE_CFG_PHYCTL2 0xF5
#define B_PCH_PCIE_CFG_PHYCTL2_TDFT (BIT7 | BIT6)
#define B_PCH_PCIE_CFG_PHYCTL2_TXCFGCHGWAIT (BIT5 | BIT4)
#define N_PCH_PCIE_CFG_PHYCTL2_TXCFGCHGWAIT 4
#define B_PCH_PCIE_CFG_PHYCTL2_PXPG3PLLOFFEN BIT1
#define B_PCH_PCIE_CFG_PHYCTL2_PXPG2PLLOFFEN BIT0
#define R_PCH_PCIE_CFG_IOSFSBCS 0xF7
#define B_PCH_PCIE_CFG_IOSFSBCS_SCPTCGE BIT6
#define B_PCH_PCIE_CFG_IOSFSBCS_SIID (BIT3 | BIT2)
#define R_PCH_PCIE_CFG_STRPFUSECFG 0xFC
#define B_PCH_PCIE_CFG_STRPFUSECFG_PXIP (BIT27 | BIT26 | BIT25 | BIT24)
#define N_PCH_PCIE_CFG_STRPFUSECFG_PXIP 24
#define B_PCH_PCIE_CFG_STRPFUSECFG_RPC (BIT15 | BIT14)
#define V_PCH_PCIE_CFG_STRPFUSECFG_RPC_1_1_1_1 0
#define V_PCH_PCIE_CFG_STRPFUSECFG_RPC_2_1_1 1
#define V_PCH_PCIE_CFG_STRPFUSECFG_RPC_2_2 2
#define V_PCH_PCIE_CFG_STRPFUSECFG_RPC_4 3
#define N_PCH_PCIE_CFG_STRPFUSECFG_RPC 14
#define B_PCH_PCIE_CFG_STRPFUSECFG_MODPHYIOPMDIS BIT9
#define B_PCH_PCIE_CFG_STRPFUSECFG_PLLSHTDWNDIS BIT8
#define B_PCH_PCIE_CFG_STRPFUSECFG_STPGATEDIS BIT7
#define B_PCH_PCIE_CFG_STRPFUSECFG_ASPMDIS BIT6
#define B_PCH_PCIE_CFG_STRPFUSECFG_LDCGDIS BIT5
#define B_PCH_PCIE_CFG_STRPFUSECFG_LTCGDIS BIT4
#define B_PCH_PCIE_CFG_STRPFUSECFG_CDCGDIS BIT3
#define B_PCH_PCIE_CFG_STRPFUSECFG_DESKTOPMOB BIT2
//
//PCI Express Extended Capability Registers
//
#define R_PCH_PCIE_CFG_EXCAP_OFFSET 0x100
#define R_PCH_PCIE_CFG_EX_AECH 0x100 ///< Advanced Error Reporting Capability Header
#define V_PCH_PCIE_CFG_EX_AEC_CV 0x1
#define R_PCH_PCIE_CFG_EX_UEM (R_PCH_PCIE_CFG_EX_AECH + R_PCIE_EX_UEM_OFFSET) // Uncorrectable Error Mask
#define R_PCH_PCIE_CFG_EX_CES 0x110 ///< Correctable Error Status
#define B_PCH_PCIE_CFG_EX_CES_BD BIT7 ///< Bad DLLP Status
#define B_PCH_PCIE_CFG_EX_CES_BT BIT6 ///< Bad TLP Status
#define B_PCH_PCIE_CFG_EX_CES_RE BIT0 ///< Receiver Error Status
//CES.RE, CES.BT, CES.BD
#define R_PCH_PCIE_CFG_EX_ACSECH 0x140 ///< ACS Extended Capability Header
#define V_PCH_PCIE_CFG_EX_ACS_CV 0x1
#define R_PCH_PCIE_CFG_EX_ACSCAPR (R_PCH_PCIE_CFG_EX_ACSECH + R_PCIE_EX_ACSCAPR_OFFSET)
#define R_PCH_PCIE_CFG_EX_L1SECH 0x200 ///< L1 Sub-States Extended Capability Header
#define V_PCH_PCIE_CFG_EX_L1S_CV 0x1
#define R_PCH_PCIE_CFG_EX_L1SCAP (R_PCH_PCIE_CFG_EX_L1SECH + R_PCIE_EX_L1SCAP_OFFSET)
#define R_PCH_PCIE_CFG_EX_L1SCTL1 (R_PCH_PCIE_CFG_EX_L1SECH + R_PCIE_EX_L1SCTL1_OFFSET)
#define R_PCH_PCIE_CFG_EX_L1SCTL2 (R_PCH_PCIE_CFG_EX_L1SECH + R_PCIE_EX_L1SCTL2_OFFSET)
#define R_PCH_PCIE_CFG_EX_SPEECH 0x220 ///< Secondary PCI Express Extended Capability Header
#define V_PCH_PCIE_CFG_EX_SPEECH_CV 0x1
#define R_PCH_PCIE_CFG_EX_LCTL3 (R_PCH_PCIE_CFG_EX_SPEECH + R_PCIE_EX_LCTL3_OFFSET)
#define R_PCH_PCIE_CFG_EX_LES (R_PCH_PCIE_CFG_EX_SPEECH + R_PCIE_EX_LES_OFFSET)
#define R_PCH_PCIE_CFG_EX_LECTL (R_PCH_PCIE_CFG_EX_SPEECH + R_PCIE_EX_L01EC_OFFSET)
#define B_PCH_PCIE_CFG_EX_LECTL_UPTPH (BIT14 | BIT13 | BIT12)
#define N_PCH_PCIE_CFG_EX_LECTL_UPTPH 12
#define B_PCH_PCIE_CFG_EX_LECTL_UPTP 0x0F00
#define N_PCH_PCIE_CFG_EX_LECTL_UPTP 8
#define B_PCH_PCIE_CFG_EX_LECTL_DPTPH (BIT6 | BIT5 | BIT4)
#define N_PCH_PCIE_CFG_EX_LECTL_DPTPH 4
#define B_PCH_PCIE_CFG_EX_LECTL_DPTP 0x000F
#define N_PCH_PCIE_CFG_EX_LECTL_DPTP 0
#define R_PCH_PCIE_CFG_EX_L01EC (R_PCH_PCIE_CFG_EX_SPEECH + R_PCIE_EX_L01EC_OFFSET)
#define R_PCH_PCIE_CFG_EX_L23EC (R_PCH_PCIE_CFG_EX_SPEECH + R_PCIE_EX_L23EC_OFFSET)
#define R_PCH_PCIE_CFG_PCIERTP1 0x300
#define R_PCH_PCIE_CFG_PCIERTP2 0x304
#define R_PCH_PCIE_CFG_PCIENFTS 0x314
#define R_PCH_PCIE_CFG_PCIEL0SC 0x318
#define R_PCH_PCIE_CFG_PCIECFG2 0x320
#define B_PCH_PCIE_CFG_PCIECFG2_LBWSSTE BIT30
#define B_PCH_PCIE_CFG_PCIECFG2_RLLG3R BIT27
#define B_PCH_PCIE_CFG_PCIECFG2_CROAOV BIT24
#define B_PCH_PCIE_CFG_PCIECFG2_CROAOE BIT23
#define B_PCH_PCIE_CFG_PCIECFG2_CRSREN BIT22
#define B_PCH_PCIE_CFG_PCIECFG2_PMET (BIT21 | BIT20)
#define V_PCH_PCIE_CFG_PCIECFG2_PMET 1
#define N_PCH_PCIE_CFG_PCIECFG2_PMET 20
#define R_PCH_PCIE_CFG_PCIEDBG 0x324
#define B_PCH_PCIE_CFG_PCIEDBG_LBWSSTE BIT30
#define B_PCH_PCIE_CFG_PCIEDBG_USSP (BIT27 | BIT26)
#define B_PCH_PCIE_CFG_PCIEDBG_LGCLKSQEXITDBTIMERS (BIT25 | BIT24)
#define B_PCH_PCIE_CFG_PCIEDBG_CTONFAE BIT14
#define B_PCH_PCIE_CFG_PCIEDBG_SQOL0 BIT7
#define B_PCH_PCIE_CFG_PCIEDBG_SPCE BIT5
#define B_PCH_PCIE_CFG_PCIEDBG_LR BIT4
#define R_PCH_PCIE_CFG_PCIESTS1 0x328
#define B_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE 0xFF000000
#define N_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE 24
#define V_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE_DETRDY 0x01
#define V_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE_DETRDYECINP1CG 0x0E
#define V_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE_L0 0x33
#define V_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE_DISWAIT 0x5E
#define V_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE_DISWAITPG 0x60
#define V_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE_RECOVERYSPEEDREADY 0x6C
#define V_PCH_PCIE_CFG_PCIESTS1_LTSMSTATE_RECOVERYLNK2DETECT 0x6F
#define B_PCH_PCIE_CFG_PCIESTS1_LNKSTAT (BIT22 | BIT21 | BIT20 | BIT19)
#define N_PCH_PCIE_CFG_PCIESTS1_LNKSTAT 19
#define V_PCH_PCIE_CFG_PCIESTS1_LNKSTAT_L0 0x7
#define R_PCH_PCIE_CFG_PCIESTS2 0x32C
#define B_PCH_PCIE_CFG_PCIESTS2_P4PNCCWSSCMES BIT31
#define B_PCH_PCIE_CFG_PCIESTS2_P3PNCCWSSCMES BIT30
#define B_PCH_PCIE_CFG_PCIESTS2_P2PNCCWSSCMES BIT29
#define B_PCH_PCIE_CFG_PCIESTS2_P1PNCCWSSCMES BIT28
#define B_PCH_PCIE_CFG_PCIESTS2_CLRE 0x0000F000
#define N_PCH_PCIE_CFG_PCIESTS2_CLRE 12
#define R_PCH_PCIE_CFG_PCIEALC 0x338
#define B_PCH_PCIE_CFG_PCIEALC_ITLRCLD BIT29
#define B_PCH_PCIE_CFG_PCIEALC_ILLRCLD BIT28
#define B_PCH_PCIE_CFG_PCIEALC_BLKDQDA BIT26
#define R_PCH_PCIE_CFG_LTROVR 0x400
#define B_PCH_PCIE_CFG_LTROVR_LTRNSROVR BIT31 ///< LTR Non-Snoop Requirement Bit Override
#define B_PCH_PCIE_CFG_LTROVR_LTRSROVR BIT15 ///< LTR Snoop Requirement Bit Override
#define R_PCH_PCIE_CFG_LTROVR2 0x404
#define B_PCH_PCIE_CFG_LTROVR2_FORCE_OVERRIDE BIT3 ///< LTR Force Override Enable
#define B_PCH_PCIE_CFG_LTROVR2_LOCK BIT2 ///< LTR Override Lock
#define B_PCH_PCIE_CFG_LTROVR2_LTRNSOVREN BIT1 ///< LTR Non-Snoop Override Enable
#define B_PCH_PCIE_CFG_LTROVR2_LTRSOVREN BIT0 ///< LTR Snoop Override Enable
#define R_PCH_PCIE_CFG_PHYCTL4 0x408
#define B_PCH_PCIE_CFG_PHYCTL4_SQDIS BIT27
#define R_PCH_PCIE_CFG_PCIEPMECTL 0x420
#define B_PCH_PCIE_CFG_PCIEPMECTL_DLSULPPGE BIT30
#define B_PCH_PCIE_CFG_PCIEPMECTL_L1LE BIT17
#define B_PCH_PCIE_CFG_PCIEPMECTL_L1FSOE BIT0
#define R_PCH_PCIE_CFG_PCIEPMECTL2 0x424
#define B_PCH_PCIE_CFG_PCIEPMECTL2_PHYCLPGE BIT11
#define B_PCH_PCIE_CFG_PCIEPMECTL2_FDCPGE BIT8
#define B_PCH_PCIE_CFG_PCIEPMECTL2_DETSCPGE BIT7
#define B_PCH_PCIE_CFG_PCIEPMECTL2_L23RDYSCPGE BIT6
#define B_PCH_PCIE_CFG_PCIEPMECTL2_DISSCPGE BIT5
#define B_PCH_PCIE_CFG_PCIEPMECTL2_L1SCPGE BIT4
#define R_PCH_PCIE_CFG_PCE 0x428
#define B_PCH_PCIE_CFG_PCE_HAE BIT5
#define B_PCH_PCIE_CFG_PCE_PMCRE BIT0
#define R_PCH_PCIE_CFG_EQCFG1 0x450
#define S_PCH_PCIE_CFG_EQCFG1 4
#define B_PCH_PCIE_CFG_EQCFG1_REC 0xFF000000
#define N_PCH_PCIE_CFG_EQCFG1_REC 24
#define B_PCH_PCIE_CFG_EQCFG1_REIFECE BIT23
#define N_PCH_PCIE_CFG_EQCFG1_LERSMIE 21
#define B_PCH_PCIE_CFG_EQCFG1_LEP23B BIT18
#define B_PCH_PCIE_CFG_EQCFG1_LEP3B BIT17
#define B_PCH_PCIE_CFG_EQCFG1_RTLEPCEB BIT16
#define B_PCH_PCIE_CFG_EQCFG1_RTPCOE BIT15
#define B_PCH_PCIE_CFG_EQCFG1_HPCMQE BIT13
#define B_PCH_PCIE_CFG_EQCFG1_HAED BIT12
#define B_PCH_PCIE_CFG_EQCFG1_EQTS2IRRC BIT7
#define B_PCH_PCIE_CFG_EQCFG1_TUPP BIT1
#define R_PCH_PCIE_CFG_RTPCL1 0x454
#define B_PCH_PCIE_CFG_RTPCL1_PCM BIT31
#define B_PCH_PCIE_CFG_RTPCL1_RTPRECL2PL4 0x3F000000
#define B_PCH_PCIE_CFG_RTPCL1_RTPOSTCL1PL3 0xFC0000
#define B_PCH_PCIE_CFG_RTPCL1_RTPRECL1PL2 0x3F000
#define B_PCH_PCIE_CFG_RTPCL1_RTPOSTCL0PL1 0xFC0
#define B_PCH_PCIE_CFG_RTPCL1_RTPRECL0PL0 0x3F
#define R_PCH_PCIE_CFG_RTPCL2 0x458
#define B_PCH_PCIE_CFG_RTPCL2_RTPOSTCL3PL 0x3F000
#define B_PCH_PCIE_CFG_RTPCL2_RTPRECL3PL6 0xFC0
#define B_PCH_PCIE_CFG_RTPCL2_RTPOSTCL2PL5 0x3F
#define R_PCH_PCIE_CFG_RTPCL3 0x45C
#define B_PCH_PCIE_CFG_RTPCL3_RTPRECL7 0x3F000000
#define B_PCH_PCIE_CFG_RTPCL3_RTPOSTCL6 0xFC0000
#define B_PCH_PCIE_CFG_RTPCL3_RTPRECL6 0x3F000
#define B_PCH_PCIE_CFG_RTPCL3_RTPOSTCL5 0xFC0
#define B_PCH_PCIE_CFG_RTPCL3_RTPRECL5PL10 0x3F
#define R_PCH_PCIE_CFG_RTPCL4 0x460
#define B_PCH_PCIE_CFG_RTPCL4_RTPOSTCL9 0x3F000000
#define B_PCH_PCIE_CFG_RTPCL4_RTPRECL9 0xFC0000
#define B_PCH_PCIE_CFG_RTPCL4_RTPOSTCL8 0x3F000
#define B_PCH_PCIE_CFG_RTPCL4_RTPRECL8 0xFC0
#define B_PCH_PCIE_CFG_RTPCL4_RTPOSTCL7 0x3F
#define R_PCH_PCIE_CFG_FOMS 0x464
#define B_PCH_PCIE_CFG_FOMS_I (BIT30 | BIT29)
#define N_PCH_PCIE_CFG_FOMS_I 29
#define B_PCH_PCIE_CFG_FOMS_LN 0x1F000000
#define N_PCH_PCIE_CFG_FOMS_LN 24
#define B_PCH_PCIE_CFG_FOMS_FOMSV 0x00FFFFFF
#define B_PCH_PCIE_CFG_FOMS_FOMSV0 0x000000FF
#define N_PCH_PCIE_CFG_FOMS_FOMSV0 0
#define B_PCH_PCIE_CFG_FOMS_FOMSV1 0x0000FF00
#define N_PCH_PCIE_CFG_FOMS_FOMSV1 8
#define B_PCH_PCIE_CFG_FOMS_FOMSV2 0x00FF0000
#define N_PCH_PCIE_CFG_FOMS_FOMSV2 16
#define R_PCH_PCIE_CFG_HAEQ 0x468
#define B_PCH_PCIE_CFG_HAEQ_HAPCCPI (BIT31 | BIT30 | BIT29 | BIT28)
#define N_PCH_PCIE_CFG_HAEQ_HAPCCPI 28
#define B_PCH_PCIE_CFG_HAEQ_MACFOMC BIT19
#define R_PCH_PCIE_CFG_LTCO1 0x470
#define B_PCH_PCIE_CFG_LTCO1_L1TCOE BIT25
#define B_PCH_PCIE_CFG_LTCO1_L0TCOE BIT24
#define B_PCH_PCIE_CFG_LTCO1_L1TPOSTCO 0xFC0000
#define N_PCH_PCIE_CFG_LTCO1_L1TPOSTCO 18
#define B_PCH_PCIE_CFG_LTCO1_L1TPRECO 0x3F000
#define N_PCH_PCIE_CFG_LTCO1_L1TPRECO 12
#define B_PCH_PCIE_CFG_LTCO1_L0TPOSTCO 0xFC0
#define N_PCH_PCIE_CFG_LTCO1_L0TPOSTCO 6
#define B_PCH_PCIE_CFG_LTCO1_L0TPRECO 0x3F
#define N_PCH_PCIE_CFG_LTCO1_L0TPRECO 0
#define R_PCH_PCIE_CFG_LTCO2 0x474
#define B_PCH_PCIE_CFG_LTCO2_L3TCOE BIT25
#define B_PCH_PCIE_CFG_LTCO2_L2TCOE BIT24
#define B_PCH_PCIE_CFG_LTCO2_L3TPOSTCO 0xFC0000
#define B_PCH_PCIE_CFG_LTCO2_L3TPRECO 0x3F000
#define B_PCH_PCIE_CFG_LTCO2_L2TPOSTCO 0xFC0
#define B_PCH_PCIE_CFG_LTCO2_L2TPRECO 0x3F
#define R_PCH_PCIE_CFG_G3L0SCTL 0x478
#define B_PCH_PCIE_CFG_G3L0SCTL_G3UCNFTS 0x0000FF00
#define B_PCH_PCIE_CFG_G3L0SCTL_G3CCNFTS 0x000000FF
#define R_PCH_PCIE_CFG_EQCFG2 0x47C
#define B_PCH_PCIE_CFG_EQCFG2_NTIC 0xFF000000
#define B_PCH_PCIE_CFG_EQCFG2_EMD BIT23
#define B_PCH_PCIE_CFG_EQCFG2_NTSS (BIT22 | BIT21 | BIT20)
#define B_PCH_PCIE_CFG_EQCFG2_PCET (BIT19 | BIT18 | BIT17 | BIT16)
#define N_PCH_PCIE_CFG_EQCFG2_PCET 16
#define B_PCH_PCIE_CFG_EQCFG2_HAPCSB (BIT15 | BIT14 | BIT13 | BIT12)
#define N_PCH_PCIE_CFG_EQCFG2_HAPCSB 12
#define B_PCH_PCIE_CFG_EQCFG2_NTEME BIT11
#define B_PCH_PCIE_CFG_EQCFG2_MPEME BIT10
#define B_PCH_PCIE_CFG_EQCFG2_REWMETM (BIT9 | BIT8)
#define B_PCH_PCIE_CFG_EQCFG2_REWMET 0xFF
#define R_PCH_PCIE_CFG_MM 0x480
#define B_PCH_PCIE_CFG_MM_MSST 0xFFFFFF00
#define N_PCH_PCIE_CFG_MM_MSST 8
#define B_PCH_PCIE_CFG_MM_MSS 0xFF
//
// PCIE PCRs (PID:SPA SPB SPC SPD SPE SPF)
//
#define R_SPX_PCR_PCD 0 ///< Port configuration and disable
#define B_SPX_PCR_PCD_RP1FN (BIT2 | BIT1 | BIT0) ///< Port 1 Function Number
#define B_SPX_PCR_PCD_RP1CH BIT3 ///< Port 1 config hide
#define B_SPX_PCR_PCD_RP2FN (BIT6 | BIT5 | BIT4) ///< Port 2 Function Number
#define B_SPX_PCR_PCD_RP2CH BIT7 ///< Port 2 config hide
#define B_SPX_PCR_PCD_RP3FN (BIT10 | BIT9 | BIT8) ///< Port 3 Function Number
#define B_SPX_PCR_PCD_RP3CH BIT11 ///< Port 3 config hide
#define B_SPX_PCR_PCD_RP4FN (BIT14 | BIT13 | BIT12) ///< Port 4 Function Number
#define B_SPX_PCR_PCD_RP4CH BIT15 ///< Port 4 config hide
#define S_SPX_PCR_PCD_RP_FIELD 4 ///< 4 bits for each RP FN
#define B_SPX_PCR_PCD_P1D BIT16 ///< Port 1 disable
#define B_SPX_PCR_PCD_P2D BIT17 ///< Port 2 disable
#define B_SPX_PCR_PCD_P3D BIT18 ///< Port 3 disable
#define B_SPX_PCR_PCD_P4D BIT19 ///< Port 4 disable
#define B_SPX_PCR_PCD_SRL BIT31 ///< Secured Register Lock
#define R_SPX_PCR_PCIEHBP 0x0004 ///< PCI Express high-speed bypass
#define B_SPX_PCR_PCIEHBP_PCIEHBPME BIT0 ///< PCIe HBP mode enable
#define B_SPX_PCR_PCIEHBP_PCIEGMO (BIT2 | BIT1) ///< PCIe gen mode override
#define B_SPX_PCR_PCIEHBP_PCIETIL0O BIT3 ///< PCIe transmitter-in-L0 override
#define B_SPX_PCR_PCIEHBP_PCIERIL0O BIT4 ///< PCIe receiver-in-L0 override
#define B_SPX_PCR_PCIEHBP_PCIELRO BIT5 ///< PCIe link recovery override
#define B_SPX_PCR_PCIEHBP_PCIELDO BIT6 ///< PCIe link down override
#define B_SPX_PCR_PCIEHBP_PCIESSM BIT7 ///< PCIe SKP suppression mode
#define B_SPX_PCR_PCIEHBP_PCIESST BIT8 ///< PCIe suppress SKP transmission
#define B_SPX_PCR_PCIEHBP_PCIEHBPPS (BIT13 | BIT12) ///< PCIe HBP port select
#define B_SPX_PCR_PCIEHBP_CRCSEL (BIT15 | BIT14) ///< CRC select
#define B_SPX_PCR_PCIEHBP_PCIEHBPCRC 0xFFFF0000 ///< PCIe HBP CRC
//
// ICC PCR (PID: ICC)
//
#define R_ICC_PCR_TMCSRCCLK 0x1000 ///< Timing Control SRC Clock Register
#define R_ICC_PCR_TMCSRCCLK2 0x1004 ///< Timing Control SRC Clock Register 2
#define R_ICC_PCR_MSKCKRQ 0x100C ///< Mask Control CLKREQ
#endif
| 17,754 |
/*! @file
	@brief Base class for WSH (Windows Script Host) interface objects.
	@date 2009.10.29 syat Split out from CWSH.cpp
*/
/*
Copyright (C) 2002, 鬼, genta
Copyright (C) 2003, FILE
Copyright (C) 2004, genta
Copyright (C) 2005, FILE, zenryaku
Copyright (C) 2009, syat
Copyright (C) 2018-2021, Sakura Editor Organization
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented;
you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment
in the product documentation would be appreciated but is
not required.
2. Altered source versions must be plainly marked as such,
and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
*/
#include "StdAfx.h"
#include <memory>
#include "macro/CWSHIfObj.h"
#include "macro/CSMacroMgr.h" // MacroFuncInfo
#include "Funccode_enum.h" // EFunctionCode::FA_FROMMACRO
// Prepare the commands and functions exposed to macros.
// [in] pView  view the macro operates on
// [in] flags  extra flag bits mixed into each command ID (2007.07.20 genta)
void CWSHIfObj::ReadyMethods( CEditView* pView, int flags )
{
	m_pView = pView;
	// Commands get FA_FROMMACRO mixed in so they are recognized as macro-invoked.
	ReadyCommands( GetMacroCommandInfo(), flags | FA_FROMMACRO );
	// Functions are registered without extra flag bits.
	ReadyCommands( GetMacroFuncInfo(), 0 );
	// When ReadyMethods is invoked from a subclass of CWSHIfObj, the
	// subclass's GetMacroCommandInfo/GetMacroFuncInfo overrides are called.
}
/** Register the given command/function table with the WSH macro engine.
	@param Info   descriptor table terminated by an entry with m_nFuncID == -1
	@param flags  flag bits mixed into each function ID at registration time
	@date 2007.07.20 genta flags added; flags are mixed in at registration stage.
*/
void CWSHIfObj::ReadyCommands(MacroFuncInfo *Info, int flags)
{
	while(Info->m_nFuncID != -1)	// Aug. 29, 2002 genta sentinel value was changed, so this check changed too
	{
		// Copy the name into a bounded buffer. The previous wcscpy could
		// overrun FuncName for names of 256 wchars or more; truncate instead.
		wchar_t FuncName[256];
		wcsncpy(FuncName, Info->m_pszFuncName, (sizeof(FuncName) / sizeof(FuncName[0])) - 1);
		FuncName[(sizeof(FuncName) / sizeof(FuncName[0])) - 1] = L'\0';

		// Determine the argument count: either declared explicitly in
		// m_pData, or counted from the inline (max 4) argument type slots.
		int ArgCount = 0;
		if( Info->m_pData ){
			ArgCount = Info->m_pData->m_nArgMinSize;
		}else{
			for(int i = 0; i < 4; ++i){
				if(Info->m_varArguments[i] != VT_EMPTY)
					++ArgCount;
			}
		}

		// The inline descriptor only holds 4 argument types; extended types
		// live in m_pData->m_pVarArgEx. For >4 arguments, merge both into one
		// contiguous array. unique_ptr keeps the temporary exception-safe.
		std::unique_ptr<VARTYPE[]> varArgTmp;
		VARTYPE* varArg = Info->m_varArguments;
		if( 4 < ArgCount ){
			varArgTmp.reset( new VARTYPE[ArgCount] );
			varArg = varArgTmp.get();
			for( int i = 0; i < ArgCount; i++ ){
				if( i < 4 ){
					varArg[i] = Info->m_varArguments[i];
				}else{
					varArg[i] = Info->m_pData->m_pVarArgEx[i-4];
				}
			}
		}

		// 2007.07.21 genta : register the ID with the flag bits mixed in
		this->AddMethod(
			FuncName,
			(Info->m_nFuncID | flags),
			varArg,
			ArgCount,
			Info->m_varResult,
			reinterpret_cast<CIfObjMethod>(&CWSHIfObj::MacroCommand)
			// When ReadyCommands is invoked from a subclass of CWSHIfObj,
			// the subclass's MacroCommand override is the one dispatched.
		);
		++Info;
	}
}
/*!
	Execute a macro command or function dispatched from the WSH engine.

	@param IntID     function ID with the flag bits that were mixed in at registration
	@param Arguments COM dispatch parameters; rgvarg stores arguments in reverse order
	@param Result    [out, optional] receives the function's return value; may be NULL
	@param Data      opaque context pointer (unused here)
	@return S_OK on success, E_FAIL when a function handler reports failure

	@date 2005.06.27 zenryaku Run the function even when the caller ignores the return value
	@date 2013.06.07 Moca Fix argument misalignment with 5+ arguments; support strings containing NUL
*/
HRESULT CWSHIfObj::MacroCommand(int IntID, DISPPARAMS *Arguments, VARIANT* Result, void *Data)
{
	int I;
	int ArgCount = Arguments->cArgs;
	const EFunctionCode ID = static_cast<EFunctionCode>(IntID);
	// 2007.07.22 genta : only the low 16 bits carry the command ID; upper bits are flags
	if(LOWORD(ID) >= F_FUNCTION_FIRST)
	{
		// --- Function path: arguments stay VARIANTs and a result is produced ---
		VARIANT ret;	// 2005.06.27 zenryaku execute even without a receiver for the result
		VariantInit(&ret);
		// 2011.3.18 syat DISPPARAMS::rgvarg is reversed; copy into natural order
		auto rgvargParam = std::make_unique<VARIANTARG[]>(ArgCount);
		for(I = 0; I < ArgCount; I++){
			::VariantInit(&rgvargParam[ArgCount - I - 1]);
			::VariantCopy(&rgvargParam[ArgCount - I - 1], &Arguments->rgvarg[I]);
		}
		// 2009.9.5 syat HandleFunction is overridden by subclasses
		bool r = HandleFunction(m_pView, ID, &rgvargParam[0], ArgCount, ret);
		if(Result) {::VariantCopyInd(Result, &ret);}
		VariantClear(&ret);
		// Release the copies made above before returning.
		for(I = 0; I < ArgCount; I++){
			::VariantClear(&rgvargParam[I]);
		}
		return r ? S_OK : E_FAIL;
	}
	else
	{
		// --- Command path: arguments are converted to strings, no result ---
		// Reserve at least 4 slots so handlers may read up to 4 args safely.
		int argCountMin = t_max(4, ArgCount);
		// Nov. 29, 2005 FILE fetch the arguments as strings
		auto StrArgs = std::make_unique<LPWSTR[]>(argCountMin);
		auto strLengths = std::make_unique<int[]>(argCountMin);
		// NULL-fill the padding slots beyond the actual argument count.
		for(I = ArgCount; I < argCountMin; I++ ){
			StrArgs[I] = NULL;
			strLengths[I] = 0;
		}
		WCHAR *S = NULL;	// initialization required
		Variant varCopy;	// copy target: source may be VT_BYREF, which we must not modify
							// NOTE(review): varCopy.Data is presumably released by
							// Variant's destructor — confirm against the Variant wrapper.
		int Len;
		for(I = 0; I < ArgCount; ++I)
		{
			if(VariantChangeType(&varCopy.Data, &(Arguments->rgvarg[I]), 0, VT_BSTR) == S_OK)
			{
				// GetW allocates a WCHAR buffer and reports its length
				// (length-based, so embedded NULs survive — 2013.06.07 Moca).
				Wrap(&varCopy.Data.bstrVal)->GetW(&S, &Len);
			}
			else
			{
				// Conversion failed: substitute an empty string.
				S = new WCHAR[1];
				S[0] = 0;
				Len = 0;
			}
			StrArgs[ArgCount - I - 1] = S;	// DISPPARAMS is reversed, restore natural order
			strLengths[ArgCount - I - 1] = Len;
		}
		// 2009.10.29 syat HandleCommand is overridden by subclasses
		HandleCommand(m_pView, ID, const_cast<WCHAR const **>(&StrArgs[0]), &strLengths[0], ArgCount);
		// Nov. 29, 2005 FILE array delete, hence the [brackets]
		for(int J = 0; J < ArgCount; ++J)
			delete [] StrArgs[J];
		return S_OK;
	}
}
| 2,487 |
1,257 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from block_zoo.BaseLayer import BaseLayer, BaseConf
from utils.DocInherit import DocInherit
class LSTMCharEmbeddingConf(BaseConf):
    """ Configuration of LSTMCharEmbedding

    Args:
        dim (int, optional): dimension of the character embedding produced by the LSTM. Default: 50
        embedding_matrix_dim (int, optional): dimension of the initial character embedding. Default: 30
        padding (int, optional): index treated as padding in the embedding lookup. Default: 0
        dropout (float, optional): dropout rate. Default: 0.2
        bidirect_flag (bool, optional): whether to use a BiLSTM. Default: True
    """
    def __init__(self, **kwargs):
        super(LSTMCharEmbeddingConf, self).__init__(**kwargs)

    @DocInherit
    def default(self):
        # Default hyper-parameters; any of them may be overridden via **kwargs.
        defaults = {
            'dim': 50,                     # output channel dim of the LSTM
            'embedding_matrix_dim': 30,    # initial char embedding dim
            'padding': 0,
            'dropout': 0.2,
            'bidirect_flag': True,
        }
        for name, value in defaults.items():
            setattr(self, name, value)

    @DocInherit
    def declare(self):
        # Exactly one rank-3 input: [batch_size, seq_len, char num in words].
        self.num_of_inputs = 1
        self.input_ranks = [3]

    @DocInherit
    def inference(self):
        # Output keeps rank 3: [batch_size, seq_len, dim].
        self.output_rank = 3

    @DocInherit
    def verify(self):
        # These attributes must be present before the layer can be built.
        for attr in ('embedding_matrix_dim', 'dim', 'dropout', 'bidirect_flag', 'vocab_size'):
            self.add_attr_exist_assertion_for_user(attr)
class LSTMCharEmbedding(BaseLayer):
    """
    Character embedding layer backed by an LSTM.

    Args:
        layer_conf (LSTMCharEmbeddingConf): configuration of LSTMCharEmbedding
    """
    def __init__(self, layer_conf):
        super(LSTMCharEmbedding, self).__init__(layer_conf)
        self.layer_conf = layer_conf

        self.char_embeddings = nn.Embedding(layer_conf.vocab_size, layer_conf.embedding_matrix_dim, padding_idx=self.layer_conf.padding)
        nn.init.uniform_(self.char_embeddings.weight, -0.001, 0.001)

        # Each direction contributes half of the requested output dimension.
        # BUG FIX: the original only assigned self.dim in the bidirectional
        # branch, so bidirect_flag=False raised AttributeError below.
        if layer_conf.bidirect_flag:
            self.dim = layer_conf.dim // 2
        else:
            self.dim = layer_conf.dim
        self.dropout = nn.Dropout(layer_conf.dropout)
        self.char_lstm = nn.LSTM(layer_conf.embedding_matrix_dim, self.dim, num_layers=1, batch_first=True, bidirectional=layer_conf.bidirect_flag)

        # is_cuda() is provided by BaseLayer; move submodules to GPU if requested.
        if self.is_cuda():
            self.char_embeddings = self.char_embeddings.cuda()
            self.dropout = self.dropout.cuda()
            self.char_lstm = self.char_lstm.cuda()

    def forward(self, string):
        """
        Step1: [batch_size, seq_len, char num in words] -> [batch_size*seq_len, char num in words]
        Step2: lookup embedding matrix -> [batch_size*seq_len, char num in words, embedding_dim]
        Step3: after lstm operation, got [num_layer*num_directions, batch_size*seq_len, dim]
        Step4: reshape -> [batch_size, seq_len, dim]

        Args:
            string (Variable): [[char ids of word1], [char ids of word2], [...], ...],
                shape: [batch_size, seq_len, char num in words]

        Returns:
            Variable: [batch_size, seq_len, output_dim]
        """
        # Fold batch and sequence dims together so every word is one LSTM sample.
        string_reshaped = string.view(string.size()[0]*string.size()[1], -1)    # [batch_size*seq_len, char num in words]

        char_embs_lookup = self.char_embeddings(string_reshaped).float()    # [batch_size*seq_len, char num in words, embedding_dim]
        char_embs_drop = self.dropout(char_embs_lookup)
        char_hidden = None
        # Only the final hidden state is used as the word representation.
        char_rnn_out, char_hidden = self.char_lstm(char_embs_drop, char_hidden)
        # char_hidden[0]: [num_directions, batch_size*seq_len, dim] -> [batch_size, seq_len, output_dim]
        string_out = char_hidden[0].transpose(1, 0).contiguous().view(string.size()[0], string.size()[1], -1)

        return string_out
if __name__ == '__main__':
    # Smoke test: build a config, run a fake batch of char ids through the
    # layer, and print the resulting word embeddings.
    conf = {
        'embedding_matrix_dim': 30,
        'dim': 30,  # lstm's output channel dim
        'padding': 0,
        'dropout': 0.2,
        'bidirect_flag': True,

        # should be inferred from the corpus
        'vocab_size': 10,
        'input_dims': [5],
        'input_ranks': [3],
        'use_gpu': True  # NOTE(review): assumes a CUDA device is available — confirm
    }
    layer_conf = LSTMCharEmbeddingConf(**conf)

    # make a fake input: [bs, seq_len, char num in words]
    # assume in this batch, the padded sentence length is 3 and each word has 5 chars, including padding 0.
    input_chars = np.array([
        [[3, 1, 2, 5, 4], [1, 2, 3, 4, 0], [0, 0, 0, 0, 0]],
        [[1, 1, 0, 0, 0], [2, 3, 1, 0, 0], [1, 2, 3, 4, 5]]
    ])

    char_emb_layer = LSTMCharEmbedding(layer_conf)

    input_chars = torch.LongTensor(input_chars)
    output = char_emb_layer(input_chars)
    print(output)
| 2,155 |
335 | <reponame>Safal08/Hacktoberfest-1<gh_stars>100-1000
{
"word": "Superimposition",
"definitions": [
"The action of placing or laying one thing over another, typically so that both are still evident."
],
"parts-of-speech": "Noun"
} | 97 |
318 | package com.wapchief.jpushim.greendao.model;
import org.greenrobot.greendao.annotation.Entity;
import org.greenrobot.greendao.annotation.Generated;
import org.greenrobot.greendao.annotation.Id;
/**
 * Created by Wu on 2017/5/8 at 2:32 PM.
 * Description: base user entity class.
 */
@Entity
public class User {
    // Primary key; greenDAO assigns it on insert when autoincrement is enabled.
    @Id(autoincrement = true)
    private Long id;

    // NOTE: the @Generated constructors below are emitted by the greenDAO
    // plugin; the hash ties them to the entity definition, so do not edit
    // them by hand — regenerate instead.
    @Generated(hash = 1248599927)
    public User(Long id) {
        this.id = id;
    }
    @Generated(hash = 586692638)
    public User() {
    }
    public Long getId() {
        return id;
    }
    public void setId(Long id) {
        this.id = id;
    }
}
| 279 |
2,177 | <reponame>TheIdhem/cruise-control
/*
* Copyright 2021 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License"). See License in the project root for license information.
*/
package com.linkedin.kafka.cruisecontrol.detector;
import com.linkedin.kafka.cruisecontrol.KafkaCruiseControlUtils;
import com.linkedin.kafka.cruisecontrol.analyzer.ProvisionRecommendation;
import com.linkedin.kafka.cruisecontrol.analyzer.ProvisionStatus;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.regex.Pattern;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.CreatePartitionsResult;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.easymock.EasyMock;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static com.linkedin.kafka.cruisecontrol.KafkaCruiseControlUtils.CLIENT_REQUEST_TIMEOUT_MS;
import static com.linkedin.kafka.cruisecontrol.KafkaCruiseControlUtils.CompletionType.COMPLETED;
import static com.linkedin.kafka.cruisecontrol.servlet.handler.sync.RightsizeRequest.RECOMMENDER_UP;
import static org.junit.Assert.assertEquals;
/**
* A partition provisioner test based on using {@link BasicProvisioner} for partition provisioning.
*/
public class PartitionProvisionerTest extends AbstractProvisionerTest {
  private static final AdminClient MOCK_ADMIN_CLIENT = EasyMock.createMock(AdminClient.class);
  // Mocks for describe topics
  private static final DescribeTopicsResult MOCK_DESCRIBE_TOPICS_RESULT = EasyMock.createMock(DescribeTopicsResult.class);
  private static final KafkaFuture<TopicDescription> MOCK_TOPIC_DESCRIPTION_FUTURE = EasyMock.createMock(KafkaFuture.class);
  // Mocks for create partitions
  private static final CreatePartitionsResult MOCK_CREATE_PARTITIONS_RESULT = EasyMock.createMock(CreatePartitionsResult.class);
  private static final KafkaFuture<Void> MOCK_CREATE_PARTITIONS_FUTURE = EasyMock.createMock(KafkaFuture.class);
  private static final String MOCK_TOPIC = "mock-topic";
  public static final List<Node> NODES = Collections.singletonList(new Node(0, "host0", 0));
  // A one-node, one-partition cluster used by every test in this class.
  private static final Cluster MOCK_KAFKA_CLUSTER;
  static {
    Set<PartitionInfo> partitions = Collections.singleton(new PartitionInfo(MOCK_TOPIC, 0, NODES.get(0), NODES.toArray(new Node[1]),
                                                                            NODES.toArray(new Node[1])));
    MOCK_KAFKA_CLUSTER = new Cluster("id", NODES, partitions, Collections.emptySet(), Collections.emptySet(), NODES.get(0));
  }
  private static final TopicPartitionInfo MOCK_TOPIC_PARTITION_INFO = new TopicPartitionInfo(0, NODES.get(0), NODES, NODES);
  private static final TopicDescription MOCK_TOPIC_DESCRIPTION = new TopicDescription(MOCK_TOPIC, false,
                                                                                      Collections.singletonList(MOCK_TOPIC_PARTITION_INFO));
  // Create resources for (1) the cluster and (2) each broker.
  private static final Pattern MOCK_TOPIC_PATTERN = Pattern.compile(MOCK_TOPIC);
  // Recommendation equal to the current partition count -> provisioner ignores it.
  private static final int MOCK_IGNORED_PARTITION_COUNT = 1;
  // Recommendation above the current partition count -> provisioner acts on it.
  private static final int MOCK_PARTITION_COUNT = 2;

  /**
   * Execute before every test case.
   */
  @Before
  @Override
  public void setUp() {
    super.setUp();
    // The provisioner fetches the admin client and cluster from Cruise Control.
    EasyMock.expect(MOCK_KAFKA_CRUISE_CONTROL.adminClient()).andReturn(MOCK_ADMIN_CLIENT);
    EasyMock.expect(MOCK_KAFKA_CRUISE_CONTROL.kafkaCluster()).andReturn(MOCK_KAFKA_CLUSTER);
    EasyMock.replay(MOCK_KAFKA_CRUISE_CONTROL);
  }

  /**
   * Execute after every test case.
   */
  @After
  public void teardown() {
    // Verify all recorded expectations, then reset the shared static mocks
    // so the next test starts from a clean record phase.
    EasyMock.verify(MOCK_KAFKA_CRUISE_CONTROL, MOCK_ADMIN_CLIENT, MOCK_DESCRIBE_TOPICS_RESULT, MOCK_TOPIC_DESCRIPTION_FUTURE,
                    MOCK_CREATE_PARTITIONS_FUTURE, MOCK_CREATE_PARTITIONS_RESULT);
    EasyMock.reset(MOCK_KAFKA_CRUISE_CONTROL, MOCK_ADMIN_CLIENT, MOCK_DESCRIBE_TOPICS_RESULT, MOCK_TOPIC_DESCRIPTION_FUTURE,
                   MOCK_CREATE_PARTITIONS_FUTURE, MOCK_CREATE_PARTITIONS_RESULT);
  }

  @Test
  public void testProvisionPartitionIncreaseConstructsCompletedResponse()
      throws ExecutionException, InterruptedException, TimeoutException {
    ProvisionerState.State expectedState = ProvisionerState.State.COMPLETED;
    String expectedSummary = String.format("[Recommender-Under-Provisioned] Setting partition count by topic || Succeeded: {%s=%d}.",
                                           MOCK_TOPIC, MOCK_PARTITION_COUNT);
    assertProvisionPartitionIncreaseConstructsCorrectResponse(COMPLETED, expectedState, expectedSummary);
  }

  @Test
  public void testProvisionPartitionIncreaseConstructsCompletedWithIgnoreResponse()
      throws ExecutionException, InterruptedException, TimeoutException {
    ProvisionerState.State expectedState = ProvisionerState.State.COMPLETED;
    String expectedSummary = String.format("[Recommender-Under-Provisioned] Setting partition count by topic || Ignored: {%s=%d}.",
                                           MOCK_TOPIC, MOCK_IGNORED_PARTITION_COUNT);
    assertProvisionPartitionIncreaseConstructsCorrectResponse(KafkaCruiseControlUtils.CompletionType.NO_ACTION, expectedState, expectedSummary);
  }

  @Test
  public void testProvisionPartitionIncreaseConstructsCompletedWithErrorResponse()
      throws ExecutionException, InterruptedException, TimeoutException {
    ProvisionerState.State expectedState = ProvisionerState.State.COMPLETED_WITH_ERROR;
    String expectedSummary = String.format("[Recommender-Under-Provisioned] Setting partition count by topic || Failed: {%s=%d}.",
                                           MOCK_TOPIC, MOCK_PARTITION_COUNT);
    assertProvisionPartitionIncreaseConstructsCorrectResponse(KafkaCruiseControlUtils.CompletionType.COMPLETED_WITH_ERROR, expectedState,
                                                              expectedSummary);
  }

  @Test
  public void testProvisionPartitionIncreaseWithBrokerRecommendation()
      throws ExecutionException, InterruptedException, TimeoutException {
    // BasicProvisioner has no broker support: the broker recommendation is
    // reported as skipped while the partition recommendation still succeeds.
    ProvisionerState.State expectedState = ProvisionerState.State.COMPLETED;
    String expectedBrokerSummary = String.format("Provisioner support is missing. Skip recommendation: %s", BROKER_REC_TO_EXECUTE);
    String expectedSummary = String.format("[Recommender-Under-Provisioned] Setting partition count by topic || Succeeded: {%s=%d}. || %s",
                                           MOCK_TOPIC, MOCK_PARTITION_COUNT, expectedBrokerSummary);
    assertProvisionPartitionIncreaseConstructsCorrectResponse(COMPLETED, expectedState, expectedSummary, true);
  }

  // Convenience overload without a broker recommendation.
  private void assertProvisionPartitionIncreaseConstructsCorrectResponse(KafkaCruiseControlUtils.CompletionType partitionIncreaseCompletion,
                                                                         ProvisionerState.State expectedState,
                                                                         String expectedSummary)
      throws ExecutionException, InterruptedException, TimeoutException {
    assertProvisionPartitionIncreaseConstructsCorrectResponse(partitionIncreaseCompletion, expectedState, expectedSummary, false);
  }

  /**
   * Records admin-client expectations matching the requested completion type,
   * runs {@code _provisioner.rightsize(...)}, and asserts the resulting state/summary.
   */
  private void assertProvisionPartitionIncreaseConstructsCorrectResponse(KafkaCruiseControlUtils.CompletionType partitionIncreaseCompletion,
                                                                         ProvisionerState.State expectedState,
                                                                         String expectedSummary,
                                                                         boolean hasBrokerRecommendation)
      throws ExecutionException, InterruptedException, TimeoutException {
    // The "Ignored" scenario recommends the current partition count (1); the others recommend an increase (2).
    int recommendedPartitionCount = expectedSummary.contains("Ignored") ? MOCK_IGNORED_PARTITION_COUNT : MOCK_PARTITION_COUNT;
    ProvisionRecommendation recommendation =
        new ProvisionRecommendation.Builder(ProvisionStatus.UNDER_PROVISIONED).numPartitions(recommendedPartitionCount)
                                                                              .topicPattern(MOCK_TOPIC_PATTERN).build();
    Map<String, ProvisionRecommendation> provisionRecommendation;
    if (hasBrokerRecommendation) {
      provisionRecommendation = Map.of(RECOMMENDER_UP, recommendation, RECOMMENDER_TO_EXECUTE, BROKER_REC_TO_EXECUTE);
    } else {
      provisionRecommendation = Collections.singletonMap(RECOMMENDER_UP, recommendation);
    }

    Map<String, KafkaFuture<TopicDescription>> describeTopicsValues = Collections.singletonMap(MOCK_TOPIC, MOCK_TOPIC_DESCRIPTION_FUTURE);
    EasyMock.expect(MOCK_ADMIN_CLIENT.describeTopics(Collections.singletonList(MOCK_TOPIC))).andReturn(MOCK_DESCRIBE_TOPICS_RESULT);
    EasyMock.expect(MOCK_DESCRIBE_TOPICS_RESULT.values()).andReturn(describeTopicsValues);
    if (partitionIncreaseCompletion == COMPLETED) {
      EasyMock.expect(MOCK_TOPIC_DESCRIPTION_FUTURE.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)).andReturn(MOCK_TOPIC_DESCRIPTION);

      // Create partitions: for this test, we ignore the fact that the mock cluster has one node -- i.e. in reality a request to increase
      // partition count to two would fail in a cluster with one node.
      // NOTE(review): mixing a concrete map with EasyMock.anyObject() inside
      // singletonMap relies on EasyMock matcher semantics — confirm this
      // matches as intended rather than by object identity.
      EasyMock.expect(MOCK_ADMIN_CLIENT.createPartitions(Collections.singletonMap(MOCK_TOPIC, EasyMock.anyObject())))
              .andReturn(MOCK_CREATE_PARTITIONS_RESULT);
      Map<String, KafkaFuture<Void>> createPartitionsResultValues = Collections.singletonMap(MOCK_TOPIC, MOCK_CREATE_PARTITIONS_FUTURE);
      EasyMock.expect(MOCK_CREATE_PARTITIONS_RESULT.values()).andReturn(createPartitionsResultValues);
      EasyMock.expect(MOCK_CREATE_PARTITIONS_FUTURE.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)).andReturn(null);
    } else if (partitionIncreaseCompletion == KafkaCruiseControlUtils.CompletionType.COMPLETED_WITH_ERROR) {
      // Simulate the describe-topics future failing with an invalid-topic error.
      EasyMock.expect(MOCK_TOPIC_DESCRIPTION_FUTURE.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS))
              .andThrow(new ExecutionException(new InvalidTopicException()));
    } else {
      EasyMock.expect(MOCK_TOPIC_DESCRIPTION_FUTURE.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)).andReturn(MOCK_TOPIC_DESCRIPTION);
    }
    EasyMock.replay(MOCK_ADMIN_CLIENT, MOCK_DESCRIBE_TOPICS_RESULT, MOCK_TOPIC_DESCRIPTION_FUTURE,
                    MOCK_CREATE_PARTITIONS_FUTURE, MOCK_CREATE_PARTITIONS_RESULT);

    ProvisionerState results = _provisioner.rightsize(provisionRecommendation, RIGHTSIZE_OPTIONS);

    assertEquals(expectedState, results.state());
    assertEquals(expectedSummary, results.summary());
  }
}
| 4,264 |
1,303 | /*
* Tencent is pleased to support the open source community by making TBase available.
*
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
*
* TBase is licensed under the BSD 3-Clause License, except for the third-party component listed below.
*
* A copy of the BSD 3-Clause License is included in this file.
*
* Other dependencies and licenses:
*
* Open Source Software Licensed Under the PostgreSQL License:
* --------------------------------------------------------------------
* 1. Postgres-XL XL9_5_STABLE
* Portions Copyright (c) 2015-2016, 2ndQuadrant Ltd
* Portions Copyright (c) 2012-2015, TransLattice, Inc.
* Portions Copyright (c) 2010-2017, Postgres-XC Development Group
* Portions Copyright (c) 1996-2015, The PostgreSQL Global Development Group
* Portions Copyright (c) 1994, The Regents of the University of California
*
* Terms of the PostgreSQL License:
* --------------------------------------------------------------------
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
* is hereby granted, provided that the above copyright notice and this
* paragraph and the following two paragraphs appear in all copies.
*
* IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
* LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
* DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
*
* Terms of the BSD 3-Clause License:
* --------------------------------------------------------------------
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of THL A29 Limited nor the names of its contributors may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
/*
* pgp-cfb.c
* Implements both normal and PGP-specific CFB mode.
*
* Copyright (c) 2005 <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* contrib/pgcrypto/pgp-cfb.c
*/
#include "postgres.h"
#include "contrib/pgcrypto/px.h"
#include "contrib/pgcrypto/pgp.h"
/* Direction/mode-specific byte-mixing callback used by cfb_process(). */
typedef int (*mix_data_t) (PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
/* State for one CFB-mode cipher stream. */
struct PGP_CFB
{
	PX_Cipher *ciph;	/* underlying block cipher, owned by this context */
	int block_size;		/* cipher block size in bytes */
	int pos;		/* current byte offset inside the block being processed */
	int block_no;		/* number of blocks started (capped at 5 in cfb_process) */
	int resync;		/* nonzero = old PGP "resync" CFB variant */
	uint8 fr[PGP_MAX_BLOCK];	/* feedback register (input to the cipher) */
	uint8 fre[PGP_MAX_BLOCK];	/* encrypted feedback register (XOR keystream) */
	uint8 encbuf[PGP_MAX_BLOCK];	/* ciphertext of the current block */
};
/*
 * Allocate and initialize a CFB context.
 *
 * algo/key/key_len select and key the underlying cipher; resync selects the
 * old PGP resync variant; iv, if non-NULL, seeds the feedback register.
 * Returns 0 on success or a negative error code from cipher setup; on
 * failure nothing is stored into *ctx_p.
 */
int
pgp_cfb_create(PGP_CFB **ctx_p, int algo, const uint8 *key, int key_len,
			   int resync, uint8 *iv)
{
	int res;
	PX_Cipher *ciph;
	PGP_CFB *ctx;
	res = pgp_load_cipher(algo, &ciph);
	if (res < 0)
		return res;
	res = px_cipher_init(ciph, key, key_len, NULL);
	if (res < 0)
	{
		/* cipher was loaded but could not be keyed; release it */
		px_cipher_free(ciph);
		return res;
	}
	/* NOTE(review): crypt_alloc() result is not NULL-checked; presumably it
	 * aborts on OOM (like px_alloc) — confirm, otherwise the memset below
	 * can dereference NULL. */
	ctx = crypt_alloc(sizeof(*ctx));
	memset(ctx, 0, sizeof(*ctx));
	ctx->ciph = ciph;
	ctx->block_size = px_cipher_block_size(ciph);
	ctx->resync = resync;
	if (iv)
		memcpy(ctx->fr, iv, ctx->block_size);
	*ctx_p = ctx;
	return 0;
}
/*
 * Destroy a CFB context: free the cipher, then scrub the context (it holds
 * feedback/keystream material) before releasing its memory.
 */
void
pgp_cfb_free(PGP_CFB *ctx)
{
	px_cipher_free(ctx->ciph);
	crypt_memset(ctx, 0, sizeof(*ctx));
	crypt_free(ctx);
}
/*
* Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
*/
/*
 * Normal-CFB encryption mix: XOR plaintext with the encrypted feedback
 * register, keeping the produced ciphertext in encbuf so cfb_process()
 * can roll it into the next feedback block.  Returns bytes consumed.
 */
static int
mix_encrypt_normal(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
{
	int			i = ctx->pos;
	int			end = ctx->pos + len;

	while (i < end)
	{
		ctx->encbuf[i] = ctx->fre[i] ^ *data++;
		*dst++ = ctx->encbuf[i];
		i++;
	}
	ctx->pos = end;
	return len;
}
/*
 * Normal-CFB decryption mix: stash incoming ciphertext in encbuf (it
 * becomes the next feedback block) and emit plaintext = fre XOR ciphertext.
 * Returns bytes consumed.
 */
static int
mix_decrypt_normal(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
{
	int			i = ctx->pos;
	int			end = ctx->pos + len;

	for (; i < end; i++, data++, dst++)
	{
		ctx->encbuf[i] = *data;
		*dst = ctx->fre[i] ^ *data;
	}
	ctx->pos = end;
	return len;
}
/*
* Data processing for old PGP CFB mode. (PGP_PKT_SYMENCRYPTED_DATA)
*
* The goal is to hide the horror from the rest of the code,
* thus its all concentrated here.
*/
/*
 * Resync-mode encryption mix.  In old PGP CFB the second "block" is only
 * 2 bytes long; after it, the feedback register is rebuilt by rotating the
 * ciphertext seen so far (the resync step), then processing continues as
 * in normal CFB.  Returns the number of input bytes consumed, which may be
 * less than len when the resync boundary is hit.
 */
static int
mix_encrypt_resync(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
{
	int i,
	n;
	/* block #2 is 2 bytes long */
	if (ctx->block_no == 2)
	{
		n = 2 - ctx->pos;
		if (len < n)
			n = len;
		for (i = ctx->pos; i < ctx->pos + n; i++)
			*dst++ = ctx->encbuf[i] = ctx->fre[i] ^ (*data++);
		ctx->pos += n;
		len -= n;
		if (ctx->pos == 2)
		{
			/* resync: feedback register = last block_size ciphertext bytes */
			memcpy(ctx->fr, ctx->encbuf + 2, ctx->block_size - 2);
			memcpy(ctx->fr + ctx->block_size - 2, ctx->encbuf, 2);
			ctx->pos = 0;
			/* stop here so cfb_process() re-encrypts the new feedback */
			return n;
		}
	}
	for (i = ctx->pos; i < ctx->pos + len; i++)
		*dst++ = ctx->encbuf[i] = ctx->fre[i] ^ (*data++);
	ctx->pos += len;
	return len;
}
/*
 * Resync-mode decryption mix: mirror of mix_encrypt_resync(), except the
 * incoming bytes are ciphertext (stored into encbuf before XOR).  Returns
 * the number of input bytes consumed, which may be less than len when the
 * 2-byte block #2 resync boundary is hit.
 */
static int
mix_decrypt_resync(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
{
	int i,
	n;
	/* block #2 is 2 bytes long */
	if (ctx->block_no == 2)
	{
		n = 2 - ctx->pos;
		if (len < n)
			n = len;
		for (i = ctx->pos; i < ctx->pos + n; i++)
		{
			ctx->encbuf[i] = *data++;
			*dst++ = ctx->fre[i] ^ ctx->encbuf[i];
		}
		ctx->pos += n;
		len -= n;
		if (ctx->pos == 2)
		{
			/* resync: feedback register = last block_size ciphertext bytes */
			memcpy(ctx->fr, ctx->encbuf + 2, ctx->block_size - 2);
			memcpy(ctx->fr + ctx->block_size - 2, ctx->encbuf, 2);
			ctx->pos = 0;
			/* stop here so cfb_process() re-encrypts the new feedback */
			return n;
		}
	}
	for (i = ctx->pos; i < ctx->pos + len; i++)
	{
		ctx->encbuf[i] = *data++;
		*dst++ = ctx->fre[i] ^ ctx->encbuf[i];
	}
	ctx->pos += len;
	return len;
}
/*
* common code for both encrypt and decrypt.
*/
/*
 * Common driver for both encrypt and decrypt; mix_data supplies the
 * direction/mode-specific per-byte handling.  Always returns 0.
 */
static int
cfb_process(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst,
	mix_data_t mix_data)
{
	int n;
	int res;
	/* First drain any partially consumed block left from a previous call. */
	while (len > 0 && ctx->pos > 0)
	{
		n = ctx->block_size - ctx->pos;
		if (len < n)
			n = len;
		n = mix_data(ctx, data, n, dst);
		data += n;
		dst += n;
		len -= n;
		if (ctx->pos == ctx->block_size)
		{
			/* block complete: its ciphertext becomes the next feedback */
			memcpy(ctx->fr, ctx->encbuf, ctx->block_size);
			ctx->pos = 0;
		}
	}
	/* Then process whole blocks (and a final partial one, if any). */
	while (len > 0)
	{
		/* generate keystream for this block: fre = E(fr) */
		px_cipher_encrypt(ctx->ciph, ctx->fr, ctx->block_size, ctx->fre);
		if (ctx->block_no < 5)
			ctx->block_no++;	/* only blocks 1..2 matter (resync); cap to avoid overflow */
		n = ctx->block_size;
		if (len < n)
			n = len;
		/* resync mixers may consume fewer than n bytes at the resync point */
		res = mix_data(ctx, data, n, dst);
		data += res;
		dst += res;
		len -= res;
		if (ctx->pos == ctx->block_size)
		{
			memcpy(ctx->fr, ctx->encbuf, ctx->block_size);
			ctx->pos = 0;
		}
	}
	return 0;
}
/*
* public interface
*/
/*
 * Encrypt len bytes from data into dst, choosing the resync or normal
 * mixer according to how the context was created.  Always returns 0.
 */
int
pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
{
	if (ctx->resync)
		return cfb_process(ctx, data, len, dst, mix_encrypt_resync);
	return cfb_process(ctx, data, len, dst, mix_encrypt_normal);
}
/*
 * Decrypt len bytes from data into dst, choosing the resync or normal
 * mixer according to how the context was created.  Always returns 0.
 */
int
pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
{
	if (ctx->resync)
		return cfb_process(ctx, data, len, dst, mix_decrypt_resync);
	return cfb_process(ctx, data, len, dst, mix_decrypt_normal);
}
| 4,300 |
5,169 | <filename>Specs/5/0/7/EnergyBar/0.1.1/EnergyBar.podspec.json
{
"name": "EnergyBar",
"version": "0.1.1",
  "summary": "Simple Message bar like Android's SnackBar",
  "description": "Simple Message bar like Android's SnackBar",
"homepage": "https://github.com/Koosj/EnergyBar",
"license": {
"type": "MIT"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"platforms": {
"ios": "12.0"
},
"source": {
"git": "https://github.com/Koosj/EnergyBar.git",
"tag": "0.1.1"
},
"source_files": "energybar/*.swift"
}
| 233 |
733 | import pytest
import random
import os
import numpy as np
import torch
class CustomCommandLineOption(object):
    """An object for storing command line options parsed by pytest.

    Since `pytest.config` global object is deprecated and removed in version
    5.0, this class is made to work as a store of command line options for
    those components which are not able to access them via `request.config`.

    Options added via :meth:`add` are readable as plain attributes.
    """

    def __init__(self):
        # Backing store; attribute lookups fall through to this dict.
        self._content = {}

    def __str__(self):
        return str(self._content)

    def add(self, key, value):
        """Register option `key` with `value`, exposing it as an attribute."""
        self._content.update({key: value})

    def delete(self, key):
        """Remove a previously added option; raises KeyError if absent."""
        del self._content[key]

    def __getattr__(self, key):
        # Only invoked for names not found through normal lookup.
        if key in self._content:
            return self._content[key]
        # Bug fix: `object` has no `__getattr__`, so the original
        # `super().__getattr__(key)` call raised a confusing AttributeError
        # about `super`; raise the conventional AttributeError instead.
        raise AttributeError(key)
def pytest_addoption(parser):
    # Pytest hook: register extra command line flags for this test suite.
    parser.addoption(
        "--cpu_only", action="store_true", help="Forcibly run all tests on CPU."
    )
def pytest_configure(config):
    # Pytest hook: runs once after command line parsing, before collection.
    # Bind a config object to `pytest` module instance
    pytest.custom_cmdopt = CustomCommandLineOption()
    pytest.custom_cmdopt.add("cpu_only", config.getoption("--cpu_only"))
    # Set the random seed so that the tests are reproducible between test runs and
    # hopefully torch and numpy versions. This seed should also allow all range tests
    # with a starting lr of 1e-5 and an ending lr of 1e-1 to run the full test without
    # diverging
    seed = 1
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # silently a no-op when CUDA is unavailable
    # Trade cuDNN autotuned speed for run-to-run reproducibility.
    torch.backends.cudnn.deterministic = True
| 610 |
1,676 | package sdk.chat.profile.pictures;
import android.app.AlertDialog;
import android.os.Bundle;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.GridLayout;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.Toast;
import androidx.annotation.LayoutRes;
import androidx.annotation.Nullable;
import com.bumptech.glide.Glide;
import java.util.ArrayList;
import java.util.List;
import butterknife.BindView;
import sdk.chat.core.dao.Keys;
import sdk.chat.core.dao.User;
import sdk.chat.core.image.ImageUploadResult;
import sdk.chat.core.session.ChatSDK;
import sdk.chat.core.utils.PermissionRequestHandler;
import sdk.chat.ui.activities.ImagePreviewActivity;
import sdk.chat.ui.chat.MediaSelector;
import sdk.chat.ui.icons.Icons;
import sdk.chat.ui.module.UIModule;
import sdk.chat.ui.utils.ImagePickerUploader;
import sdk.chat.ui.utils.ToastHelper;
import sdk.guru.common.RX;
/**
* Created by Pepe on 01/12/19.
*/
/**
 * Displays a user's profile pictures in a grid. When showing the current
 * user's own profile, pictures can be added, deleted, and promoted to the
 * default picture (long-press menu).
 *
 * Created by Pepe on 01/12/19.
 */
public class ProfilePicturesActivity extends ImagePreviewActivity {

    /** User whose pictures are shown; null means the current user (see {@link #getUser()}). */
    protected User user;
    protected MenuItem addMenuItem;
    protected ImagePickerUploader imagePickerUploader = new ImagePickerUploader(MediaSelector.CropType.Circle);

    // Layout defaults; each may be overridden by an intent extra in onCreate().
    protected int gridPadding = 4;
    protected int pictureMargin = 8;
    protected int picturesPerRow = 2;
    protected int maxPictures = 6;
    protected boolean hideButton = false;
    /** Toast shown when the picture limit is reached; null/empty means no message. */
    protected String limitWarning = null;

    @BindView(R2.id.imageView) protected ImageView imageView;
    @BindView(R2.id.gridLayout) protected GridLayout gridLayout;
    @BindView(R2.id.root) protected LinearLayout root;

    @Override
    protected @LayoutRes int getLayout() {
        return R.layout.activity_profile_pictures;
    }

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        String userEntityID = getIntent().getStringExtra(Keys.IntentKeyUserEntityID);
        if (userEntityID != null && !userEntityID.isEmpty()) {
            user = ChatSDK.db().fetchUserWithEntityID(userEntityID);
            if (user == null) {
                // An entity ID was supplied but no such user exists locally.
                ToastHelper.show(this, R.string.user_entity_id_not_set);
                finish();
                return;
            }
        }
        gridPadding = getIntent().getIntExtra(BaseProfilePicturesHandler.KeyGridPadding, gridPadding);
        // Bug fix: the fallback was `maxPictures`, which silently changed the
        // margin whenever the extra was absent; use the field's own default.
        pictureMargin = getIntent().getIntExtra(BaseProfilePicturesHandler.KeyPictureMargin, pictureMargin);
        picturesPerRow = getIntent().getIntExtra(BaseProfilePicturesHandler.KeyPicturesPerRow, picturesPerRow);
        maxPictures = getIntent().getIntExtra(BaseProfilePicturesHandler.KeyMaxPictures, maxPictures);
        hideButton = getIntent().getBooleanExtra(BaseProfilePicturesHandler.KeyHideButton, hideButton);
        String warning = getIntent().getStringExtra(BaseProfilePicturesHandler.KeyLimitWarning);
        if (warning != null) {
            limitWarning = warning;
        }
        setActionBarTitle(R.string.profile);
        initViews();
    }

    @Override
    protected void setupViews() {
        super.setupViews();
        gridLayout.setPadding(gridPadding, gridPadding, gridPadding, gridPadding);
        gridLayout.setColumnCount(picturesPerRow);
    }

    /**
     * Builds one grid cell showing the picture at {@code url}. Tapping zooms
     * the image; on the current user's own profile, long-pressing opens a
     * dialog to delete the picture or make it the default.
     */
    protected View createCellView(String url) {
        ImageView cell = new ImageView(this);
        // Get the screen width
        int size = getResources().getDisplayMetrics().widthPixels / 2 - gridPadding;
        Glide.with(this).load(url)
                .placeholder(UIModule.config().defaultProfilePlaceholder)
                .error(UIModule.config().defaultProfilePlaceholder)
                .dontAnimate().override(size, size).centerCrop().into(cell);
        cell.setOnClickListener(v -> {
            zoomImageFromThumbnail(cell, url);
        });
        if (getUser().isMe()) {
            cell.setOnLongClickListener(v -> {
                // Index 0 is the default picture by convention.
                boolean isDefault = ChatSDK.profilePictures().fromUser(getUser()).indexOf(url) == 0;
                AlertDialog.Builder builder = new AlertDialog.Builder(this);
                if (!isDefault) {
                    builder.setTitle(getString(R.string.set_as_default));
                    builder.setPositiveButton(getString(R.string.set_as_default), (dialog, which) -> {
                        showOrUpdateProgressDialog(getString(R.string.updating_pictures));
                        ChatSDK.profilePictures().setDefaultPicture(user, url);
                        dm.add(ChatSDK.core().pushUser().observeOn(RX.main()).subscribe(() -> {
                            dismissProgressDialog();
                            updateGallery();
                        }));
                    });
                } else {
                    builder.setTitle(getString(R.string.action_delete_picture));
                }
                builder.setNegativeButton(getString(R.string.delete), (dialog, which) -> {
                    showOrUpdateProgressDialog(getString(R.string.deleting_picture));
                    ChatSDK.profilePictures().removePicture(user, url);
                    dm.add(ChatSDK.core().pushUser().observeOn(RX.main()).subscribe(() -> {
                        dismissProgressDialog();
                        updateGallery();
                    }));
                });
                builder.setNeutralButton(R.string.cancel, (dialog, which) -> dialog.cancel());
                builder.show();
                return true;
            });
        }
        return cell;
    }

    /** Adds {@code cell} to the grid, sizing it to a square column and applying margins. */
    protected void addCellToGridLayout(GridLayout gridLayout, View cell) {
        if (cell != null) {
            gridLayout.addView(cell);
            GridLayout.LayoutParams params = (GridLayout.LayoutParams) cell.getLayoutParams();
            int size = gridLayout.getWidth() / gridLayout.getColumnCount() - pictureMargin * 2;
            params.topMargin = pictureMargin;
            params.leftMargin = pictureMargin;
            params.rightMargin = pictureMargin;
            params.bottomMargin = pictureMargin;
            params.width = size;
            params.height = size;
            cell.setLayoutParams(params);
        }
    }

    /** Rebuilds the grid from the user's current picture list and syncs the add button. */
    protected void updateGallery() {
        ArrayList<String> urls = ChatSDK.profilePictures().fromUser(getUser());
        gridLayout.removeAllViews();
        for (String url : urls) {
            addCellToGridLayout(gridLayout, createCellView(url));
        }
        if (addMenuItem != null) {
            addMenuItem.setVisible(shouldShowAddButton(urls));
        }
    }

    /** The add button shows unless hideButton is set and the limit is reached. */
    protected boolean shouldShowAddButton(List<String> urls) {
        return !hideButton || urls.size() < maxPictures;
    }

    /**
     * Prompts for a new photo, uploads it, and attaches it to the profile.
     * Shows {@link #limitWarning} (if configured) when the limit is reached.
     */
    protected void addProfilePicture() {
        if (ChatSDK.profilePictures().fromUser(getUser()).size() >= maxPictures && maxPictures > 0) {
            // Bug fix: limitWarning defaults to null when no intent extra was
            // supplied; the original call to isEmpty() could throw an NPE here.
            if (limitWarning != null && !limitWarning.isEmpty()) {
                ToastHelper.show(this, limitWarning);
            }
            return;
        }
        dm.add(PermissionRequestHandler.requestImageMessage(this).subscribe(() -> {
            dm.add(imagePickerUploader.choosePhoto(this, false).subscribe((results, throwable) -> {
                if (throwable != null) {
                    Toast.makeText(ProfilePicturesActivity.this, throwable.getLocalizedMessage(), Toast.LENGTH_SHORT).show();
                }
                else {
                    for (ImageUploadResult result : results) {
                        ChatSDK.profilePictures().addPicture(getUser(), result.url);
                    }
                    updateGallery();
                    dm.add(ChatSDK.core().pushUser()
                            .observeOn(RX.main())
                            .subscribe(() -> {
                            }, this));
                }
            }));
        }, this));
    }

    /** The displayed user: the one passed via intent, or the current user. */
    protected User getUser() {
        return user != null ? user : ChatSDK.currentUser();
    }

    @Override
    protected void onResume() {
        super.onResume();
        updateGallery();
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        updateGallery();
        // Only the profile owner gets the "add" action.
        if (!getUser().isMe())
            return super.onCreateOptionsMenu(menu);
        getMenuInflater().inflate(R.menu.add_menu, menu);
        addMenuItem = menu.findItem(R.id.action_add).setIcon(Icons.get(this, Icons.choose().add, Icons.shared().actionBarIconColor));
        addMenuItem.setVisible(shouldShowAddButton(ChatSDK.profilePictures().fromUser(getUser())));
        return super.onCreateOptionsMenu(menu);
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        int id = item.getItemId();
        if (id == R.id.action_add) {
            addProfilePicture();
            return true;
        }
        return super.onOptionsItemSelected(item);
    }
}
| 3,908 |
1,145 | <reponame>foss4/pontoon
import pytest
from pontoon.base.tests import ProjectFactory, ProjectLocaleFactory
from pontoon.base.models import Project, ProjectLocale
@pytest.fixture
def public_project():
    # A project anyone can see.
    yield ProjectFactory.create(visibility=Project.Visibility.PUBLIC)
@pytest.fixture
def private_project():
    # A project with the factory's default (non-public) visibility.
    yield ProjectFactory.create()
@pytest.fixture
def public_project_locale():
    # A project/locale pair whose project is publicly visible.
    yield ProjectLocaleFactory.create(project__visibility=Project.Visibility.PUBLIC)
@pytest.fixture
def private_project_locale():
    # A project/locale pair whose project has the default (non-public) visibility.
    yield ProjectLocaleFactory.create()
@pytest.mark.django_db
def test_project_visibility_filters_on_superuser(
    public_project, private_project, admin
):
    """A superuser sees private projects as well as public ones."""
    candidate_pks = [public_project.pk, private_project.pk]
    visible = Project.objects.visible_for(admin).filter(pk__in=candidate_pks)
    assert list(visible) == [public_project, private_project]
@pytest.mark.django_db
def test_project_visibility_filters_on_contributors(
    public_project, private_project, user_a
):
    """A regular contributor sees only public projects."""
    candidate_pks = [public_project.pk, private_project.pk]
    visible = Project.objects.visible_for(user_a).filter(pk__in=candidate_pks)
    assert list(visible) == [public_project]
@pytest.mark.django_db
def test_project_locale_visibility_filters_on_superuser(
    public_project_locale, private_project_locale, admin
):
    """A superuser sees project locales of both public and private projects."""
    candidate_pks = [public_project_locale.pk, private_project_locale.pk]
    visible = ProjectLocale.objects.visible_for(admin).filter(pk__in=candidate_pks)
    expected = [public_project_locale, private_project_locale]
    assert list(visible) == expected
@pytest.mark.django_db
def test_project_locale_visibility_filters_on_contributors(
    public_project_locale, private_project_locale, user_a
):
    """A regular contributor sees only project locales of public projects."""
    candidate_pks = [public_project_locale.pk, private_project_locale.pk]
    visible = ProjectLocale.objects.visible_for(user_a).filter(pk__in=candidate_pks)
    assert list(visible) == [public_project_locale]
| 723 |
354 | /*-------------------------------------------------------------------------
* drawElements Quality Program OpenGL ES 3.0 Module
* -------------------------------------------------
*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief Read pixels tests
*//*--------------------------------------------------------------------*/
#include "es3fReadPixelsTests.hpp"
#include "tcuTexture.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuImageCompare.hpp"
#include "tcuTestLog.hpp"
#include "tcuRenderTarget.hpp"
#include "deRandom.hpp"
#include "deMath.h"
#include "deString.h"
#include "deStringUtil.hpp"
#include "gluDefs.hpp"
#include "gluShaderProgram.hpp"
#include "gluStrUtil.hpp"
#include "gluTextureUtil.hpp"
#include <cstring>
#include <sstream>
#include "glw.h"
using std::vector;
namespace deqp
{
namespace gles3
{
namespace Functional
{
namespace
{
/*
 * Verifies glReadPixels() against a software-rendered reference image,
 * exercising pack parameters (alignment, row length, skip rows/pixels)
 * and several format/type combinations.
 */
class ReadPixelsTest : public TestCase
{
public:
	/* Test variation flags. */
	enum
	{
		FLAG_NO_FLAGS = 0x0,
		FLAG_CHOOSE_FORMAT = 0x1,	/* query GL_IMPLEMENTATION_COLOR_READ_FORMAT/TYPE */
		FLAG_USE_RBO = 0x2,		/* read from an integer renderbuffer FBO */
	};
	ReadPixelsTest (Context& context, const char* name, const char* description, int flags, int alignment, GLint rowLength, GLint skipRows, GLint skipPixels, GLenum format = GL_RGBA, GLenum type = GL_UNSIGNED_BYTE);
	IterateResult iterate (void);
	/* Draws a centered quad and mirrors the result into the reference image. */
	void render (tcu::Texture2D& reference);
private:
	int m_seed;		/* PRNG seed derived from the test name */
	bool m_chooseFormat;	/* use implementation-chosen read format/type */
	bool m_useRenderBuffer;	/* render into an RBO-backed framebuffer */
	int m_alignment;	/* GL_PACK_ALIGNMENT value */
	GLint m_rowLength;	/* GL_PACK_ROW_LENGTH value (0 = tight) */
	GLint m_skipRows;	/* GL_PACK_SKIP_ROWS value */
	GLint m_skipPixels;	/* GL_PACK_SKIP_PIXELS value */
	GLint m_format;		/* glReadPixels format */
	GLint m_type;		/* glReadPixels type */
	const int m_width;
	const int m_height;
	/* Resolves the effective read format/type and the per-pixel byte size. */
	void getFormatInfo (tcu::TextureFormat& format, int& pixelSize);
	/* Clears + renders the scene, then reads pixels back into pixelData. */
	void clearColor (tcu::Texture2D& reference, vector<deUint8>& pixelData, int pixelSize);
};
/* Constructor: captures pack parameters and format selection for iterate(). */
ReadPixelsTest::ReadPixelsTest (Context& context, const char* name, const char* description, int flags, int alignment, GLint rowLength, GLint skipRows, GLint skipPixels, GLenum format, GLenum type)
	: TestCase (context, name, description)
	, m_seed (deStringHash(name))
	, m_chooseFormat ((flags & FLAG_CHOOSE_FORMAT) != 0)
	, m_useRenderBuffer ((flags & FLAG_USE_RBO) != 0)
	, m_alignment (alignment)
	, m_rowLength (rowLength)
	, m_skipRows (skipRows)
	, m_skipPixels (skipPixels)
	, m_format (format)
	, m_type (type)
	/* 13x13 render target; presumably odd-sized so rows are not naturally
	 * aligned and the pack parameters actually matter — confirm. */
	, m_width (13)
	, m_height (13)
{
}
/*
 * Draws a quad covering the center quarter of the viewport using a minimal
 * program, then marks the same pixels in the software reference image.
 * The fragment color is (0,0,0,1) for float formats and (0,0,0,1000) for
 * integer formats.
 */
void ReadPixelsTest::render (tcu::Texture2D& reference)
{
	// Create program
	const char* vertexSource =
	"#version 300 es\n"
	"in mediump vec2 i_coord;\n"
	"void main (void)\n"
	"{\n"
	"\tgl_Position = vec4(i_coord, 0.0, 1.0);\n"
	"}\n";
	std::stringstream fragmentSource;
	fragmentSource <<
	"#version 300 es\n";
	// The fragment output type must match the attached color buffer:
	// ivec4/uvec4 for integer formats, vec4 otherwise.
	if (reference.getFormat().type == tcu::TextureFormat::SIGNED_INT32)
		fragmentSource << "layout(location = 0) out mediump ivec4 o_color;\n";
	else if (reference.getFormat().type == tcu::TextureFormat::UNSIGNED_INT32)
		fragmentSource << "layout(location = 0) out mediump uvec4 o_color;\n";
	else
		fragmentSource << "layout(location = 0) out mediump vec4 o_color;\n";
	fragmentSource <<
	"void main (void)\n"
	"{\n";
	if (reference.getFormat().type == tcu::TextureFormat::UNSIGNED_INT32)
		fragmentSource << "\to_color = uvec4(0, 0, 0, 1000);\n";
	else if (reference.getFormat().type == tcu::TextureFormat::SIGNED_INT32)
		fragmentSource << "\to_color = ivec4(0, 0, 0, 1000);\n";
	else
		fragmentSource << "\to_color = vec4(0.0, 0.0, 0.0, 1.0);\n";
	fragmentSource <<
	"}\n";
	glu::ShaderProgram program(m_context.getRenderContext(), glu::makeVtxFragSources(vertexSource, fragmentSource.str()));
	m_testCtx.getLog() << program;
	TCU_CHECK(program.isOk());
	GLU_CHECK_CALL(glUseProgram(program.getProgram()));
	// Render
	{
		// Two triangles forming a quad from (-0.5,-0.5) to (0.5,0.5) in clip space.
		const float coords[] =
		{
			-0.5f, -0.5f,
			0.5f, -0.5f,
			0.5f, 0.5f,
			0.5f, 0.5f,
			-0.5f, 0.5f,
			-0.5f, -0.5f
		};
		GLuint coordLoc;
		coordLoc = glGetAttribLocation(program.getProgram(), "i_coord");
		GLU_CHECK_MSG("glGetAttribLocation()");
		GLU_CHECK_CALL(glEnableVertexAttribArray(coordLoc));
		GLU_CHECK_CALL(glVertexAttribPointer(coordLoc, 2, GL_FLOAT, GL_FALSE, 0, coords));
		GLU_CHECK_CALL(glDrawArrays(GL_TRIANGLES, 0, 6));
		GLU_CHECK_CALL(glDisableVertexAttribArray(coordLoc));
	}
	// Render reference
	// Convert the quad's clip-space corners to pixel coordinates.
	const int coordX1 = (int)((-0.5f * (float)reference.getWidth() / 2.0f) + (float)reference.getWidth() / 2.0f);
	const int coordY1 = (int)((-0.5f * (float)reference.getHeight() / 2.0f) + (float)reference.getHeight() / 2.0f);
	const int coordX2 = (int)(( 0.5f * (float)reference.getWidth() / 2.0f) + (float)reference.getWidth() / 2.0f);
	const int coordY2 = (int)(( 0.5f * (float)reference.getHeight() / 2.0f) + (float)reference.getHeight() / 2.0f);
	for (int x = 0; x < reference.getWidth(); x++)
	{
		if (x < coordX1 || x > coordX2)
			continue;
		for (int y = 0; y < reference.getHeight(); y++)
		{
			if (y >= coordY1 && y <= coordY2)
			{
				// Write the same color the shader produced for this format.
				if (reference.getFormat().type == tcu::TextureFormat::SIGNED_INT32)
					reference.getLevel(0).setPixel(tcu::IVec4(0, 0, 0, 1000), x, y);
				else if (reference.getFormat().type == tcu::TextureFormat::UNSIGNED_INT32)
					reference.getLevel(0).setPixel(tcu::UVec4(0, 0, 0, 1000), x, y);
				else
					reference.getLevel(0).setPixel(tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f), x, y);
			}
		}
	}
}
/*
 * Resolves the read format/type: either the constructor-supplied pair, or
 * (with FLAG_CHOOSE_FORMAT) the implementation-preferred pair queried from
 * GL. Throws NotSupportedError if the queried pair is not handled by this
 * test. Outputs the mapped tcu format and its per-pixel byte size.
 */
void ReadPixelsTest::getFormatInfo (tcu::TextureFormat& format, int& pixelSize)
{
	if (m_chooseFormat)
	{
		GLU_CHECK_CALL(glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &m_format));
		GLU_CHECK_CALL(glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &m_type));
		if (m_format != GL_RGBA && m_format != GL_BGRA && m_format != GL_RGB)
			TCU_THROW(NotSupportedError, ("Unsupported IMPLEMENTATION_COLOR_READ_FORMAT: " + de::toString(glu::getTextureFormatStr(m_format))).c_str());
		if (glu::getTypeName(m_type) == DE_NULL)
			TCU_THROW(NotSupportedError, ("Unsupported GL_IMPLEMENTATION_COLOR_READ_TYPE: " + de::toString(tcu::Format::Hex<4>(m_type))).c_str());
	}
	format = glu::mapGLTransferFormat(m_format, m_type);
	pixelSize = format.getPixelSize();
}
/*
 * Sets up the render target (an RGBA32I/RGBA32UI renderbuffer FBO for
 * integer reads, else the default framebuffer), clears it to a random
 * color, draws the quad via render(), and reads the pixels back into
 * pixelData using the currently set pack parameters. The same clear color
 * is applied to the reference image.
 */
void ReadPixelsTest::clearColor (tcu::Texture2D& reference, vector<deUint8>& pixelData, int pixelSize)
{
	de::Random rnd(m_seed);
	GLuint framebuffer = 0;
	GLuint renderbuffer = 0;
	if (m_useRenderBuffer)
	{
		// Integer formats require an integer renderbuffer attachment.
		if (m_type == GL_UNSIGNED_INT)
		{
			GLU_CHECK_CALL(glGenRenderbuffers(1, &renderbuffer));
			GLU_CHECK_CALL(glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer));
			GLU_CHECK_CALL(glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA32UI, m_width, m_height));
		}
		else if (m_type == GL_INT)
		{
			GLU_CHECK_CALL(glGenRenderbuffers(1, &renderbuffer));
			GLU_CHECK_CALL(glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer));
			GLU_CHECK_CALL(glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA32I, m_width, m_height));
		}
		else
			DE_ASSERT(false);
		GLU_CHECK_CALL(glBindRenderbuffer(GL_RENDERBUFFER, 0));
		GLU_CHECK_CALL(glGenFramebuffers(1, &framebuffer));
		GLU_CHECK_CALL(glBindFramebuffer(GL_FRAMEBUFFER, framebuffer));
		GLU_CHECK_CALL(glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbuffer));
	}
	else if (m_format == GL_RGBA || m_format == GL_BGRA || m_format == GL_RGB)
	{
		// Empty
	}
	else
		DE_ASSERT(false);
	GLU_CHECK_CALL(glViewport(0, 0, reference.getWidth(), reference.getHeight()));
	// Clear color
	if (m_format == GL_RGBA || m_format == GL_BGRA || m_format == GL_RGB)
	{
		const float red = rnd.getFloat();
		const float green = rnd.getFloat();
		const float blue = rnd.getFloat();
		const float alpha = rnd.getFloat();
		const GLfloat color[] = { red, green, blue, alpha };
		// Clear target
		GLU_CHECK_CALL(glClearColor(red, green, blue, alpha));
		m_testCtx.getLog() << tcu::TestLog::Message << "ClearColor: (" << red << ", " << green << ", " << blue << ")" << tcu::TestLog::EndMessage;
		GLU_CHECK_CALL(glClearBufferfv(GL_COLOR, 0, color));
		// Mirror the clear in the reference image.
		tcu::clear(reference.getLevel(0), tcu::Vec4(red, green, blue, alpha));
	}
	else if (m_format == GL_RGBA_INTEGER)
	{
		if (m_type == GL_INT)
		{
			const GLint red = rnd.getUint32();
			const GLint green = rnd.getUint32();
			const GLint blue = rnd.getUint32();
			const GLint alpha = rnd.getUint32();
			const GLint color[] = { red, green, blue, alpha };
			m_testCtx.getLog() << tcu::TestLog::Message << "ClearColor: (" << red << ", " << green << ", " << blue << ")" << tcu::TestLog::EndMessage;
			GLU_CHECK_CALL(glClearBufferiv(GL_COLOR, 0, color));
			tcu::clear(reference.getLevel(0), tcu::IVec4(red, green, blue, alpha));
		}
		else if (m_type == GL_UNSIGNED_INT)
		{
			const GLuint red = rnd.getUint32();
			const GLuint green = rnd.getUint32();
			const GLuint blue = rnd.getUint32();
			const GLuint alpha = rnd.getUint32();
			const GLuint color[] = { red, green, blue, alpha };
			m_testCtx.getLog() << tcu::TestLog::Message << "ClearColor: (" << red << ", " << green << ", " << blue << ")" << tcu::TestLog::EndMessage;
			GLU_CHECK_CALL(glClearBufferuiv(GL_COLOR, 0, color));
			tcu::clear(reference.getLevel(0), tcu::UVec4(red, green, blue, alpha));
		}
		else
			DE_ASSERT(false);
	}
	else
		DE_ASSERT(false);
	render(reference);
	// Size the destination buffer per the pack parameters: each row is padded
	// to m_alignment, rows include skipped pixels, plus m_skipRows whole rows.
	const int rowWidth = (m_rowLength == 0 ? m_width : m_rowLength) + m_skipPixels;
	const int rowPitch = m_alignment * deCeilFloatToInt32(float(pixelSize * rowWidth) / (float)m_alignment);
	pixelData.resize(rowPitch * (m_height + m_skipRows), 0);
	GLU_CHECK_CALL(glReadPixels(0, 0, m_width, m_height, m_format, m_type, &(pixelData[0])));
	if (framebuffer)
		GLU_CHECK_CALL(glDeleteFramebuffers(1, &framebuffer));
	if (renderbuffer)
		GLU_CHECK_CALL(glDeleteRenderbuffers(1, &renderbuffer));
}
/*
 * Test body: applies the pack parameters, renders + reads back, then
 * compares against the reference with thresholds derived from the bit
 * depths of both the render target and the read format. Multisampled
 * default framebuffers use a bilinear compare to tolerate resolve.
 */
TestCase::IterateResult ReadPixelsTest::iterate (void)
{
	tcu::TextureFormat format(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
	int pixelSize;
	getFormatInfo(format, pixelSize);
	m_testCtx.getLog() << tcu::TestLog::Message << "Format: " << glu::getTextureFormatStr(m_format) << ", Type: " << glu::getTypeStr(m_type) << tcu::TestLog::EndMessage;
	tcu::Texture2D reference(format, m_width, m_height);
	reference.allocLevel(0);
	// Apply the pack parameters under test (logged for the report).
	GLU_CHECK_CALL(glPixelStorei(GL_PACK_ALIGNMENT, m_alignment));
	m_testCtx.getLog() << tcu::TestLog::Message << "GL_PACK_ALIGNMENT: " << m_alignment << tcu::TestLog::EndMessage;
	GLU_CHECK_CALL(glPixelStorei(GL_PACK_ROW_LENGTH, m_rowLength));
	m_testCtx.getLog() << tcu::TestLog::Message << "GL_PACK_ROW_LENGTH: " << m_rowLength << tcu::TestLog::EndMessage;
	GLU_CHECK_CALL(glPixelStorei(GL_PACK_SKIP_ROWS, m_skipRows));
	m_testCtx.getLog() << tcu::TestLog::Message << "GL_PACK_SKIP_ROWS: " << m_skipRows << tcu::TestLog::EndMessage;
	GLU_CHECK_CALL(glPixelStorei(GL_PACK_SKIP_PIXELS, m_skipPixels));
	m_testCtx.getLog() << tcu::TestLog::Message << "GL_PACK_SKIP_PIXELS: " << m_skipPixels << tcu::TestLog::EndMessage;
	GLU_CHECK_CALL(glViewport(0, 0, m_width, m_height));
	vector<deUint8> pixelData;
	clearColor(reference, pixelData, pixelSize);
	// View into the buffer starting past the skipped rows/pixels.
	const int rowWidth = (m_rowLength == 0 ? m_width : m_rowLength);
	const int rowPitch = m_alignment * deCeilFloatToInt32((float)(pixelSize * rowWidth) / (float)m_alignment);
	const tcu::ConstPixelBufferAccess resultAccess = tcu::ConstPixelBufferAccess(format, m_width, m_height, 1, rowPitch, 0, &(pixelData[pixelSize * m_skipPixels + m_skipRows * rowPitch]));
	// \note Renderbuffers are never multisampled
	if (!m_useRenderBuffer && m_context.getRenderTarget().getNumSamples() > 1)
	{
		// Per-channel thresholds scaled to the coarser of target/read bit depth.
		const tcu::IVec4 formatBitDepths = tcu::getTextureFormatBitDepth(format);
		const deUint8 redThreshold = (deUint8)deCeilFloatToInt32(256.0f * (2.0f / (float)(1 << deMin32(m_context.getRenderTarget().getPixelFormat().redBits, formatBitDepths.x()))));
		const deUint8 greenThreshold = (deUint8)deCeilFloatToInt32(256.0f * (2.0f / (float)(1 << deMin32(m_context.getRenderTarget().getPixelFormat().greenBits, formatBitDepths.y()))));
		const deUint8 blueThreshold = (deUint8)deCeilFloatToInt32(256.0f * (2.0f / (float)(1 << deMin32(m_context.getRenderTarget().getPixelFormat().blueBits, formatBitDepths.z()))));
		const deUint8 alphaThreshold = (deUint8)deCeilFloatToInt32(256.0f * (2.0f / (float)(1 << deMin32(m_context.getRenderTarget().getPixelFormat().alphaBits, formatBitDepths.w()))));
		// bilinearCompare only accepts RGBA, UINT8
		tcu::Texture2D referenceRGBA8 (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), m_width, m_height);
		tcu::Texture2D resultRGBA8 (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), m_width, m_height);
		referenceRGBA8.allocLevel(0);
		resultRGBA8.allocLevel(0);
		tcu::copy(referenceRGBA8.getLevel(0), reference.getLevel(0));
		tcu::copy(resultRGBA8.getLevel(0), resultAccess);
		if (tcu::bilinearCompare(m_testCtx.getLog(), "Result", "Result", referenceRGBA8.getLevel(0), resultRGBA8.getLevel(0), tcu::RGBA(redThreshold, greenThreshold, blueThreshold, alphaThreshold), tcu::COMPARE_LOG_RESULT))
			m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
		else
			m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Fail");
	}
	else
	{
		const tcu::IVec4 formatBitDepths = tcu::getTextureFormatBitDepth(format);
		const float redThreshold = 2.0f / (float)(1 << deMin32(m_context.getRenderTarget().getPixelFormat().redBits, formatBitDepths.x()));
		const float greenThreshold = 2.0f / (float)(1 << deMin32(m_context.getRenderTarget().getPixelFormat().greenBits, formatBitDepths.y()));
		const float blueThreshold = 2.0f / (float)(1 << deMin32(m_context.getRenderTarget().getPixelFormat().blueBits, formatBitDepths.z()));
		const float alphaThreshold = 2.0f / (float)(1 << deMin32(m_context.getRenderTarget().getPixelFormat().alphaBits, formatBitDepths.w()));
		// Compare
		if (tcu::floatThresholdCompare(m_testCtx.getLog(), "Result", "Result", reference.getLevel(0), resultAccess, tcu::Vec4(redThreshold, greenThreshold, blueThreshold, alphaThreshold), tcu::COMPARE_LOG_RESULT))
			m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
		else
			m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Fail");
	}
	return STOP;
}
} // anonymous
/* Group constructor; the individual cases are populated in init(). */
ReadPixelsTests::ReadPixelsTests (Context& context)
	: TestCaseGroup(context, "read_pixels", "ReadPixel tests")
{
}
// Populates the group with three sub-groups exercising the glReadPixels pack
// parameters. The numeric arguments after the flags appear to be: pack alignment,
// row length, skip pixels, skip rows, then optional format/type
// (assumed from the per-group naming pattern — TODO confirm against the
// ReadPixelsTest constructor, which is outside this view).
void ReadPixelsTests::init (void)
{
{
// Varies GL_PACK_ALIGNMENT (1/2/4/8) per format; "choose_*" cases let the
// implementation pick the format (FLAG_CHOOSE_FORMAT).
TestCaseGroup* group = new TestCaseGroup(m_context, "alignment", "Read pixels pack alignment parameter tests");
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_1", "", ReadPixelsTest::FLAG_NO_FLAGS, 1, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_2", "", ReadPixelsTest::FLAG_NO_FLAGS, 2, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_4", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_8", "", ReadPixelsTest::FLAG_NO_FLAGS, 8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_1", "", ReadPixelsTest::FLAG_USE_RBO, 1, 0, 0, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_2", "", ReadPixelsTest::FLAG_USE_RBO, 2, 0, 0, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_4", "", ReadPixelsTest::FLAG_USE_RBO, 4, 0, 0, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_8", "", ReadPixelsTest::FLAG_USE_RBO, 8, 0, 0, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_1", "", ReadPixelsTest::FLAG_USE_RBO, 1, 0, 0, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_2", "", ReadPixelsTest::FLAG_USE_RBO, 2, 0, 0, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_4", "", ReadPixelsTest::FLAG_USE_RBO, 4, 0, 0, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_8", "", ReadPixelsTest::FLAG_USE_RBO, 8, 0, 0, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "choose_1", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 1, 0, 0, 0));
group->addChild(new ReadPixelsTest(m_context, "choose_2", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 2, 0, 0, 0));
group->addChild(new ReadPixelsTest(m_context, "choose_4", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 0, 0, 0));
group->addChild(new ReadPixelsTest(m_context, "choose_8", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 8, 0, 0, 0));
addChild(group);
}
{
// Varies GL_PACK_ROW_LENGTH with prime values (17/19/23/29) at alignment 4.
TestCaseGroup* group = new TestCaseGroup(m_context, "rowlength", "Read pixels rowlength test");
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_17", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 17, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_19", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 19, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_23", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 23, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_29", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 29, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_17", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 0, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_19", "", ReadPixelsTest::FLAG_USE_RBO, 4, 19, 0, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_23", "", ReadPixelsTest::FLAG_USE_RBO, 4, 23, 0, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_29", "", ReadPixelsTest::FLAG_USE_RBO, 4, 29, 0, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_17", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 0, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_19", "", ReadPixelsTest::FLAG_USE_RBO, 4, 19, 0, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_23", "", ReadPixelsTest::FLAG_USE_RBO, 4, 23, 0, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_29", "", ReadPixelsTest::FLAG_USE_RBO, 4, 29, 0, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "choose_17", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 17, 0, 0));
group->addChild(new ReadPixelsTest(m_context, "choose_19", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 19, 0, 0));
group->addChild(new ReadPixelsTest(m_context, "choose_23", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 23, 0, 0));
group->addChild(new ReadPixelsTest(m_context, "choose_29", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 29, 0, 0));
addChild(group);
}
{
// Varies GL_PACK_SKIP_PIXELS / GL_PACK_SKIP_ROWS combinations (case names are
// "<skipPixels>_<skipRows>"), with row length fixed at 17.
TestCaseGroup* group = new TestCaseGroup(m_context, "skip", "Read pixels skip pixels and rows test");
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_0_3", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 17, 0, 3, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_3_0", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 17, 3, 0, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_3_3", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 17, 3, 3, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_ubyte_3_5", "", ReadPixelsTest::FLAG_NO_FLAGS, 4, 17, 3, 5, GL_RGBA, GL_UNSIGNED_BYTE));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_0_3", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 0, 3, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_3_0", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 3, 0, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_3_3", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 3, 3, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_int_3_5", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 3, 5, GL_RGBA_INTEGER, GL_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_0_3", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 0, 3, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_3_0", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 3, 0, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_3_3", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 3, 3, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "rgba_uint_3_5", "", ReadPixelsTest::FLAG_USE_RBO, 4, 17, 3, 5, GL_RGBA_INTEGER, GL_UNSIGNED_INT));
group->addChild(new ReadPixelsTest(m_context, "choose_0_3", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 17, 0, 3));
group->addChild(new ReadPixelsTest(m_context, "choose_3_0", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 17, 3, 0));
group->addChild(new ReadPixelsTest(m_context, "choose_3_3", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 17, 3, 3));
group->addChild(new ReadPixelsTest(m_context, "choose_3_5", "", ReadPixelsTest::FLAG_CHOOSE_FORMAT, 4, 17, 3, 5));
addChild(group);
}
}
} // Functional
} // gles3
} // deqp
| 9,022 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.contextualsearch;
import androidx.annotation.IntDef;
import org.chromium.base.metrics.RecordHistogram;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
* Centralizes UMA data collection for Related Searches. All calls must be made from the UI thread.
*/
public class RelatedSearchesUma {
    // Constants for user permissions histogram.
    @IntDef({
            Permissions.SEND_NOTHING,
            Permissions.SEND_URL,
            Permissions.SEND_CONTENT,
            Permissions.SEND_URL_AND_CONTENT,
    })
    @Retention(RetentionPolicy.SOURCE)
    private @interface Permissions {
        int SEND_NOTHING = 0;
        int SEND_URL = 1;
        int SEND_CONTENT = 2;
        int SEND_URL_AND_CONTENT = 3;
        int NUM_ENTRIES = 4;
    }

    // Constants with ScrollAndClickStatus in enums.xml.
    // These values are persisted to logs. Entries should not be renumbered and
    // numeric values should never be reused.
    @IntDef({
            ScrollAndClickStatus.NO_SCROLL_NO_CLICK,
            ScrollAndClickStatus.NO_SCROLL_CLICKED,
            ScrollAndClickStatus.SCROLLED_NO_CLICK,
            ScrollAndClickStatus.SCROLLED_CLICKED,
    })
    @Retention(RetentionPolicy.SOURCE)
    private @interface ScrollAndClickStatus {
        int NO_SCROLL_NO_CLICK = 0;
        int NO_SCROLL_CLICKED = 1;
        int SCROLLED_NO_CLICK = 2;
        int SCROLLED_CLICKED = 3;
        int NUM_ENTRIES = 4;
    }

    /**
     * Logs a histogram indicating which privacy permissions are available that Related Searches
     * cares about. This ignores any language constraint.
     * <p>This can be called multiple times for each user from any part of the code that's
     * frequently executed.
     * @param canSendUrl Whether this user has allowed sending page URL info to Google.
     * @param canSendContent Whether the user can send page content to Google (has accepted the
     *        Contextual Search opt-in).
     */
    static void logRelatedSearchesPermissionsForAllUsers(
            boolean canSendUrl, boolean canSendContent) {
        @Permissions
        int permissionsEnum;
        if (canSendUrl) {
            permissionsEnum =
                    canSendContent ? Permissions.SEND_URL_AND_CONTENT : Permissions.SEND_URL;
        } else {
            permissionsEnum = canSendContent ? Permissions.SEND_CONTENT : Permissions.SEND_NOTHING;
        }
        RecordHistogram.recordEnumeratedHistogram("Search.RelatedSearches.AllUserPermissions",
                permissionsEnum, Permissions.NUM_ENTRIES);
    }

    /**
     * Logs a histogram indicating that a user is qualified for the Related Searches experiment
     * regardless of whether that feature is enabled. This uses a boolean histogram but always
     * logs true in order to get a raw bucket count (without using a user action, as suggested
     * in the User Action Guidelines doc).
     * <p>We use this to gauge whether each group has a balanced number of qualified users.
     * Can be logged multiple times since we'll just look at the user-count of this histogram.
     * This should be called any time a gesture is detected that could trigger a Related Search
     * if the feature were enabled.
     */
    static void logRelatedSearchesQualifiedUsers() {
        RecordHistogram.recordBooleanHistogram("Search.RelatedSearches.QualifiedUsers", true);
    }

    /**
     * Logs that a Related Searches suggestion was selected by the user and records its position.
     * A position of 0 indicates that the query is the default selection search. This may not be a
     * possible position in some implementations. All indices from 1 on are true Related Searches
     * suggestions.
     * @param position The zero-based position of the suggestion in the UI, or the one-based
     *        position of the suggestion in the list of those returned by the server in cases
     *        where the UI does not show the default selection search in position 0.
     */
    public static void logSelectedSuggestionIndex(int position) {
        RecordHistogram.recordCountHistogram(
                "Search.RelatedSearches.SelectedSuggestionIndex", position);
    }

    /**
     * Logs that a Chip was selected by the user in a carousel and records its position.
     * The indexes indicate the physical position of the chip in the carousel, not the
     * logical association with any suggestion (since the first position is variable).
     * @param position The 0-based position in the carousel.
     */
    public static void logSelectedCarouselIndex(int position) {
        RecordHistogram.recordCountHistogram(
                "Search.RelatedSearches.SelectedCarouselIndex", position);
    }

    /**
     * Logs the CTR for a Related Searches user interaction. Call this function with either
     * {@code false} or {@code true} when the UI is closed depending on whether the user chose any
     * suggestion.
     * @param clicked Whether the user clicked any suggestion or not after they were presented.
     */
    public static void logCtr(boolean clicked) {
        RecordHistogram.recordBooleanHistogram("Search.RelatedSearches.CTR", clicked);
    }

    /**
     * Logs the number of suggestions that were selected in a bottom-bar search session.
     * Only logs when at least one suggestion was clicked (zero-click sessions are not recorded).
     * @param numberOfSuggestionsClicked A count of all the clicks on any suggestion in the
     *        UI, including the default selection search (when shown within the suggestions UI).
     */
    public static void logNumberOfSuggestionsClicked(int numberOfSuggestionsClicked) {
        if (numberOfSuggestionsClicked > 0) {
            RecordHistogram.recordCountHistogram(
                    "Search.RelatedSearches.NumberOfSuggestionsClicked2",
                    numberOfSuggestionsClicked);
        }
    }

    /**
     * Logs the last visible item position in a carousel when the carousel is shown.
     * @param position The last visible item position in the carousel.
     */
    public static void logCarouselLastVisibleItemPosition(int position) {
        RecordHistogram.recordCountHistogram(
                "Search.RelatedSearches.CarouselLastVisibleItemPosition", position);
    }

    /**
     * Logs whether the users scrolled the carousel or not.
     * @param scrolled Whether the user scrolled the carousel after chips were presented.
     */
    public static void logCarouselScrolled(boolean scrolled) {
        RecordHistogram.recordBooleanHistogram("Search.RelatedSearches.CarouselScrolled", scrolled);
    }

    /**
     * Logs whether the users scrolled and clicked the carousel.
     * @param scrolled Whether the user scrolled the carousel after chips were presented.
     * @param clicked Whether the user clicked any suggestion or not after they were presented.
     */
    public static void logCarouselScrollAndClickStatus(boolean scrolled, boolean clicked) {
        @ScrollAndClickStatus
        int scrollAndClickStatus;
        if (scrolled) {
            scrollAndClickStatus = clicked ? ScrollAndClickStatus.SCROLLED_CLICKED
                                           : ScrollAndClickStatus.SCROLLED_NO_CLICK;
        } else {
            scrollAndClickStatus = clicked ? ScrollAndClickStatus.NO_SCROLL_CLICKED
                                           : ScrollAndClickStatus.NO_SCROLL_NO_CLICK;
        }
        // Fix: the boundary must come from ScrollAndClickStatus, not Permissions. Both happen
        // to be 4 today, but using the wrong enum's NUM_ENTRIES would silently truncate the
        // histogram if either enum ever grows.
        RecordHistogram.recordEnumeratedHistogram("Search.RelatedSearches.CarouselScrollAndClick",
                scrollAndClickStatus, ScrollAndClickStatus.NUM_ENTRIES);
    }
}
| 2,754 |
778 | <filename>kratos/tests/cpp_tests/utilities/test_sparse_matrix_mutiplication_utilities.cpp<gh_stars>100-1000
// | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: <NAME>
//
//
// Project includes
#include "testing/testing.h"
#include "spaces/ublas_space.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
namespace Kratos {
namespace Testing {
/// The sparse matrix type
typedef typename UblasSpace<double, CompressedMatrix, Vector>::MatrixType SparseMatrixType;
// Verifies SparseMatrixMultiplicationUtility::AssembleSparseMatrixByBlocks:
// a 2x2 grid of 2x2 identity blocks with coefficients [+1 -1; -1 +1] must
// assemble into a 4x4 matrix with +1 on the diagonal, -1 on the block
// off-diagonals, and all entries summing to zero.
KRATOS_TEST_CASE_IN_SUITE(AssembleSparseMatrixByBlocks, KratosCoreFastSuite)
{
// Build a 2x2 sparse identity reused for every block.
SparseMatrixType identity2x2(2, 2);
for (IndexType i = 0; i < 2; ++i) {
identity2x2.push_back(i, i, 1.0);
}
// All four blocks point at the same identity matrix.
DenseMatrix<SparseMatrixType*> matrices_p_blocks(2,2);
matrices_p_blocks(0,0) = &identity2x2;
matrices_p_blocks(1,0) = &identity2x2;
matrices_p_blocks(0,1) = &identity2x2;
matrices_p_blocks(1,1) = &identity2x2;
// Per-block scaling: +1 on the diagonal blocks, -1 off-diagonal.
DenseMatrix<double> contribution_coefficients(2,2);
contribution_coefficients(0,0) = 1.0;
contribution_coefficients(1,0) = -1.0;
contribution_coefficients(0,1) = -1.0;
contribution_coefficients(1,1) = 1.0;
SparseMatrixType solution_matrix;
SparseMatrixMultiplicationUtility::AssembleSparseMatrixByBlocks(solution_matrix, matrices_p_blocks, contribution_coefficients);
const double tolerance = 1.0e-16;
// Diagonal entries come from the +1-scaled identity blocks.
KRATOS_CHECK_NEAR(solution_matrix(0,0), 1.0, tolerance);
KRATOS_CHECK_NEAR(solution_matrix(1,1), 1.0, tolerance);
KRATOS_CHECK_NEAR(solution_matrix(2,2), 1.0, tolerance);
KRATOS_CHECK_NEAR(solution_matrix(3,3), 1.0, tolerance);
// Off-diagonal block entries come from the -1-scaled identity blocks.
KRATOS_CHECK_NEAR(solution_matrix(2,0), -1.0, tolerance);
KRATOS_CHECK_NEAR(solution_matrix(3,1), -1.0, tolerance);
KRATOS_CHECK_NEAR(solution_matrix(0,2), -1.0, tolerance);
KRATOS_CHECK_NEAR(solution_matrix(1,3), -1.0, tolerance);
// The coefficients cancel pairwise, so the full matrix sums to zero.
double total = 0.0;
for (std::size_t i = 0; i < 4; ++i) {
for (std::size_t j = 0; j < 4; ++j) {
total += solution_matrix(i,j);
}
}
KRATOS_CHECK_NEAR(total, 0.0, tolerance);
}
} // namespace Testing
} // namespace Kratos.
| 1,058 |
848 | <reponame>bluetiger9/Vitis-AI
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xir_extra_ops
def jit(graph):
    """Attach preprocessing and color-table attributes to ``graph`` and install
    the segmentation post-processor.

    The mean/scale values and the per-class color tables are fixed constants
    (presumably matching the model's training-time BGR preprocessing — confirm
    against the model's deployment notes).
    """
    # Attributes are applied in insertion order (guaranteed for dicts on
    # Python 3.7+), matching the original one-call-per-attribute sequence.
    graph_attrs = {
        "need_preprocess": True,
        "mean": [104.0, 117.0, 123.0],
        "scale": [1.0, 1.0, 1.0],
        "is_rgb_input": False,
        "color1": [128, 232, 70, 156, 153, 153, 30, 0, 35, 152, 180, 60, 0, 142, 70, 100, 100, 230, 32, 178],
        "color2": [64, 35, 70, 102, 153, 153, 170, 220, 142, 251, 130, 20, 0, 0, 0, 60, 80, 0, 11, 43],
        "color3": [128, 244, 70, 102, 190, 153, 250, 220, 107, 152, 70, 220, 255, 0, 0, 0, 0, 0, 119, 255],
    }
    for attr_name, attr_value in graph_attrs.items():
        graph.set_attr(attr_name, attr_value)
    xir_extra_ops.set_postprocessor(
        graph, "libxmodel_postprocessor_segmentation.so.1",
        {"input": ["pred_up_fixed_"]})
| 498 |
7,604 | /*
---------------------------------------------------------------------------
Open Asset Import Library (assimp)
---------------------------------------------------------------------------
Copyright (c) 2006-2021, assimp team
All rights reserved.
Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
* Neither the name of the assimp team, nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior
written permission of the assimp team.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
*/
#include "UnitTestPCH.h"
/// Ensure this test has asserts on, even if the build type doesn't have asserts by default.
#if !defined(ASSIMP_BUILD_DEBUG)
#define ASSIMP_BUILD_DEBUG
#endif
#include <assimp/ai_assert.h>
#include <code/Common/AssertHandler.h>
namespace
{
/// An exception which is thrown by the testAssertHandler.
/// Captures the stringified failed expression plus the file/line where it fired.
struct TestAssertException
{
TestAssertException(const char* failedExpression, const char* file, int line)
: m_failedExpression(failedExpression)
, m_file(file)
, m_line(line)
{
}
std::string m_failedExpression;
std::string m_file;
int m_line;
};
/// Swap the default handler, which aborts, by one which throws,
/// so the test below can observe the assertion instead of dying.
void testAssertHandler(const char* failedExpression, const char* file, int line)
{
throw TestAssertException(failedExpression, file, line);
}
/// RAII scope: installs the throwing handler on construction and ensures that
/// the default assert handler is restored after the test is finished.
struct ReplaceHandlerScope
{
ReplaceHandlerScope()
{
Assimp::setAiAssertHandler(testAssertHandler);
}
~ReplaceHandlerScope()
{
Assimp::setAiAssertHandler(Assimp::defaultAiAssertHandler);
}
};
}
// Verifies that a custom assert handler replaces the default aborting one:
// a failing ai_assert must raise TestAssertException carrying the exact
// stringified expression and this file's name/line range.
TEST(utAssertHandler, replaceWithThrow)
{
ReplaceHandlerScope scope;
try
{
// Deliberately false condition; the installed handler should throw.
ai_assert((2 + 2 == 5) && "Sometimes people put messages here");
EXPECT_TRUE(false); // unreachable if the handler fired
}
catch(const TestAssertException& e)
{
EXPECT_STREQ(e.m_failedExpression.c_str(), "(2 + 2 == 5) && \"Sometimes people put messages here\"");
EXPECT_STREQ(e.m_file.c_str(), __FILE__);
EXPECT_GT(e.m_line, 0);
EXPECT_LT(e.m_line, __LINE__);
}
catch(...)
{
EXPECT_TRUE(false); // wrong exception type escaped the handler
}
}
| 1,220 |
1,042 | /********************************************************************************
* ReactPhysics3D physics library, http://www.reactphysics3d.com *
* Copyright (c) 2010-2020 <NAME> *
*********************************************************************************
* *
* This software is provided 'as-is', without any express or implied warranty. *
* In no event will the authors be held liable for any damages arising from the *
* use of this software. *
* *
* Permission is granted to anyone to use this software for any purpose, *
* including commercial applications, and to alter it and redistribute it *
* freely, subject to the following restrictions: *
* *
* 1. The origin of this software must not be misrepresented; you must not claim *
* that you wrote the original software. If you use this software in a *
* product, an acknowledgment in the product documentation would be *
* appreciated but is not required. *
* *
* 2. Altered source versions must be plainly marked as such, and must not be *
* misrepresented as being the original software. *
* *
* 3. This notice may not be removed or altered from any source distribution. *
* *
********************************************************************************/
#ifndef REACTPHYSICS3D_JOINT_COMPONENTS_H
#define REACTPHYSICS3D_JOINT_COMPONENTS_H
// Libraries
#include <reactphysics3d/mathematics/Transform.h>
#include <reactphysics3d/engine/Entity.h>
#include <reactphysics3d/components/Components.h>
#include <reactphysics3d/containers/Map.h>
// ReactPhysics3D namespace
namespace reactphysics3d {
// Class declarations
class MemoryAllocator;
class EntityManager;
class Joint;
enum class JointType;
// Class JointComponents
/**
* This class represent the component of the ECS that contains generic information about
* all the joints.
*/
class JointComponents : public Components {
private:
// -------------------- Attributes -------------------- //
/// Array of joint entities
Entity* mJointEntities;
/// Array of body entities of the first bodies of the joints
Entity* mBody1Entities;
/// Array of body entities of the second bodies of the joints
Entity* mBody2Entities;
/// Array with pointers to the joints
Joint** mJoints;
/// Array of type of the joints
JointType* mTypes;
/// Array of position correction techniques used for the joints
JointsPositionCorrectionTechnique* mPositionCorrectionTechniques;
/// Array of boolean values to know if the two bodies of the constraint are allowed to collide with each other
bool* mIsCollisionEnabled;
/// True if the joint has already been added into an island during islands creation
bool* mIsAlreadyInIsland;
// -------------------- Methods -------------------- //
/// Allocate memory for a given number of components
virtual void allocate(uint32 nbComponentsToAllocate) override;
/// Destroy a component at a given index
virtual void destroyComponent(uint32 index) override;
/// Move a component from a source to a destination index in the components array
virtual void moveComponentToIndex(uint32 srcIndex, uint32 destIndex) override;
/// Swap two components in the array
virtual void swapComponents(uint32 index1, uint32 index2) override;
public:
/// Structure for the data of a joint component
struct JointComponent {
const Entity body1Entity;
const Entity body2Entity;
Joint* joint;
JointType jointType;
JointsPositionCorrectionTechnique positionCorrectionTechnique;
bool isCollisionEnabled;
/// Constructor
JointComponent(Entity body1Entity, Entity body2Entity, Joint* joint, JointType jointType,
JointsPositionCorrectionTechnique positionCorrectionTechnique, bool isCollisionEnabled)
: body1Entity(body1Entity), body2Entity(body2Entity), joint(joint), jointType(jointType),
positionCorrectionTechnique(positionCorrectionTechnique), isCollisionEnabled(isCollisionEnabled) {
}
};
// -------------------- Methods -------------------- //
/// Constructor
JointComponents(MemoryAllocator& allocator);
/// Destructor
virtual ~JointComponents() override = default;
/// Add a component
void addComponent(Entity jointEntity, bool isSleeping, const JointComponent& component);
/// Return the entity of the first body of a joint
Entity getBody1Entity(Entity jointEntity) const;
/// Return the entity of the second body of a joint
Entity getBody2Entity(Entity jointEntity) const;
/// Return a pointer to the joint
Joint* getJoint(Entity jointEntity) const;
/// Return the type of a joint
JointType getType(Entity jointEntity) const;
/// Return the position correction technique of a joint
JointsPositionCorrectionTechnique getPositionCorrectionTechnique(Entity jointEntity) const;
/// Set the position correction technique of a joint
/// NOTE(review): this overload is a setter but is named "get..." — likely a
/// naming mistake (should be setPositionCorrectionTechnique). Renaming would
/// break existing callers, so it is only flagged here; confirm upstream.
void getPositionCorrectionTechnique(Entity jointEntity, JointsPositionCorrectionTechnique positionCorrectionTechnique);
/// Return true if the collision is enabled between the two bodies of a joint
bool getIsCollisionEnabled(Entity jointEntity) const;
/// Set whether the collision is enabled between the two bodies of a joint
void setIsCollisionEnabled(Entity jointEntity, bool isCollisionEnabled);
/// Return true if the joint has already been added into an island during island creation
bool getIsAlreadyInIsland(Entity jointEntity) const;
/// Set to true if the joint has already been added into an island during island creation
void setIsAlreadyInIsland(Entity jointEntity, bool isAlreadyInIsland);
// -------------------- Friendship -------------------- //
friend class BroadPhaseSystem;
friend class ConstraintSolverSystem;
friend class PhysicsWorld;
};
// Inline accessor definitions. Each one asserts that the joint entity is known
// and then indexes the packed component arrays via mMapEntityToComponentIndex.
// Return the entity of the first body of a joint
inline Entity JointComponents::getBody1Entity(Entity jointEntity) const {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
return mBody1Entities[mMapEntityToComponentIndex[jointEntity]];
}
// Return the entity of the second body of a joint
inline Entity JointComponents::getBody2Entity(Entity jointEntity) const {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
return mBody2Entities[mMapEntityToComponentIndex[jointEntity]];
}
// Return a pointer to the joint
inline Joint* JointComponents::getJoint(Entity jointEntity) const {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
return mJoints[mMapEntityToComponentIndex[jointEntity]];
}
// Return the type of a joint
inline JointType JointComponents::getType(Entity jointEntity) const {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
return mTypes[mMapEntityToComponentIndex[jointEntity]];
}
// Return the position correction technique of a joint
inline JointsPositionCorrectionTechnique JointComponents::getPositionCorrectionTechnique(Entity jointEntity) const {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
return mPositionCorrectionTechniques[mMapEntityToComponentIndex[jointEntity]];
}
// Set the position correction technique of a joint
// NOTE(review): despite the "get" name this overload is a setter; see the
// matching note on the declaration. Flagged only, since renaming breaks callers.
inline void JointComponents::getPositionCorrectionTechnique(Entity jointEntity, JointsPositionCorrectionTechnique positionCorrectionTechnique) {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
mPositionCorrectionTechniques[mMapEntityToComponentIndex[jointEntity]] = positionCorrectionTechnique;
}
// Return true if the collision is enabled between the two bodies of a joint
inline bool JointComponents::getIsCollisionEnabled(Entity jointEntity) const {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
return mIsCollisionEnabled[mMapEntityToComponentIndex[jointEntity]];
}
// Set whether the collision is enabled between the two bodies of a joint
inline void JointComponents::setIsCollisionEnabled(Entity jointEntity, bool isCollisionEnabled) {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
mIsCollisionEnabled[mMapEntityToComponentIndex[jointEntity]] = isCollisionEnabled;
}
// Return true if the joint has already been added into an island during island creation
inline bool JointComponents::getIsAlreadyInIsland(Entity jointEntity) const {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
return mIsAlreadyInIsland[mMapEntityToComponentIndex[jointEntity]];
}
// Set to true if the joint has already been added into an island during island creation
inline void JointComponents::setIsAlreadyInIsland(Entity jointEntity, bool isAlreadyInIsland) {
assert(mMapEntityToComponentIndex.containsKey(jointEntity));
mIsAlreadyInIsland[mMapEntityToComponentIndex[jointEntity]] = isAlreadyInIsland;
}
}
#endif
| 3,457 |
1,538 | package com.spinytech.macore.router;
import android.content.Context;
import android.text.TextUtils;
import com.spinytech.macore.tools.Logger;
import com.spinytech.macore.tools.ProcessUtil;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Created by wanglei on 2016/12/27.
*/
public class RouterRequest {
private static final String TAG = "RouterRequest";
private static volatile String DEFAULT_PROCESS = "";
private String from;
private String domain;
private String provider;
private String action;
private HashMap<String, String> data;
private Object object;
AtomicBoolean isIdle = new AtomicBoolean(true);
private static final int length = 64;
private static AtomicInteger sIndex = new AtomicInteger(0);
private static final int RESET_NUM = 1000;
private static volatile RouterRequest[] table = new RouterRequest[length];
static {
for (int i = 0; i < length; i++) {
table[i] = new RouterRequest();
}
}
// Creates an idle pooled request; from/domain default to the cached process
// name (may still be empty if getProcess() has never run in this process).
private RouterRequest() {
this.from = DEFAULT_PROCESS;
this.domain = DEFAULT_PROCESS;
this.provider = "";
this.action = "";
this.data = new HashMap<>();
}
// Creates a request whose from/domain are resolved from the given Context.
private RouterRequest(Context context) {
this.from = getProcess(context);
this.domain = getProcess(context);
this.provider = "";
this.action = "";
this.data = new HashMap<>();
}
// Copies all routing fields from a Builder (builder class not visible here).
private RouterRequest(Builder builder) {
this.from = builder.mFrom;
this.domain = builder.mDomain;
this.provider = builder.mProvider;
this.action = builder.mAction;
this.data = builder.mData;
}
/** Returns the name of the process that issued this request. */
public String getFrom() {
return from;
}
/** Returns the target process (routing domain) of this request. */
public String getDomain() {
return domain;
}
/** Returns the provider name within the target domain. */
public String getProvider() {
return provider;
}
/** Returns the action to invoke on the provider. */
public String getAction() {
return action;
}
/** Returns the live parameter map (not a copy; mutations affect the request). */
public HashMap<String, String> getData() {
return data;
}
/**
 * Returns the attached object and clears the internal reference, so the
 * object is delivered at most once (and can be garbage collected afterwards).
 */
public Object getAndClearObject() {
Object temp = object;
object = null;
return temp;
}
// Lazily resolves and caches the current process name. Re-resolves while the
// cached value is still empty or unknown, so early failures are retried.
// NOTE(review): the cache write is not synchronized; at worst two threads
// resolve the same value twice — confirm this is acceptable.
private static String getProcess(Context context) {
if (TextUtils.isEmpty(DEFAULT_PROCESS) || ProcessUtil.UNKNOWN_PROCESS_NAME.equals(DEFAULT_PROCESS)) {
DEFAULT_PROCESS = ProcessUtil.getProcessName(context, ProcessUtil.getMyProcessId());
}
return DEFAULT_PROCESS;
}
/**
 * Serializes this request to a JSON string by hand (from/domain/provider/action
 * plus the "data" map as a nested JSON object). If serializing the map fails,
 * "data" is written as the literal string "{}"; other JSON errors are only
 * printed and an incomplete object may be returned.
 */
@Override
public String toString() {
//Here remove Gson to save about 10ms.
//String result = new Gson().toJson(this);
JSONObject jsonObject = new JSONObject();
try {
jsonObject.put("from", from);
jsonObject.put("domain", domain);
jsonObject.put("provider", provider);
jsonObject.put("action", action);
try {
JSONObject jsonData = new JSONObject();
for (Map.Entry<String, String> entry : data.entrySet()) {
jsonData.put(entry.getKey(), entry.getValue());
}
jsonObject.put("data", jsonData);
} catch (Exception e) {
e.printStackTrace();
jsonObject.put("data", "{}");
}
} catch (JSONException e) {
e.printStackTrace();
}
return jsonObject.toString();
}
/**
 * Populates this request from the JSON produced by {@link #toString()}.
 * On a malformed "data" entry the parameter map is reset to empty; on any
 * other JSON error the stack trace is printed and already-parsed fields keep
 * their values. Returns this request for chaining.
 */
public RouterRequest json(String requestJsonString) {
//Here remove Gson to save about 10ms.
//RouterRequest routerRequest = new Gson().fromJson(requestJsonString, RouterRequest.class);
try {
JSONObject jsonObject = new JSONObject(requestJsonString);
this.from = jsonObject.getString("from");
this.domain = jsonObject.getString("domain");
this.provider = jsonObject.getString("provider");
this.action = jsonObject.getString("action");
try {
JSONObject jsonData = new JSONObject(jsonObject.getString("data"));
// Raw Iterator: JSONObject.keys() is untyped; keys are stringified defensively.
Iterator it = jsonData.keys();
while (it.hasNext()) {
String key = String.valueOf(it.next());
String value = (String) jsonData.get(key);
this.data.put(key, value);
}
} catch (Exception e) {
e.printStackTrace();
this.data = new HashMap<>();
}
} catch (JSONException e) {
e.printStackTrace();
}
return this;
}
public RouterRequest url(String url) {
int questIndex = url.indexOf('?');
String[] urls = url.split("\\?");
if (urls.length != 1 && urls.length != 2) {
Logger.e(TAG, "The url is illegal.");
return this;
}
String[] targets = urls[0].split("/");
if (targets.length == 3) {
this.domain = targets[0];
this.provider = targets[1];
this.action = targets[2];
} else {
Logger.e(TAG, "The url is illegal.");
return this;
}
//Add params
if (questIndex != -1) {
String queryString = urls[1];
if (queryString != null && queryString.length() > 0) {
int ampersandIndex, lastAmpersandIndex = 0;
String subStr, key, value;
String[] paramPair, values, newValues;
do {
ampersandIndex = queryString.indexOf('&', lastAmpersandIndex) + 1;
if (ampersandIndex > 0) {
subStr = queryString.substring(lastAmpersandIndex, ampersandIndex - 1);
lastAmpersandIndex = ampersandIndex;
} else {
subStr = queryString.substring(lastAmpersandIndex);
}
paramPair = subStr.split("=");
key = paramPair[0];
value = paramPair.length == 1 ? "" : paramPair[1];
try {
value = URLDecoder.decode(value, "UTF-8");
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
data.put(key, value);
} while (ampersandIndex > 0);
}
}
return this;
}
    /** Sets the domain and returns this request for chaining. */
    public RouterRequest domain(String domain) {
        this.domain = domain;
        return this;
    }

    /** Sets the provider and returns this request for chaining. */
    public RouterRequest provider(String provider) {
        this.provider = provider;
        return this;
    }

    /** Sets the action and returns this request for chaining. */
    public RouterRequest action(String action) {
        this.action = action;
        return this;
    }

    /** Adds one key/value parameter and returns this request for chaining. */
    public RouterRequest data(String key, String data) {
        this.data.put(key, data);
        return this;
    }

    /** Attaches an arbitrary payload object (not serialized by toString()). */
    public RouterRequest object(Object object) {
        this.object = object;
        return this;
    }
    /**
     * Obtains a request, preferring an idle instance from the static pool.
     * Starts the internal retry counter at 0.
     */
    public static RouterRequest obtain(Context context) {
        return obtain(context, 0);
    }
private static RouterRequest obtain(Context context, int retryTime) {
int index = sIndex.getAndIncrement();
if (index > RESET_NUM) {
sIndex.compareAndSet(index, 0);
if (index > RESET_NUM * 2) {
sIndex.set(0);
}
}
int num = index & (length - 1);
RouterRequest target = table[num];
if (target.isIdle.compareAndSet(true, false)) {
target.from = getProcess(context);
target.domain = getProcess(context);
target.provider = "";
target.action = "";
target.data.clear();
return target;
} else {
if (retryTime < 5) {
return obtain(context, retryTime++);
} else {
return new RouterRequest(context);
}
}
}
@Deprecated
public static class Builder {
private String mFrom;
private String mDomain;
private String mProvider;
private String mAction;
private HashMap<String, String> mData;
public Builder(Context context) {
mFrom = getProcess(context);
mDomain = getProcess(context);
mProvider = "";
mAction = "";
mData = new HashMap<>();
}
public Builder json(String requestJsonString) {
//Here remove Gson to save about 10ms.
//RouterRequest routerRequest = new Gson().fromJson(requestJsonString, RouterRequest.class);
try {
JSONObject jsonObject = new JSONObject(requestJsonString);
this.mFrom = jsonObject.getString("from");
this.mDomain = jsonObject.getString("domain");
this.mProvider = jsonObject.getString("provider");
this.mAction = jsonObject.getString("action");
try {
JSONObject jsonData = new JSONObject(jsonObject.getString("data"));
Iterator it = jsonData.keys();
while (it.hasNext()) {
String key = String.valueOf(it.next());
String value = (String) jsonData.get(key);
this.mData.put(key, value);
}
} catch (Exception e) {
e.printStackTrace();
this.mData = new HashMap<>();
}
} catch (JSONException e) {
e.printStackTrace();
}
return this;
}
public Builder url(String url) {
int questIndex = url.indexOf('?');
String[] urls = url.split("\\?");
if (urls.length != 1 && urls.length != 2) {
Logger.e(TAG, "The url is illegal.");
return this;
}
String[] targets = urls[0].split("/");
if (targets.length == 3) {
this.mDomain = targets[0];
this.mProvider = targets[1];
this.mAction = targets[2];
} else {
Logger.e(TAG, "The url is illegal.");
return this;
}
//Add params
if (questIndex != -1) {
String queryString = urls[1];
if (queryString != null && queryString.length() > 0) {
int ampersandIndex, lastAmpersandIndex = 0;
String subStr, key, value;
String[] paramPair, values, newValues;
do {
ampersandIndex = queryString.indexOf('&', lastAmpersandIndex) + 1;
if (ampersandIndex > 0) {
subStr = queryString.substring(lastAmpersandIndex, ampersandIndex - 1);
lastAmpersandIndex = ampersandIndex;
} else {
subStr = queryString.substring(lastAmpersandIndex);
}
paramPair = subStr.split("=");
key = paramPair[0];
value = paramPair.length == 1 ? "" : paramPair[1];
try {
value = URLDecoder.decode(value, "UTF-8");
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
mData.put(key, value);
} while (ampersandIndex > 0);
}
}
return this;
}
public Builder domain(String domain) {
this.mDomain = domain;
return this;
}
public Builder provider(String provider) {
this.mProvider = provider;
return this;
}
public Builder action(String action) {
this.mAction = action;
return this;
}
public Builder data(String key, String data) {
this.mData.put(key, data);
return this;
}
public RouterRequest build() {
return new RouterRequest(this);
}
}
}
| 6,186 |
2,151 | <filename>ios/chrome/browser/browsing_data/browsing_data_remover_observer.h
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_BROWSING_DATA_BROWSING_DATA_REMOVER_OBSERVER_H_
#define IOS_CHROME_BROWSER_BROWSING_DATA_BROWSING_DATA_REMOVER_OBSERVER_H_
#include "base/macros.h"
#include "ios/chrome/browser/browsing_data/browsing_data_remove_mask.h"
class BrowsingDataRemover;
// BrowsingDataRemoverObserver allows for observing browsing data removal
// by BrowsingDataRemover. Pure interface: subclasses must implement
// OnBrowsingDataRemoved. Copy and assignment are disallowed.
class BrowsingDataRemoverObserver {
 public:
  BrowsingDataRemoverObserver() = default;
  virtual ~BrowsingDataRemoverObserver() = default;

  // Invoked when data was successfully removed. The |mask| will represent
  // the type of removed data. See BrowsingDataRemoveMask for details.
  virtual void OnBrowsingDataRemoved(BrowsingDataRemover* remover,
                                     BrowsingDataRemoveMask mask) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(BrowsingDataRemoverObserver);
};
#endif // IOS_CHROME_BROWSER_BROWSING_DATA_BROWSING_DATA_REMOVER_OBSERVER_H_
| 439 |
5,169 | {
"name": "Modality",
"version": "0.0.1",
"authors": {
"<NAME>": "<EMAIL>"
},
"summary": "View focused transitions library. (Modals and other custom transitions)",
"homepage": "https://github.com/kevinwl02/Modality.git",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"platforms": {
"ios": "8.0"
},
"source": {
"git": "https://github.com/kevinwl02/Modality.git",
"tag": "0.0.1"
},
"source_files": [
"Modality/**/*.{h,m,mm}",
"Modality/**/**/*.{h,m,mm}"
],
"requires_arc": true
}
| 243 |
32,544 | package com.baeldung.rpc.finagle;
import com.twitter.finagle.Http;
import com.twitter.finagle.Service;
import com.twitter.finagle.http.Method;
import com.twitter.finagle.http.Request;
import com.twitter.finagle.http.Response;
import com.twitter.util.Await;
import com.twitter.util.Future;
import org.junit.Test;
import scala.runtime.BoxedUnit;
import static org.junit.Assert.assertEquals;
/**
 * Integration test wiring a Finagle HTTP server and client together on
 * localhost:8080 and asserting the round-tripped greeting.
 */
public class FinagleIntegrationTest {
    @Test
    public void givenServerAndClient_whenRequestSent_thenClientShouldReceiveResponseFromServer() throws Exception {
        // given: a greeting service behind a logging filter, served over HTTP.
        // Parameterized Service<Request, Response> instead of the raw type the
        // original used (raw types defeat generic type checking).
        Service<Request, Response> serverService = new LogFilter().andThen(new GreetingService());
        Http.serve(":8080", serverService);
        Service<Request, Response> clientService = new LogFilter().andThen(Http.newService(":8080"));

        // when: a GET request with a 'name' query parameter is sent
        Request request = Request.apply(Method.Get(), "/?name=John");
        request.host("localhost");
        Future<Response> response = clientService.apply(request);

        // then: the server greets the supplied name; failures are rethrown so
        // the test fails instead of silently passing.
        Await.result(response
                .onSuccess(r -> {
                    assertEquals("Hello John", r.getContentString());
                    return BoxedUnit.UNIT;
                })
                .onFailure(r -> {
                    throw new RuntimeException(r);
                })
        );
    }
}
| 545 |
419 | /**
* @file lv_refr.h
*
*/
#ifndef LV_REFR_H
#define LV_REFR_H

#ifdef __cplusplus
extern "C" {
#endif

/*********************
 *      INCLUDES
 *********************/
#include "lv_obj.h"
#include <stdbool.h>

/*********************
 *      DEFINES
 *********************/

/**********************
 *      TYPEDEFS
 **********************/

/**********************
 *  STATIC PROTOTYPES
 **********************/

/**********************
 *  STATIC VARIABLES
 **********************/

/**********************
 *      MACROS
 **********************/

/**********************
 *   GLOBAL FUNCTIONS
 **********************/

/**
 * Initialize the screen refresh subsystem
 */
void lv_refr_init(void);

/**
 * Invalidate an area so it is redrawn on the next refresh cycle
 * @param area_p pointer to area which should be invalidated
 */
void lv_inv_area(const lv_area_t * area_p);

/**
 * Set a function to call after every refresh to announce the refresh time and the number of refreshed pixels
 * @param cb pointer to a callback function (void my_refr_cb(uint32_t time_ms, uint32_t px_num))
 */
void lv_refr_set_monitor_cb(void (*cb)(uint32_t, uint32_t));

/**
 * Called when an area is invalidated to modify the coordinates of the area.
 * Special display controllers may require special coordinate rounding
 * @param cb pointer to the a function which will modify the area
 */
void lv_refr_set_round_cb(void(*cb)(lv_area_t*));

/**
 * Get the number of areas in the invalidation buffer
 * @return number of invalid areas
 */
uint16_t lv_refr_get_buf_size(void);

/**
 * Pop (delete) the last 'num' invalidated areas from the buffer
 * @param num number of areas to delete
 */
void lv_refr_pop_from_buf(uint16_t num);

/**********************
 *   STATIC FUNCTIONS
 **********************/

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /*LV_REFR_H*/
| 611 |
2,542 | <filename>src/prod/src/Management/HttpTransport/ClientRequest.GetResponseBodyAsyncOperation.cpp
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
using namespace std;
using namespace Common;
using namespace HttpClient;
using namespace HttpCommon;
StringLiteral const TraceType("HttpRequest.GetResponseBodyAsyncOperation");
// Entry point of the async operation: kicks off the first chunked read of
// the HTTP response body.
void ClientRequest::GetResponseBodyAsyncOperation::OnStart(AsyncOperationSPtr const &thisSPtr)
{
    StartRead(thisSPtr);
}
// Allocates a fresh DefaultEntityBodyChunkSize buffer and begins an async
// read of the next response chunk into it. Completion (sync or async) is
// funneled through OnReadComplete.
void ClientRequest::GetResponseBodyAsyncOperation::StartRead(AsyncOperationSPtr const &thisSPtr)
{
    auto status = KBuffer::Create(
        HttpConstants::DefaultEntityBodyChunkSize,
        currentBodyChunk_,
        GetSFDefaultPagedKAllocator(),
        HttpConstants::AllocationTag);

    if (!NT_SUCCESS(status))
    {
        // Allocation failed - complete the whole operation with the NT error.
        TryComplete(thisSPtr, ErrorCode::FromNtStatus(status));
        return;
    }

    KMemRef body(currentBodyChunk_->QuerySize(), currentBodyChunk_->GetBuffer());

    auto operation = clientRequest_->BeginGetResponseChunk(
        body,
        [this](AsyncOperationSPtr const &operation)
        {
            this->OnReadComplete(operation, false);
        },
        thisSPtr);

    // Handle the case where Begin completed synchronously (callback skipped).
    OnReadComplete(operation, true);
// Completion handler for one chunk read. Called twice per Begin (once inline,
// once from the callback); the CompletedSynchronously guard ensures the body
// runs exactly once. Appends the chunk, enforces the max body size, and loops
// back into StartRead until EOF.
void ClientRequest::GetResponseBodyAsyncOperation::OnReadComplete(
    AsyncOperationSPtr const &operation,
    bool expectedCompletedSynchronously)
{
    if (operation->CompletedSynchronously != expectedCompletedSynchronously)
    {
        return;
    }

    KMemRef memRef;
    ULONG winHttpError;
    auto error = clientRequest_->EndGetResponseChunk(operation, memRef, &winHttpError);
    if (!error.IsSuccess())
    {
        WriteWarning(
            TraceType,
            "GetResponseChunk failed for Url {0} with error : {1}, winHttpError : {2}",
            clientRequest_->requestUri_,
            error,
            winHttpError);

        TryComplete(operation->Parent, error);
        return;
    }

    if (memRef._Param == 0)
    {
        //
        // Reached EOF
        //
        TryComplete(operation->Parent, ErrorCodeValue::Success);
        return;
    }

    // Trim the buffer to the exact size read.
    currentBodyChunk_->SetSize(memRef._Param, TRUE);

    bufferArray_.Append(move(currentBodyChunk_));

    // Reject oversized responses before buffering more chunks.
    if (bufferArray_.Count() * HttpConstants::DefaultEntityBodyChunkSize >=
        HttpConstants::MaxEntityBodySize) // TODO: This should be changed to settings.xml
    {
        WriteWarning(
            TraceType,
            "Response body too large for url {0}",
            clientRequest_->requestUri_);

        TryComplete(operation->Parent, ErrorCodeValue::MessageTooLarge);
        return;
    }

    // Not EOF yet - issue the next chunk read.
    StartRead(operation->Parent);
}
// Completes the operation. On success, concatenates all buffered chunks into
// one contiguous ByteBuffer returned via |body|.
ErrorCode ClientRequest::GetResponseBodyAsyncOperation::End(AsyncOperationSPtr const &asyncOperation, ByteBufferUPtr &body)
{
    auto thisPtr = AsyncOperation::End<GetResponseBodyAsyncOperation>(asyncOperation);
    if (!thisPtr->Error.IsSuccess())
    {
        return thisPtr->Error;
    }

    // Total up chunk sizes with overflow-checked addition.
    ULONG bufferSize = 0;
    for (ULONG i = 0; i < thisPtr->bufferArray_.Count(); i++)
    {
        auto hr = ULongAdd(bufferSize, thisPtr->bufferArray_[i]->QuerySize(), &bufferSize);
        if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
    }

    body = make_unique<ByteBuffer>();
    body->resize(bufferSize);
    auto bufferPointer = (*body).data();

    // Copy each chunk, advancing the write cursor by the chunk's exact size.
    for (ULONG i = 0; i < thisPtr->bufferArray_.Count(); i++)
    {
        memcpy_s(bufferPointer, thisPtr->bufferArray_[i]->QuerySize(), thisPtr->bufferArray_[i]->GetBuffer(), thisPtr->bufferArray_[i]->QuerySize());
        bufferPointer += thisPtr->bufferArray_[i]->QuerySize();
    }

    return thisPtr->Error;
}
// Overload returning the concatenated body as a single KBuffer instead of a
// ByteBuffer.
ErrorCode ClientRequest::GetResponseBodyAsyncOperation::End(AsyncOperationSPtr const &asyncOperation, KBuffer::SPtr &body)
{
    auto thisPtr = AsyncOperation::End<GetResponseBodyAsyncOperation>(asyncOperation);
    if (!thisPtr->Error.IsSuccess())
    {
        return thisPtr->Error;
    }

    // Total up chunk sizes with overflow-checked addition.
    ULONG bufferSize = 0;
    for (ULONG i = 0; i < thisPtr->bufferArray_.Count(); i++)
    {
        auto hr = ULongAdd(bufferSize, thisPtr->bufferArray_[i]->QuerySize(), &bufferSize);
        if (FAILED(hr)) { return ErrorCode::FromHResult(hr); }
    }

    auto status = KBuffer::Create(
        bufferSize,
        body,
        GetSFDefaultPagedKAllocator(),
        HttpConstants::AllocationTag);
    if (!NT_SUCCESS(status))
    {
        return ErrorCode::FromNtStatus(status);
    }

    // Copy each chunk, advancing the write cursor by the chunk's exact size.
    auto bufferPointer = (BYTE *)body->GetBuffer();
    for (ULONG i = 0; i < thisPtr->bufferArray_.Count(); i++)
    {
        memcpy_s(bufferPointer, thisPtr->bufferArray_[i]->QuerySize(), thisPtr->bufferArray_[i]->GetBuffer(), thisPtr->bufferArray_[i]->QuerySize());
        bufferPointer += thisPtr->bufferArray_[i]->QuerySize();
    }

    return thisPtr->Error;
}
| 1,918 |
#pragma once

#include <torch/types.h>

#include <c10/util/Exception.h>

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <queue>
#include <utility>
namespace torch {
namespace data {
namespace detail {
/// A basic locked, blocking MPMC queue.
///
/// Every `push` and `pop` is guarded by a mutex. A condition variable is used
/// to communicate insertion of new elements, such that waiting threads will be
/// woken up if they are currently waiting inside a call to `pop()`.
///
/// Note that this data structure is written specifically for use with the
/// `DataLoader`. Its behavior is tailored to this use case and may not be
/// applicable to more general uses.
template <typename T>
class Queue {
 public:
  /// Pushes a new value to the back of the `Queue` and notifies one thread on
  /// the waiting side about this event.
  void push(T value) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      queue_.push(std::move(value));
    }
    cv_.notify_one();
  }

  /// Blocks until at least one element is ready to be popped from the front of
  /// the queue. An optional `timeout` in seconds can be used to limit the time
  /// spent waiting for an element. If the wait times out, an exception is
  /// raised.
  T pop(optional<std::chrono::milliseconds> timeout = nullopt) {
    std::unique_lock<std::mutex> lock(mutex_);
    if (timeout) {
      if (!cv_.wait_for(
              lock, *timeout, [this] { return !this->queue_.empty(); })) {
        // clang-format off
        AT_ERROR(
            "Timeout in DataLoader queue while waiting for next batch"
            " (timeout was ", timeout->count(), " ms)");
        // clang-format on
      }
    } else {
      cv_.wait(lock, [this] { return !this->queue_.empty(); });
    }
    AT_ASSERT(!queue_.empty());
    // PERF FIX: move the front element out instead of copying it; the original
    // copy-then-pop paid for a needless deep copy when T owns heap data
    // (e.g. a batch of tensors).
    T value = std::move(queue_.front());
    queue_.pop();
    lock.unlock();
    return value;
  }

  /// Empties the queue and returns the number of elements that were present at
  /// the start of the function. No threads are notified about this event as it
  /// is assumed to be used to drain the queue during shutdown of a
  /// `DataLoader`.
  size_t clear() {
    std::lock_guard<std::mutex> lock(this->mutex_);
    const auto size = queue_.size();
    while (!queue_.empty()) {
      queue_.pop();
    }
    return size;
  }

 private:
  std::queue<T> queue_;
  std::mutex mutex_;
  std::condition_variable cv_;
};
} // namespace detail
} // namespace data
} // namespace torch
| 942 |
631 | /*
* Copyright 2018 <NAME>. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.meituan.dorado.mock;
import com.meituan.dorado.common.exception.RpcException;
import com.meituan.dorado.config.service.ClientConfig;
import com.meituan.dorado.registry.meta.Provider;
import com.meituan.dorado.rpc.handler.invoker.AbstractInvoker;
import com.meituan.dorado.rpc.meta.RpcInvocation;
import com.meituan.dorado.rpc.meta.RpcResult;
import com.meituan.dorado.transport.meta.Request;
/**
 * Test double of {@link AbstractInvoker} whose {@link #invoke} always throws
 * {@link RpcException}, for exercising error-handling code paths.
 */
public class MockErrorInvoker extends AbstractInvoker {

    public MockErrorInvoker(Provider provider, Class<?> iface, ClientConfig config) {
        super(config, provider);
        this.provider = provider;
        this.serviceInterface = iface;
    }

    /** Variant without a client config; fields are assigned directly. */
    public MockErrorInvoker(Provider provider, Class<?> iface) {
        this.provider = provider;
        this.serviceInterface = iface;
    }

    /** @return a fresh {@link MockRequest}. */
    @Override
    public Request genRequest() {
        return new MockRequest();
    }

    /** Nothing to release for the mock. */
    @Override
    public void destroy() {
    }

    /** Always fails, simulating a remote invocation error. */
    @Override
    public RpcResult invoke(RpcInvocation invocation) throws Throwable {
        throw new RpcException("Mock invoke error");
    }
}
| 559 |
310 | <reponame>dreeves/usesthis
{
"name": "blueSky",
"description": "A reverb pedal.",
"url": "https://www.strymon.net/products/bluesky/"
} | 58 |
3,227 | #include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Constrained_Delaunay_triangulation_2.h>
#include <CGAL/Delaunay_mesh_face_base_2.h>
#include <CGAL/Delaunay_mesh_size_criteria_2.h>
#include <CGAL/Delaunay_mesher_2.h>
#include <CGAL/Triangulation_face_base_with_info_2.h>
#include <CGAL/Polygon_2.h>
#include <CGAL/point_generators_2.h>
#include <iostream>
#include <fstream>
typedef CGAL::Exact_predicates_inexact_constructions_kernel K;
typedef CGAL::Triangulation_vertex_base_2<K> Vb;
typedef CGAL::Delaunay_mesh_face_base_2<K> Fb;
typedef CGAL::Triangulation_data_structure_2<Vb, Fb> Tds;
typedef CGAL::Constrained_Delaunay_triangulation_2<K, Tds> CDT;
typedef CDT::Point Point;
typedef CGAL::Polygon_2<K> Polygon_2;
typedef CGAL::Delaunay_mesh_size_criteria_2<CDT> Mesh_2_criteria;
using namespace CGAL;
// Builds a constrained Delaunay triangulation from two disjoint polygons,
// refines it into a mesh, then draws 100 random points from the mesh and
// prints the first one.
int main()
{
  // Generated points are in that vector
  std::vector<Point> points;

  //Construct two non-intersecting nested polygons
  ::Polygon_2 polygon1;
  polygon1.push_back(Point(0,0));
  polygon1.push_back(Point(2,0));
  polygon1.push_back(Point(2,2));
  polygon1.push_back(Point(0,2));
  ::Polygon_2 polygon2;
  polygon2.push_back(Point(4.0,-2.0));
  polygon2.push_back(Point(4.0,2.0));
  polygon2.push_back(Point(6.0,0.0));

  //Insert the polygons into a constrained triangulation (closed boundaries)
  CDT cdt;
  cdt.insert_constraint(polygon1.vertices_begin(), polygon1.vertices_end(), true);
  cdt.insert_constraint(polygon2.vertices_begin(), polygon2.vertices_end(), true);

  // Refine the triangulation (and mark the faces as inside/outside)
  CGAL::refine_Delaunay_mesh_2(cdt, Mesh_2_criteria(0.125, 0.5));

  // Create the generator, input is the Triangulation_2 cdt
  Random_points_in_triangle_mesh_2<Point, CDT> g(cdt);

  // Get 100 random points in cdt
  std::copy_n(g, 100, std::back_inserter(points));

  // Check that we have really created 100 points.
  assert(points.size() == 100);

  // print the first point that was generated
  std::cout << points[0] << std::endl;

  return 0;
}
| 1,001 |
1,244 | <reponame>caiohamamura/libcxx<gh_stars>1000+
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// <utility>
// template <class T1, class T2> struct pair
// tuple_size<pair<T1, T2> >::value
#include <utility>
int main()
{
    {
        // tuple_size must report exactly 2 elements for any std::pair.
        typedef std::pair<int, short> P1;
        static_assert((std::tuple_size<P1>::value == 2), "");
    }
}
| 217 |
399 | /*
* Copyright 2012 <NAME>. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <NAME>HI OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are those of the
* authors and should not be interpreted as representing official policies, either expressed
* or implied.
*/
#include "USB/usb.h"
#ifndef DISABLE_BLUETOOTH
#include "usb_host_bluetooth.h"
#endif
#if defined(USB_SUPPORT_HOST) && !(defined(DISABLE_ACCESSORY) && defined(DISABLE_ADB))
#include "usb_host_android.h"
#endif
#ifdef USB_SUPPORT_DEVICE
#include "usb_device.h"
#endif
#ifdef USB_USE_CDC
#include "usb_device_cdc.h"
#endif
// Initializes the USB stack in whichever role(s) were selected at compile
// time (OTG, or host and/or device).
void USBInitialize() {
  //Enable pull-up on USBID
  _CN71PUE = 1;
#ifdef USB_SUPPORT_OTG
  USBOTGInitialize();
#else
#ifdef USB_SUPPORT_HOST
  USBHostInit(0);
#endif
#ifdef USB_SUPPORT_DEVICE
  USBDeviceInit();
#endif
#endif
}
// Stops the active USB stack (ending the OTG session, shutting down the host
// stack, or soft-detaching the device) and then powers the module off.
void USBShutdown() {
#ifdef USB_SUPPORT_OTG
  USBOTGSession(END_SESSION);
#else
#ifdef USB_SUPPORT_HOST
  USBHostShutdown();
  USBHostTasks();
#endif
#ifdef USB_SUPPORT_DEVICE
  USBSoftDetach();
#endif
#endif
  // hard power off of the USB module to ensure no interrupts to follow.
  U1PWRCbits.USBPWR = 0;
}
// Periodic USB pump. In OTG builds it services whichever role is current and
// returns the current role; otherwise it services the device stack (returns 0)
// or the host stack (returns 1).
// NOTE(review): in the OTG branch, the '#endif' for USB_SUPPORT_HOST appears
// before the closing brace of the ROLE_HOST 'if', so brace balance depends on
// the macro configuration — confirm this compiles in all configurations.
int USBTasks() {
#ifdef USB_SUPPORT_OTG
  //If Role Switch Occurred Then
  if (USBOTGRoleSwitch()) {
    //Clear Role Switch Flag
    USBOTGClearRoleSwitch();
  }

  if (!USBOTGHnpIsActive()) {
    if (USBOTGCurrentRoleIs() == ROLE_DEVICE) {
#ifdef USB_SUPPORT_DEVICE
#ifdef USB_INTERRUPT
      // Attach when the bus is powered and the device is still detached.
      if (USB_BUS_SENSE && (USBGetDeviceState() == DETACHED_STATE)) {
        USBDeviceAttach();
      }
#endif
#ifdef USB_POLLING
      USBDeviceTasks();
#endif
#ifdef USB_USE_CDC
      USBDeviceCDCTasks();
#endif
#else
      // No device stack: re-initialize OTG on an ID-pin interrupt.
      if (USBIDIF && USBIDIE) {
        //Re-detect & Initialize
        USBOTGInitialize();

        USBClearInterruptFlag(USBIDIFReg, USBIDIFBitNum);
      }
#endif
    }
#ifdef USB_SUPPORT_HOST
    if (USBOTGCurrentRoleIs() == ROLE_HOST) {
      USBHostTasks();
#ifndef USB_ENABLE_TRANSFER_EVENT
      USBHostAndroidTasks();
#ifndef DISABLE_BLUETOOTH
      USBHostBluetoothTasks();
#endif
#endif
#endif
    }
  }
  return USBOTGCurrentRoleIs();
#else
#ifdef USB_SUPPORT_DEVICE
#ifdef USB_POLLING
  USBDeviceTasks();
#endif
#ifdef USB_USE_CDC
  USBDeviceCDCTasks();
#endif
  return 0;
#endif
#ifdef USB_SUPPORT_HOST
  USBHostTasks();
#ifndef USB_ENABLE_TRANSFER_EVENT
  USBHostAndroidTasks();
#ifndef DISABLE_BLUETOOTH
  USBHostBluetoothTasks();
#endif
#endif
  return 1;
#endif
#endif
}
| 1,351 |
391 | #!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from mseg_semantic.utils.confusion_matrix_utils import plot_confusion_matrix
class ConfusionMatrixRenderer():
    """Accumulates per-pixel predictions/targets and renders a confusion
    matrix figure to disk."""

    def __init__(self, save_folder, class_names, dataset_name):
        """
        Args:
            save_folder: directory the rendered figure is written into.
            class_names: sequence of class-name strings (stored as ndarray).
            dataset_name: used in the figure title and output filename.
        """
        self.save_folder = save_folder
        self.class_names = np.array(class_names)
        self.dataset_name = dataset_name
        # Accumulated label columns of shape (N, 1), grown on every update().
        self.y_pred = np.zeros((0,1), dtype=np.int64)
        self.y_true = np.zeros((0,1), dtype=np.int64)

    def update(self, pred, target):
        """Append one batch of predictions and targets to the buffers.

        Args:
            pred: array of predicted labels (any shape; flattened to (-1, 1)).
            target: array of ground-truth labels, same size as pred.
        """
        # NOTE(review): vstack copies the whole accumulated buffer each call
        # (O(N^2) total work); consider buffering batches in a list and
        # stacking once in render().
        self.y_pred = np.vstack( [self.y_pred, pred.reshape(-1,1)] )
        self.y_true = np.vstack( [self.y_true, target.reshape(-1,1)] )

    def render(self):
        """Render the accumulated confusion matrix to
        ``{save_folder}/{dataset_name}_confusion_matrix_unnormalized.png``.

        No-op when only ignored pixels (or no pixels) were accumulated.
        """
        self.y_true, self.y_pred = remove_ignored_pixels(self.y_true, self.y_pred)
        # Only unlabeled pixels were found (test split), or zero images were processed
        if self.y_true.size == 0:
            return

        # NOTE(review): the title/filename says 'unnormalized' but
        # normalize=True is passed to plot_confusion_matrix — confirm intent.
        title_str = f'{self.dataset_name}_confusion_matrix_unnormalized'
        _ = plot_confusion_matrix(self.y_true, self.y_pred, self.class_names, normalize=True, title=title_str)
        figure_save_fpath = f'{self.save_folder}/{title_str}.png'
        plt.savefig(figure_save_fpath, dpi=400)
def remove_ignored_pixels(y_true, y_pred, ignore_index=255):
    """Drop entries whose ground-truth label equals ``ignore_index``.

    Args:
        y_true: array of ground-truth labels.
        y_pred: array of predicted labels, same size as ``y_true``.
        ignore_index: label value marking pixels to exclude (default 255).

    Returns:
        Tuple ``(y_true, y_pred)`` restricted to the non-ignored entries.
    """
    keep_mask = y_true != ignore_index
    kept_true = y_true[keep_mask]
    kept_pred = y_pred[keep_mask]
    assert kept_true.shape == kept_pred.shape, 'Target vector and predicted label vector are not aligned.'
    return kept_true, kept_pred
| 904 |
531 | <reponame>zosimovaa/10-steps-to-become-a-data-scientist
def check_missing_data(df):
    """Summarize missing values in a DataFrame.

    Args:
        df: input pandas DataFrame.

    Returns:
        A transposed DataFrame (rows 'Total', 'Percent', 'Types'; one column
        per input column) when any value is missing, otherwise False.
    """
    if not df.isna().any().any():
        return False
    total = df.isnull().sum()
    # Bug fix: the percentage of missing values is (sum / count) * 100;
    # the original computed sum / (count * 100), off by a factor of 10000.
    percent = df.isnull().sum() / df.isnull().count() * 100
    output = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
    # Record each column's dtype as a string alongside the missing-value stats.
    output['Types'] = [str(df[col].dtype) for col in df.columns]
    return np.transpose(output)
340 | <reponame>lgorenstein/hpc-container-maker
"""
MILC 7.8.1
Contents:
Ubuntu 16.04
CUDA version 10.2
GNU compilers (upstream)
OFED (upstream)
OpenMPI version 3.1.4
QUDA version 0.8.0
"""
# pylint: disable=invalid-name, undefined-variable, used-before-assignment
# pylama: ignore=E0602
gpu_arch = USERARG.get('GPU_ARCH', 'sm_60')
# add docstring to Dockerfile
Stage0 += comment(__doc__.strip(), reformat=False)
###############################################################################
# Devel stage
###############################################################################
Stage0 += baseimage(image='nvcr.io/nvidia/cuda:10.2-devel-ubuntu18.04',
_as='devel')
Stage0 += gnu()
Stage0 += cmake(eula=True)
Stage0 += ofed()
Stage0 += openmpi(version='3.1.4')
# build QUDA
Stage0 += packages(ospackages=['ca-certificates', 'git'])
Stage0 += generic_cmake(branch='develop',
cmake_opts=['-D CMAKE_BUILD_TYPE=RELEASE',
'-D QUDA_DIRAC_CLOVER=ON',
'-D QUDA_DIRAC_DOMAIN_WALL=ON',
'-D QUDA_DIRAC_STAGGERED=ON',
'-D QUDA_DIRAC_TWISTED_CLOVER=ON',
'-D QUDA_DIRAC_TWISTED_MASS=ON',
'-D QUDA_DIRAC_WILSON=ON',
'-D QUDA_FORCE_GAUGE=ON',
'-D QUDA_FORCE_HISQ=ON',
'-D QUDA_GPU_ARCH={}'.format(gpu_arch),
'-D QUDA_INTERFACE_MILC=ON',
'-D QUDA_INTERFACE_QDP=ON',
'-D QUDA_LINK_HISQ=ON',
'-D QUDA_MPI=ON'],
install=False,
ldconfig=True,
postinstall=['cp -a /var/tmp/quda/build/* /usr/local/quda'],
preconfigure=['mkdir -p /usr/local/quda'],
prefix='/usr/local/quda',
repository='https://github.com/lattice/quda.git')
# build MILC
Stage0 += generic_build(branch='develop',
build=['cp Makefile ks_imp_rhmc',
'cd ks_imp_rhmc',
'make -j 1 su3_rhmd_hisq \
CC=/usr/local/openmpi/bin/mpicc \
LD=/usr/local/openmpi/bin/mpicxx \
QUDA_HOME=/usr/local/quda \
WANTQUDA=true \
WANT_GPU=true \
WANT_CL_BCG_GPU=true \
WANT_FN_CG_GPU=true \
WANT_FL_GPU=true \
WANT_FF_GPU=true \
WANT_GF_GPU=true \
MPP=true \
PRECISION=2 \
WANTQIO=""'],
install=['mkdir -p /usr/local/milc/bin',
'cp /var/tmp/milc_qcd/ks_imp_rhmc/su3_rhmd_hisq /usr/local/milc/bin'],
prefix='/usr/local/milc',
repository='https://github.com/milc-qcd/milc_qcd')
Stage0 += environment(variables={'PATH': '/usr/local/milc/bin:$PATH'})
###############################################################################
# Release stage
###############################################################################
Stage1 += baseimage(image='nvcr.io/nvidia/cuda:10.2-base-ubuntu18.04')
Stage1 += packages(ospackages=['libcublas10'])
Stage1 += Stage0.runtime()
Stage1 += environment(variables={'PATH': '/usr/local/milc/bin:$PATH'})
| 2,264 |
313 | from module.utils import COLORS
page_1 =f''' {COLORS.GNSL}
⠀⠀⠀⢀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡀ _ _
⠀⠀⠀⡏⢢⡁⠂⠤⣀⣀⣀⣀⣀ ⠤⠐⢈⡔⢹ ___ ___(_)_ __ | |_ ___ __ _ _ __
⠀⠀⠀⢿⡀⠙⠆⠀⠉⠀⠀⠀⠀⠉⠀⠰⠋⢀⡿ / _ \/ __| | '_ \| __|____/ __|/ _` | '_ \
⠀ ⠀⠀⠀⠈⢷⠄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠠⡾⠁ | (_) \__ \ | | | | ||_____\__ \ (_| | | | |
⠀ ⠀⠀⠀⠀⠀⡏⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢹ \___/|___/_|_| |_|\__| |___/\__,_|_| |_|
⣰⠊⠉⠉⠉⡇⠀⠢⣤⣄⠀⠀ ⣠⣤⠔⠀⢸
⠙⠓⠒⢦⠀⠱⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⠎ _ _
⠀⠀⠀⠀⡇⠀⠀⠏⠑⠒⠀⠉⠀⠒⠊⠹ _ __ _ _ _ __ ___ | |__ ___ _ __ _ __ ___ ___ __| |
⡎⠉⢹⠀⠙⡶⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⢦⠀⠀⡏⠉⢱ | '_ \| | | | '_ ` _ \| '_ \ / _ \ '__| | '_ ` _ \ / _ \ / _` |
⢧⡈⠛⠉⠉⠀⠀⣠⠀⠀⠀⠀⠀⠀⠀⠀⣄⠀⠉⠉⠋⢁⡼ | | | | |_| | | | | | | |_) | __/ | | | | | | | (_) | (_| |
⠀⢉⣿⠖⠚⠛⢋⢀⠀⠀⠀⠀⠀⠀⠀⡀⡙⠛⠓⠲⣿⣄ |_| |_|\__,_|_| |_| |_|_.__/ \___|_| |_| |_| |_|\___/ \__,_|
⠀⢸⡇⠀⠀⠀⡞⠁⠈⡃⠀⠀⠀⠀⢘⠁⠈⢳⠀⠀⠀⢸⡇
⠀⠈⢷⣄⠀⠀⠙⠦⠌⠑⠢⠤⠔⠊⠁⢠⠎⠀⠀⣠⡾⠁
⠀ ⠀⠀⠈⠛⠲⠤⣤⣀⣀⣀⣀⣠⣤⣚⣡⠤⠖⠛⠁
{COLORS.REDL}[ {COLORS.GNSL}1 {COLORS.REDL}] {COLORS.WHSL} Международный поиск, базовый
{COLORS.REDL}[ {COLORS.GNSL}2 {COLORS.REDL}] {COLORS.WHSL} Страны бывшего советского союза.
{COLORS.REDL}[ {COLORS.GNSL}99 {COLORS.REDL}] {COLORS.WHSL}В главное меню OSINT-SAN
{COLORS.REDL} _________________________________________________________________________________________________________
'''
page_2 =f''' {COLORS.GNSL}
/$$$$$$ /$$$$$$ /$$$$$$ /$$ /$$ /$$$$$$$$
/$$__ $$ /$$__ $$|_ $$_/| $$$ | $$|__ $$__/
| $$ \ $$| $$ \__/ | $$ | $$$$| $$ | $$
| $$ | $$| $$$$$$ | $$ | $$ $$ $$ | $$
| $$ | $$ \____ $$ | $$ | $$ $$$$ | $$
| $$ | $$ /$$ \ $$ | $$ | $$\ $$$ | $$
| $$$$$$/| $$$$$$/ /$$$$$$| $$ \ $$ | $$
\______/ \______/ |______/|__/ \__/ |__/
/$$$$$$ /$$ /$$ /$$
/$$__ $$ |__/ | $$ |__/
| $$ \__/ /$$$$$$ /$$$$$$/$$$$ /$$$$$$/$$$$ /$$ /$$ /$$$$$$$ /$$ /$$$$$$ /$$ /$$$$$$ /$$$$$$$
| $$ /$$__ $$| $$_ $$_ $$| $$_ $$_ $$| $$ | $$| $$__ $$| $$|_ $$_/ | $$ /$$__ $$ /$$_____/
| $$ | $$ \ $$| $$ \ $$ \ $$| $$ \ $$ \ $$| $$ | $$| $$ \ $$| $$ | $$ | $$| $$$$$$$$| $$$$$$
| $$ $$| $$ | $$| $$ | $$ | $$| $$ | $$ | $$| $$ | $$| $$ | $$| $$ | $$ /$$| $$| $$_____/ \____ $$
| $$$$$$/| $$$$$$/| $$ | $$ | $$| $$ | $$ | $$| $$$$$$/| $$ | $$| $$ | $$$$/| $$| $$$$$$$ /$$$$$$$/
\______/ \______/ |__/ |__/ |__/|__/ |__/ |__/ \______/ |__/ |__/|__/ \___/ |__/ \_______/|_______/
{COLORS.WHSL}
Вы можете собрать очень большое количество информации по тематике OSINT в telegram каналах.
Список только самых информативных каналов.
{COLORS.WHSL}Канал OSINT CLUB Investigation & Forensic tools Записки сетевого выживальщика OSINT| Сетевая разведка
{COLORS.WHSL}Канал {COLORS.GNSL}@osint_club_channel {COLORS.WHSL} Канал {COLORS.GNSL} @forensictools {COLORS.WHSL} Канал {COLORS.GNSL} @NetSurvivalist {COLORS.WHSL} Канал{COLORS.GNSL} @osintology
{COLORS.WHSL} Управляющий @tochka_osint {COLORS.WHSL}Управляющий{COLORS.GNSL} @beholderishere {COLORS.WHSL} Управляющий {COLORS.GNSL}@beholderishere {COLORS.WHSL} Управляющий {COLORS.GNSL}@OSINT_GROUP
{COLORS.WHSL} Нетипичный ИБ Интернет - розыск Russian OSINT
{COLORS.WHSL} Канал {COLORS.GNSL} @tmgroupsecurity {COLORS.WHSL} Канал {COLORS.GNSL}@irozysk {COLORS.WHSL} Канал{COLORS.GNSL} @Russian_OSINT
{COLORS.WHSL} Управляющий{COLORS.GNSL} @mifkelevra {COLORS.WHSL} Управляющий {COLORS.GNSL}@irozysk_bot {COLORS.WHSL} Управляющий {COLORS.GNSL}@russian_osint_bot
{COLORS.REDL}__________________________________________________________________________________________________________________________________________________
'''
page_3 =f'''
{COLORS.WHSL}Пароли разбиты на архивы. Общий вес более 10 гб
{COLORS.WHSL}1.5 миллиардов паролей {COLORS.GNSL}https://mega.nz/file/9GhkwBLK#nNj5L6qPMGT679lGRjpZFV-kG0kx2m8VXMrmoTUe7cQ
{COLORS.WHSL}14 млн паролей, 250 МБ, зеркало {COLORS.GNSL}
SHA1 hash of the 7-Zip file: 00fc585efad08a4b6323f8e4196aae9207f8b09f
SHA1 hash of the text file: 3fe6457fa8be6da10191bffa0f4cec43603a9f56
https://downloads.pwnedpasswords.com/passwords/pwned-passwords-update-1.txt.7z
{COLORS.WHSL}306 млн паролей, 5,3 ГБ, зеркало {COLORS.GNSL}
SHA1 hash of the 7-Zip file: 90d57d16a2dfe00de6cc58d0fa7882229ace4a53
SHA1 hash of the text file: d3f3ba6d05b9b451c2b59fd857d94ea421001b16
https://downloads.pwnedpasswords.com/passwords/pwned-passwords-1.0.txt.7z
{COLORS.WHSL}Словари для брута wifi сетей. Проверьте поледнее на virus total
{COLORS.GNSL}https://yadi.sk/d/O5FQG3B4zNmS5Q
{COLORS.WHSL}10 млн + паролей
{COLORS.GNSL}https://github.com/Bafomet666/password_one '''
page_4 =f'''
{COLORS.WHSL}Глобальный мод для поиска информации по нику. Cейчас он загрузится в папку OSINT-SAN
{COLORS.WHSL}Сейчас пройдет загрузка, иди в папку{COLORS.GNSL} OSINT-SAN/maigret{COLORS.WHSL} дальше открывай терминал в папке maigret,
вводи {COLORS.GNSL}python3 maigret.py username {COLORS.WHSL}для запуска, {COLORS.GNSL}username {COLORS.WHSL}можешь любой подставить
'''
| 5,435 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.