import datetime
import pytest
import pytz
from .....product.models import Collection, CollectionChannelListing
from ....tests.utils import assert_graphql_error_with_message, get_graphql_content
@pytest.fixture
def collections_for_sorting_with_channels(channel_USD, channel_PLN):
collections = Collection.objects.bulk_create(
[
Collection(name="Collection1", slug="collection1"),
Collection(name="Collection2", slug="collection2"),
Collection(name="Collection3", slug="collection3"),
Collection(name="Collection4", slug="collection4"),
Collection(name="Collection5", slug="collection5"),
]
)
CollectionChannelListing.objects.bulk_create(
[
CollectionChannelListing(
collection=collections[0],
published_at=None,
is_published=True,
channel=channel_USD,
),
CollectionChannelListing(
collection=collections[1],
published_at=None,
is_published=False,
channel=channel_USD,
),
CollectionChannelListing(
collection=collections[2],
published_at=datetime.datetime(2004, 1, 1, tzinfo=pytz.UTC),
is_published=False,
channel=channel_USD,
),
CollectionChannelListing(
collection=collections[3],
published_at=datetime.datetime(2003, 1, 1, tzinfo=pytz.UTC),
is_published=False,
channel=channel_USD,
),
# second channel
CollectionChannelListing(
collection=collections[0],
published_at=None,
is_published=False,
channel=channel_PLN,
),
CollectionChannelListing(
collection=collections[1],
published_at=None,
is_published=True,
channel=channel_PLN,
),
CollectionChannelListing(
collection=collections[2],
published_at=datetime.datetime(2002, 1, 1, tzinfo=pytz.UTC),
is_published=False,
channel=channel_PLN,
),
CollectionChannelListing(
collection=collections[4],
published_at=datetime.datetime(2001, 1, 1, tzinfo=pytz.UTC),
is_published=False,
channel=channel_PLN,
),
]
)
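# Note for the parametrized tests below: sorting by PUBLICATION_DATE places
# listings that have a publication date first (oldest to newest for ASC) and
# listings with a null date last; DESC reverses that order.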
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING = """
query (
$sortBy: CollectionSortingInput,
$filter: CollectionFilterInput, $channel: String
){
collections (
first: 10, sortBy: $sortBy, filter: $filter, channel: $channel
) {
edges {
node {
name
slug
}
}
}
}
"""
@pytest.mark.parametrize(
"sort_by",
[
{"field": "AVAILABILITY", "direction": "ASC"},
{"field": "PUBLICATION_DATE", "direction": "DESC"},
],
)
def test_collections_with_sorting_and_without_channel(
sort_by,
staff_api_client,
permission_manage_products,
):
# given
variables = {"sortBy": sort_by}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
assert_graphql_error_with_message(response, "A default channel does not exist.")
@pytest.mark.parametrize(
"sort_by, collections_order",
[
(
{"field": "PUBLICATION_DATE", "direction": "ASC"},
["Collection4", "Collection3", "Collection1", "Collection2"],
),
(
{"field": "PUBLICATION_DATE", "direction": "DESC"},
["Collection2", "Collection1", "Collection3", "Collection4"],
),
],
)
def test_collections_with_sorting_and_channel_USD(
sort_by,
collections_order,
staff_api_client,
permission_manage_products,
collections_for_sorting_with_channels,
channel_USD,
):
# given
variables = {"sortBy": sort_by, "channel": channel_USD.slug}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
collections_nodes = content["data"]["collections"]["edges"]
for index, collection_name in enumerate(collections_order):
assert collection_name == collections_nodes[index]["node"]["name"]
@pytest.mark.parametrize(
"sort_by, collections_order",
[
(
{"field": "PUBLICATION_DATE", "direction": "ASC"},
["Collection5", "Collection3", "Collection1", "Collection2"],
),
(
{"field": "PUBLICATION_DATE", "direction": "DESC"},
["Collection2", "Collection1", "Collection3", "Collection5"],
),
],
)
def test_collections_with_sorting_and_channel_PLN(
sort_by,
collections_order,
staff_api_client,
permission_manage_products,
collections_for_sorting_with_channels,
channel_PLN,
):
# given
variables = {"sortBy": sort_by, "channel": channel_PLN.slug}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
collections_nodes = content["data"]["collections"]["edges"]
for index, collection_name in enumerate(collections_order):
assert collection_name == collections_nodes[index]["node"]["name"]
@pytest.mark.parametrize(
"sort_by",
[
{"field": "AVAILABILITY", "direction": "ASC"},
{"field": "PUBLICATION_DATE", "direction": "ASC"},
],
)
def test_collections_with_sorting_and_not_existing_channel_asc(
sort_by,
staff_api_client,
permission_manage_products,
collections_for_sorting_with_channels,
channel_USD,
):
# given
variables = {"sortBy": sort_by, "channel": "Not-existing"}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
assert not content["data"]["collections"]["edges"]
@pytest.mark.parametrize(
"sort_by",
[
{"field": "AVAILABILITY", "direction": "DESC"},
{"field": "PUBLICATION_DATE", "direction": "DESC"},
],
)
def test_collections_with_sorting_and_not_existing_channel_desc(
sort_by,
staff_api_client,
permission_manage_products,
collections_for_sorting_with_channels,
channel_USD,
):
# given
variables = {"sortBy": sort_by, "channel": "Not-existing"}
# when
response = staff_api_client.post_graphql(
QUERY_COLLECTIONS_WITH_SORTING_AND_FILTERING,
variables,
permissions=[permission_manage_products],
check_no_permissions=False,
)
# then
content = get_graphql_content(response)
assert not content["data"]["collections"]["edges"]
// Repo: dharmesh5591/realworld-serverless-application
package software.amazon.serverless.apprepo.cucumber.steps;
import static org.assertj.core.api.Assertions.assertThat;
import software.amazon.serverless.apprepo.api.client.AWSServerlessApplicationRepository;
import software.amazon.serverless.apprepo.api.client.model.ApplicationSummary;
import software.amazon.serverless.apprepo.api.client.model.ListApplicationsRequest;
import software.amazon.serverless.apprepo.api.client.model.ListApplicationsResult;
import com.google.common.base.Preconditions;
import com.google.inject.Inject;
import java.util.Comparator;
import java.util.List;
import java.util.UUID;
import java.util.stream.Collectors;
import io.cucumber.java.en.And;
import io.cucumber.java.en.Then;
import io.cucumber.java.en.When;
import lombok.extern.slf4j.Slf4j;
/**
* Implementation of the steps in ListApplications.feature.
*/
@Slf4j
public class ListApplicationsSteps {
@Inject
private AWSServerlessApplicationRepository appRepo;
@When("the user lists applications")
public void the_user_lists_applications() {
try {
appRepo.listApplications(new ListApplicationsRequest());
} catch (Exception e) {
// do nothing and verify exception in the next step
}
}
@When("the user lists applications with ([1-9][0-9]*)? max items")
public void the_user_lists_applications_with_max_items(int maxItems) {
try {
appRepo.listApplications(new ListApplicationsRequest()
.maxItems(Integer.toString(maxItems)));
} catch (Exception e) {
// do nothing and verify exception in the next step
}
}
@When("the user lists applications with next token")
public void the_user_lists_applications_with_next_token() {
Preconditions.checkState(TestEnv.getApplicationList().getNextToken() != null, "Step assumes next token exists.");
try {
appRepo.listApplications(new ListApplicationsRequest()
.nextToken(TestEnv.getApplicationList().getNextToken()));
} catch (Exception e) {
// do nothing and verify exception in the next step
}
}
@When("^an unauthorized user lists applications$")
public void an_unauthorized_user_lists_applications() {
// Set a wrong password
    TestEnv.setPassword(UUID.randomUUID().toString());
try {
appRepo.listApplications(new ListApplicationsRequest());
} catch (Exception e) {
// do nothing and verify exception in the next step
}
}
@Then("all applications should be listed")
public void all_applications_should_be_listed() {
assertThat(TestEnv.getLastException()).isNull();
Preconditions.checkState(!TestEnv.getApplications().isEmpty(), "Step assumes previous applications exist");
Preconditions.checkState(TestEnv.getApplicationList() != null, "Step assumes listApplications has been called");
List<ApplicationSummary> expectedApplicationSummaries = TestEnv.getApplications().stream()
.map(app -> new ApplicationSummary()
.applicationId(app.getApplicationId())
.description(app.getDescription())
.creationTime(app.getCreationTime()))
.collect(Collectors.toList());
assertThat(TestEnv.getApplicationList().getApplications()).containsAll(expectedApplicationSummaries);
assertThat(TestEnv.getApplicationList().getNextToken()).isNull();
}
@Then("([1-9][0-9]*)? applications should be listed")
public void applications_should_be_listed(int count) {
assertThat(TestEnv.getLastException()).isNull();
Preconditions.checkState(!TestEnv.getApplications().isEmpty(), "Step assumes previous applications exist");
Preconditions.checkState(TestEnv.getApplicationList() != null, "Step assumes listApplications has been called");
List<ApplicationSummary> expectedApplicationSummaries = TestEnv.getApplications().stream()
.map(app -> new ApplicationSummary()
.applicationId(app.getApplicationId())
.description(app.getDescription())
.creationTime(app.getCreationTime()))
.collect(Collectors.toList());
List<ApplicationSummary> applicationSummaries = TestEnv.getApplicationList().getApplications();
assertThat(applicationSummaries).hasSize(count);
assertThat(expectedApplicationSummaries).containsAll(applicationSummaries);
}
@And("the application should no longer be listed")
public void the_application_should_no_longer_be_listed() {
Preconditions.checkState(TestEnv.getApplication() != null, "Step assumes an application has been created before");
ApplicationSummary applicationSummary = new ApplicationSummary()
.applicationId(TestEnv.getApplication().getApplicationId())
.description(TestEnv.getApplication().getDescription())
.creationTime(TestEnv.getApplication().getCreationTime());
ListApplicationsResult listApplicationsResult = appRepo.listApplications(new ListApplicationsRequest());
assertThat(listApplicationsResult.getApplicationList().getApplications())
.doesNotContain(applicationSummary);
}
@And("the listed applications should be in alphabetical order")
public void the_listed_applications_should_be_in_alphabetical_order() {
Preconditions.checkState(TestEnv.getApplicationList() != null, "Step assumes listApplications has been called");
assertThat(TestEnv.getApplicationList().getApplications())
.isSortedAccordingTo(Comparator.comparing(ApplicationSummary::getApplicationId));
}
}
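// A hypothetical fragment of ListApplications.feature that these steps would
// match (the real feature file is not part of this source):
//
//   When the user lists applications with 3 max items
//   Then 3 applications should be listed
//   And the listed applications should be in alphabetical order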
#pragma once
#include "stdafx.h"
#include <string>
class TxtUtils
{
public:
static bool WriteStringToTxt(std::wstring &filePath, std::wstring &content);
};
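// A minimal sketch of a matching implementation (assumes MSVC's wide-character
// fstream path overload, consistent with the stdafx.h/Windows setup above;
// encoding and locale handling deliberately omitted):
//
//   #include <fstream>
//   bool TxtUtils::WriteStringToTxt(std::wstring &filePath, std::wstring &content)
//   {
//       std::wofstream out(filePath.c_str()); // truncates any existing file
//       if (!out.is_open())
//           return false;
//       out << content;
//       return out.good();
//   }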
# Repo: wimax-grapl/pants
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Information on your project, such as listing the targets in your project."""
from pants.backend.project_info import (
count_loc,
dependees,
dependencies,
filedeps,
filter_targets,
list_roots,
list_targets,
paths,
peek,
source_file_validator,
)
def rules():
return [
*count_loc.rules(),
*dependees.rules(),
*dependencies.rules(),
*filedeps.rules(),
*filter_targets.rules(),
*list_roots.rules(),
*list_targets.rules(),
*paths.rules(),
*peek.rules(),
*source_file_validator.rules(),
]
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/lite_video/lite_video_switches.h"
#include "base/command_line.h"
#include "base/strings/string_number_conversions.h"
namespace lite_video {
namespace switches {
// Overrides the network conditions checks for LiteVideos.
const char kLiteVideoIgnoreNetworkConditions[] =
"lite-video-ignore-network-conditions";
// Overrides all the LiteVideo decision logic to allow it on every navigation.
// This causes LiteVideos to ignore the hints, user blocklist, and
// network condition.
const char kLiteVideoForceOverrideDecision[] =
"lite-video-force-override-decision";
// Forces the coinflip used for a counterfactual experiment to be true.
const char kLiteVideoForceCoinflipHoldback[] =
"lite-video-force-coinflip-holdback";
// The default downlink bandwidth estimate used for throttling media requests.
// Only used when forcing LiteVideos to be allowed.
const char kLiteVideoDefaultDownlinkBandwidthKbps[] =
"lite-video-default-downlink-bandwidth-kbps";
bool ShouldIgnoreLiteVideoNetworkConditions() {
return base::CommandLine::ForCurrentProcess()->HasSwitch(
kLiteVideoIgnoreNetworkConditions);
}
bool ShouldOverrideLiteVideoDecision() {
return base::CommandLine::ForCurrentProcess()->HasSwitch(
kLiteVideoForceOverrideDecision);
}
bool ShouldForceCoinflipHoldback() {
return base::CommandLine::ForCurrentProcess()->HasSwitch(
kLiteVideoForceCoinflipHoldback);
}
int GetDefaultDownlinkBandwidthKbps() {
// Command line override takes priority.
base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
if (command_line->HasSwitch(
switches::kLiteVideoDefaultDownlinkBandwidthKbps)) {
int downlink_bandwidth_kbps;
if (base::StringToInt(command_line->GetSwitchValueASCII(
switches::kLiteVideoDefaultDownlinkBandwidthKbps),
&downlink_bandwidth_kbps)) {
return downlink_bandwidth_kbps;
}
}
return 400;
}
} // namespace switches
} // namespace lite_video
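// Hypothetical invocation combining the switches above to force LiteVideos on
// with a custom bandwidth estimate:
//
//   chrome --lite-video-force-override-decision \
//          --lite-video-default-downlink-bandwidth-kbps=300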
// File: modules/bullet_physics/components_generic.h
#pragma once
#include "../../components/component.h"
#include "../../storage/dense_vector.h"
#include "../../storage/dense_vector_storage.h"
#include "components_area.h"
struct Force {
COMPONENT_BATCH(Force, DenseVector, -1)
static void _bind_methods();
static void _get_storage_config(Dictionary &r_config);
Vector3 location;
Vector3 force;
};
struct Torque {
COMPONENT_BATCH(Torque, DenseVector, -1)
static void _bind_methods();
static void _get_storage_config(Dictionary &r_config);
Vector3 torque;
};
struct Impulse {
COMPONENT_BATCH(Impulse, DenseVector, -1)
static void _bind_methods();
static void _get_storage_config(Dictionary &r_config);
Vector3 location;
Vector3 impulse;
};
struct TorqueImpulse {
COMPONENT_BATCH(TorqueImpulse, DenseVector, -1)
static void _bind_methods();
static void _get_storage_config(Dictionary &r_config);
Vector3 impulse;
};
/*
* Author: <NAME> <<EMAIL>>
* Copyright (c) 2014 Intel Corporation.
*
* This program and the accompanying materials are made available under the
* terms of the The MIT License which is available at
* https://opensource.org/licenses/MIT.
*
* SPDX-License-Identifier: MIT
*/
#include <iostream>
#include <signal.h>
#include <stdio.h>
#include "mma7455.hpp"
#include "upm_utilities.h"
int doWork = 0;
void
sig_handler(int signo)
{
printf("got signal\n");
if (signo == SIGINT) {
printf("exiting application\n");
doWork = 1;
}
}
int
main(int argc, char** argv)
{
    //! [Interesting]
    signal(SIGINT, sig_handler); // install the handler so Ctrl-C exits the loop
    upm::MMA7455 sensor(0, ADDR);
short x, y, z;
while (!doWork) {
sensor.readData(&x, &y, &z);
std::cout << "Accelerometer X(" << x << ") Y(" << y << ") Z(" << z << ")" << std::endl;
upm_delay_us(100000);
}
//! [Interesting]
std::cout << "exiting application" << std::endl;
return 0;
}
// File: docs/data/leg-t2/080/08003406.json
{"nom":"Hallencourt","circ":"3ème circonscription","dpt":"Somme","inscrits":1101,"abs":576,"votants":525,"blancs":58,"nuls":25,"exp":442,"res":[{"nuance":"LR","nom":"M. <NAME>","voix":246},{"nuance":"REM","nom":"M. <NAME>","voix":196}]}
// File: modules/basic/stream/recordbatch_stream.cc
/** Copyright 2020-2021 Alibaba Group Holding Limited.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "basic/stream/recordbatch_stream.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "arrow/util/config.h"
#include "arrow/util/key_value_metadata.h"
#include "basic/ds/arrow.h"
#include "client/client.h"
#include "client/ds/blob.h"
#include "client/ds/i_object.h"
#include "common/util/uuid.h"
namespace vineyard {
Status RecordBatchStreamWriter::Push(std::shared_ptr<Object> const& chunk) {
return client_.ClientBase::PushNextStreamChunk(id_, chunk->id());
}
Status RecordBatchStreamWriter::Push(ObjectMeta const& chunk) {
return client_.ClientBase::PushNextStreamChunk(id_, chunk.GetId());
}
Status RecordBatchStreamWriter::Push(ObjectID const& chunk) {
return client_.ClientBase::PushNextStreamChunk(id_, chunk);
}
Status RecordBatchStreamWriter::Abort() {
if (stoped_) {
return Status::OK();
}
stoped_ = true;
return client_.StopStream(id_, true);
}
Status RecordBatchStreamWriter::Finish() {
if (stoped_) {
return Status::OK();
}
stoped_ = true;
return client_.StopStream(id_, false);
}
Status RecordBatchStreamWriter::WriteTable(
std::shared_ptr<arrow::Table> table) {
std::vector<std::shared_ptr<arrow::RecordBatch>> batches;
RETURN_ON_ERROR(TableToRecordBatches(table, &batches));
for (auto const& batch : batches) {
RETURN_ON_ERROR(WriteBatch(batch));
}
return Status::OK();
}
Status RecordBatchStreamWriter::WriteBatch(
std::shared_ptr<arrow::RecordBatch> batch) {
RecordBatchBuilder builder(client_, batch);
return this->Push(builder.Seal(client_));
}
Status RecordBatchStreamWriter::WriteDataframe(std::shared_ptr<DataFrame> df) {
return WriteBatch(df->AsBatch());
}
Status RecordBatchStreamReader::GetNext(std::shared_ptr<Object>& chunk) {
return client_.ClientBase::PullNextStreamChunk(id_, chunk);
}
Status RecordBatchStreamReader::ReadRecordBatches(
std::vector<std::shared_ptr<arrow::RecordBatch>>& batches) {
std::shared_ptr<arrow::RecordBatch> batch;
while (true) {
auto status = ReadBatch(batch);
if (status.ok()) {
batches.emplace_back(
std::dynamic_pointer_cast<RecordBatch>(batch)->GetRecordBatch());
} else if (status.IsStreamDrained()) {
break;
} else {
return status;
}
}
return Status::OK();
}
Status RecordBatchStreamReader::ReadTable(
std::shared_ptr<arrow::Table>& table) {
std::vector<std::shared_ptr<arrow::RecordBatch>> batches;
RETURN_ON_ERROR(this->ReadRecordBatches(batches));
#if defined(ARROW_VERSION) && ARROW_VERSION < 17000
RETURN_ON_ARROW_ERROR(arrow::Table::FromRecordBatches(batches, &table));
#else
RETURN_ON_ARROW_ERROR_AND_ASSIGN(table,
arrow::Table::FromRecordBatches(batches));
#endif
return Status::OK();
}
Status RecordBatchStreamReader::ReadBatch(
std::shared_ptr<arrow::RecordBatch>& batch) {
std::shared_ptr<Object> recordbatch;
auto status = this->GetNext(recordbatch);
if (status.ok()) {
batch =
std::dynamic_pointer_cast<RecordBatch>(recordbatch)->GetRecordBatch();
}
return status;
}
Status RecordBatchStreamReader::GetHeaderLine(bool& header_row,
std::string& header_line) {
if (params_.find("header_row") != params_.end()) {
header_row = (params_["header_row"] == "1");
if (params_.find("header_line") != params_.end()) {
header_line = params_["header_line"];
} else {
header_line = "";
}
} else {
header_row = false;
header_line = "";
}
return Status::OK();
}
Status RecordBatchStream::OpenReader(
Client& client, std::unique_ptr<RecordBatchStreamReader>& reader) {
RETURN_ON_ERROR(client.OpenStream(id_, StreamOpenMode::read));
reader = std::unique_ptr<RecordBatchStreamReader>(
new RecordBatchStreamReader(client, id_, meta_, params_));
return Status::OK();
}
Status RecordBatchStream::OpenWriter(
Client& client, std::unique_ptr<RecordBatchStreamWriter>& writer) {
RETURN_ON_ERROR(client.OpenStream(id_, StreamOpenMode::write));
writer = std::unique_ptr<RecordBatchStreamWriter>(
new RecordBatchStreamWriter(client, id_, meta_));
return Status::OK();
}
} // namespace vineyard
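// A minimal writer-side usage sketch of the APIs above (client/stream setup
// and error handling are elided; VINEYARD_CHECK_OK is assumed from the common
// status utilities):
//
//   std::unique_ptr<RecordBatchStreamWriter> writer;
//   VINEYARD_CHECK_OK(stream->OpenWriter(client, writer));
//   VINEYARD_CHECK_OK(writer->WriteTable(table));
//   VINEYARD_CHECK_OK(writer->Finish());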
// File: vfs_handlers/testing/azure_blob_storage_test.cc
/** -*- C++ -*-
* azure_blob_storage_test.cc
* Mich, 2017-02-15
* This file is part of MLDB. Copyright 2017 mldb.ai inc. All rights reserved.
**/
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <string>
#include <boost/test/unit_test.hpp>
#include "mldb/arch/file_functions.h"
#include "mldb/utils/testing/fixtures.h"
#include "mldb/vfs/filter_streams.h"
#include "mldb/vfs/fs_utils.h"
#include "mldb/vfs_handlers/azure_blob_storage.h"
#include <boost/test/included/unit_test.hpp>
using namespace boost::unit_test;
using namespace std;
using namespace MLDB;
struct AzureRegistration {
AzureRegistration() : registered(false)
{
const char * home = getenv("HOME");
if (home == NULL) {
return;
}
auto filename = home + string("/.azure_cloud_credentials");
if (MLDB::fileExists(filename)) {
std::ifstream stream(filename.c_str());
int lineNum = 1;
for (; stream; ++lineNum) {
string line;
getline(stream, line);
if (line.empty()) {
continue;
}
try {
registerAzureStorageAccount(line);
}
catch (const MLDB::Exception & exc) {
cerr << "Failed at " << filename << ":" << lineNum << endl;
throw;
}
registered = true;
}
}
}
bool isRegistered()
{
return registered;
}
private:
bool registered;
} azureRegistration;
void test_azure_storage_invalid_registration_string()
{
MLDB_TRACE_EXCEPTIONS(false);
BOOST_REQUIRE_THROW(registerAzureStorageAccount(""), MLDB::Exception);
BOOST_REQUIRE_THROW(registerAzureStorageAccount("patate"), MLDB::Exception);
BOOST_REQUIRE_THROW(registerAzureStorageAccount("a;b;c"), MLDB::Exception);
BOOST_REQUIRE_THROW(
registerAzureStorageAccount(
"DefaultEndpointsProtocol=proto;AccountName=;AccountKey=key1;"),
MLDB::Exception);
}
void test_azure_storage_double_registration()
{
MLDB_TRACE_EXCEPTIONS(false);
string connStr =
"DefaultEndpointsProtocol=proto;AccountName=name;AccountKey=key1234=;";
registerAzureStorageAccount(connStr);
BOOST_REQUIRE_THROW(registerAzureStorageAccount(connStr), MLDB::Exception);
}
void test_azure_storage_read()
{
filter_istream read("azureblob://publicelementai/private/a_propos.txt");
string res = read.readAll();
cerr << res << endl;
BOOST_REQUIRE_EQUAL(res.find("# Ne pas supprimer"), 0);
}
void test_azure_storage_write_and_erase()
{
auto now = Date::now();
string outputUri =
"azureblob://publicelementai/private/subdirectory/ecriture"
+ to_string(now.secondsSinceEpoch()) + ".txt";
filter_ostream w(outputUri);
string outputStr = "writing to azure from filter_ostream" + now.printIso8601();
w << outputStr;
w.close();
auto objectInfo = tryGetUriObjectInfo(outputUri);
BOOST_REQUIRE(objectInfo.exists);
filter_istream read(outputUri);
auto res = read.readAll();
cerr << "READ: " << res << endl;
cerr << "EXPECTED: " << outputStr << endl;
BOOST_REQUIRE_EQUAL(res, outputStr);
// Erase
eraseUriObject(outputUri);
objectInfo = tryGetUriObjectInfo(outputUri);
BOOST_REQUIRE(!objectInfo.exists);
}
void test_azure_file_crawler()
{
// for each uri
auto onObject = [&] (const string & uri,
const FsObjectInfo & info,
const OpenUriObject & open,
int depth) -> bool
{
cerr << "on object: " << uri << " - " << info.size << endl;
return true;
};
auto onSubDir = [&] (const string & dirName, int depth) -> bool
{
return true;
};
forEachUriObject("azureblob://publicelementai/private/", onObject, onSubDir);
}
test_suite*
init_unit_test_suite( int argc, char* argv[] )
{
framework::master_test_suite().add(
BOOST_TEST_CASE(&test_azure_storage_invalid_registration_string));
framework::master_test_suite().add(
BOOST_TEST_CASE(&test_azure_storage_double_registration));
auto azureTests = {&test_azure_storage_read,
&test_azure_storage_write_and_erase,
&test_azure_file_crawler};
if (azureRegistration.isRegistered()) {
for (const auto & test: azureTests) {
framework::master_test_suite().add(
BOOST_TEST_CASE(test));
}
}
else {
cerr << "No valid azure connection string found, skipping "
<< azureTests.size() << " related tests" << endl;
}
return 0;
}
// Repo: xxoolm/Thanox-1
package github.tornaco.android.thanos.service;
import android.app.Service;
import android.content.Intent;
import android.os.IBinder;
import androidx.annotation.Keep;
import androidx.annotation.Nullable;
import com.elvishew.xlog.XLog;
import com.topjohnwu.superuser.Shell;
import java.util.Arrays;
import github.tornaco.android.thanos.core.su.ISu;
import github.tornaco.android.thanos.core.su.SuRes;
@Keep
public class SuSupportService extends Service {
@Nullable
@Override
public IBinder onBind(Intent intent) {
return new ISu.Stub() {
@Override
public SuRes exe(String[] command) {
Shell.Result result = Shell.su(command).exec();
XLog.w("SuSupportService exe: %s, result: %s", Arrays.toString(command), result);
return new SuRes(result.getOut(), result.getErr(), result.getCode());
}
};
}
}
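// A client typically binds this service and wraps the returned binder via the
// standard AIDL pattern (hypothetical caller sketch):
//
//   ISu su = ISu.Stub.asInterface(binder);
//   SuRes res = su.exe(new String[]{"id"});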
/*
*
* * Copyright 2010-2016 OrientDB LTD (http://orientdb.com)
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* *
* * For more information: http://orientdb.com
*
*/
package com.orientechnologies.orient.server.distributed.impl.task;
import com.orientechnologies.common.log.OLogManager;
import com.orientechnologies.orient.core.command.OCommandDistributedReplicateRequest;
import com.orientechnologies.orient.core.command.OCommandOutputListener;
import com.orientechnologies.orient.core.config.OGlobalConfiguration;
import com.orientechnologies.orient.server.distributed.task.OAbstractReplicatedTask;
import java.util.UUID;
/**
* Abstract task for synchronization of database from a remote node.
*
* @author <NAME> (l.garulli--at--orientdb.com)
*/
public abstract class OAbstractSyncDatabaseTask extends OAbstractReplicatedTask
implements OCommandOutputListener {
public static final int CHUNK_MAX_SIZE = 8388608; // 8MB
public static final String DEPLOYDB = "deploydb.";
public static final int FACTORYID = 14;
protected long random;
public OAbstractSyncDatabaseTask() {
random = UUID.randomUUID().getLeastSignificantBits();
}
@Override
public RESULT_STRATEGY getResultStrategy() {
return RESULT_STRATEGY.UNION;
}
@Override
public OCommandDistributedReplicateRequest.QUORUM_TYPE getQuorumType() {
return OCommandDistributedReplicateRequest.QUORUM_TYPE.ALL;
}
@Override
public long getDistributedTimeout() {
return OGlobalConfiguration.DISTRIBUTED_DEPLOYDB_TASK_SYNCH_TIMEOUT.getValueAsLong();
}
@Override
public void onMessage(String iText) {
if (iText.startsWith("\r\n")) iText = iText.substring(2);
if (iText.startsWith("\n")) iText = iText.substring(1);
OLogManager.instance().info(this, iText);
}
@Override
public boolean isNodeOnlineRequired() {
return false;
}
}
#ifndef GENERATOR_SVG_HPP
#define GENERATOR_SVG_HPP
#include <algorithm>
#include <array>
#include <map>
#include <memory>
#include <sstream>
#include <vector>
#include "math.hpp"
#include "iterator.hpp"
#include "mesh_vertex.hpp"
#include "path_vertex.hpp"
#include "shape_vertex.hpp"
#include "triangle.hpp"
namespace generator
{
/// A simple svg writer class for generating preview and debug images.
class svg_writer_t
{
private:
class BaseElem
{
public:
double z_;
gml::dvec3 color_;
BaseElem(double z, const gml::dvec3& color);
virtual ~BaseElem();
// Writes this svg element to a stream.
virtual void stream(std::ostream&) const = 0;
};
class VertexElem : public BaseElem
{
public:
gml::dvec3 p_;
VertexElem(const gml::dvec3& p, const gml::dvec3& color);
virtual void stream(std::ostream& os) const override;
};
class LineElem : public BaseElem
{
public:
gml::dvec3 p1_, p2_;
LineElem(const gml::dvec3& p1, const gml::dvec3& p2, const gml::dvec3& color);
virtual void stream(std::ostream& os) const override;
};
class TriangleElem : public BaseElem
{
public:
std::array<gml::dvec3, 3> p_;
TriangleElem(const gml::dvec3& p1, const gml::dvec3& p2, const gml::dvec3& p3,
const gml::dvec3& color);
virtual void stream(std::ostream& os) const override;
};
gml::dvec3 project(const gml::dvec3& p) const;
gml::dvec3 normalToColor(const gml::dvec3& normal) const;
gml::ivec2 size_;
gml::dmat4 viewMatrix_;
gml::dmat4 projMatrix_;
gml::dmat4 viewProjMatrix_;
gml::ivec2 viewportOrigin_;
gml::ivec2 viewportSize_;
gml::dvec3 lightDir_;
bool cullface_;
mutable std::vector<std::unique_ptr<BaseElem>> elems_;
public:
/// @param width Width of the image in pixels
        /// @param height Height of the image in pixels
svg_writer_t(int width, int height);
/// Sets the model view matrix. Default is the identity matrix.
void modelView(const gml::dmat4& matrix);
/// Sets the projection mode to perspective projection.
/// Default is the orthographic.
/// @param fovy Field of view along the y-axis.
        /// @param aspect Aspect ratio (should usually match the viewport)
void perspective(double fovy, double aspect, double zNear, double zFar);
/// Sets the projection mode to orthographic projection.
/// This is the default.
/// @param left Coordinate that maps to the left edge.
/// @param right Coordinate that maps to the right edge.
void ortho(double left, double right, double bottom, double top);
/// Sets the viewport. Default fills the whole image.
void viewport(int x, int y, int width, int height);
/// Sets if backfacing triangles should be culled. Default is true.
void cullface(bool cullface);
/// Write one point. Drawn as a circle.
void writePoint(const gml::dvec3& p, const gml::dvec3& color = {0.0, 0.0, 0.0});
/// Write one line.
void writeLine(const gml::dvec3& p1, const gml::dvec3& p2, const gml::dvec3& color = {0.0, 0.0, 0.0});
/// Write one triangle.
void writeTriangle(const gml::dvec3& p1, const gml::dvec3& p2, const gml::dvec3& p3,
const gml::dvec3& color);
/// Write one triangle with color automatically calculated from light.
void writeTriangle(const gml::dvec3& p1, const gml::dvec3& p2, const gml::dvec3& p3);
/// Write all shaped edges and optionally vertices, tangents and normals.
template <typename shape_t>
void writeShape(const shape_t& shape, bool writeVertices = false, bool writeAxis = false)
{
std::vector<shape_vertex_t> vertices{};
for(const auto& vertex : shape.vertices())
{
vertices.push_back(vertex);
}
for(auto e : shape.edges())
{
auto p1 = gml::dvec3{vertices[e.vertices[0]].position, 0.0};
auto p2 = gml::dvec3{vertices[e.vertices[1]].position, 0.0};
writeLine(p1, p2, {0.5, 0.5, 0.5});
}
if(writeAxis)
{
for(auto v : vertices)
{
auto p1 = gml::dvec3{v.position, 0.0};
auto p2 = gml::dvec3{v.position + 0.1 * v.tangent, 0.0};
auto p3 = gml::dvec3{v.position + 0.1 * v.normal(), 0.0};
writeLine(p1, p2, {0.0, 1.0, 0.0});
writeLine(p1, p3, {1.0, 0.0, 0.0});
}
}
if(writeVertices)
{
for(auto v : shape.vertices())
{
writePoint(gml::dvec3{v.position, 0.0});
}
}
}
/// Write all path edges as lines and optionally vertices, tangents, normals
/// and binormals.
template <typename Path>
void writePath(const Path& path, bool writeVertices = false, bool writeAxis = false)
{
std::vector<path_vertex_t> vertices{};
for(const auto& temp : path.vertices())
{
vertices.push_back(temp);
}
if(writeAxis)
{
for(const auto& v : path.vertices())
{
writeLine(v.position, v.position + 0.1 * v.tangent, {0.0, 0.0, 1.0});
writeLine(v.position, v.position + 0.1 * v.normal, {1.0, 0.0, 0.0});
writeLine(v.position, v.position + 0.1 * v.binormal(), {0.0, 1.0, 0.0});
}
}
if(writeVertices)
{
for(const auto& v : path.vertices())
{
writePoint(v.position + 0.001 * v.normal);
}
}
for(const auto& e : path.edges())
{
writeLine(vertices[e.vertices[0]].position, vertices[e.vertices[1]].position);
}
}
/// Write all triangles from a mesh.
template <typename Mesh>
void writeMesh(const Mesh& mesh, bool writeVertices = false, bool writeNormals = false)
{
std::vector<mesh_vertex_t> vertices{};
for(const mesh_vertex_t& vertex : mesh.vertices())
{
vertices.push_back(vertex);
}
for(triangle_t t : mesh.triangles())
{
writeTriangle(vertices[t.vertices[0]].position, vertices[t.vertices[1]].position,
vertices[t.vertices[2]].position);
}
if(writeVertices)
{
for(const auto& v : vertices)
{
writePoint(v.position);
}
}
// Normals
if(writeNormals)
{
for(const auto& v : vertices)
{
writeLine(v.position, v.position + 0.1 * v.normal, {0.0, 0.0, 1.0});
}
}
}
/// Generates svg xml from the data written so far.
std::string str() const;
};
}
#endif
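// A minimal usage sketch of the interface above (the mesh object and the file
// output are assumptions, not part of this header):
//
//   generator::svg_writer_t svg(512, 512);
//   svg.perspective(1.0, 1.0, 0.1, 100.0);
//   svg.writeMesh(mesh);
//   std::ofstream("preview.svg") << svg.str();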
import numpy as np
from mushroom_rl.core import Serializable
class QRegressor(Serializable):
"""
This class is used to create a regressor that approximates the Q-function
using a multi-dimensional output where each output corresponds to the
Q-value of each action. This is used, for instance, by the ``ConvNet`` used
in examples/atari_dqn.
"""
def __init__(self, approximator, **params):
"""
Constructor.
Args:
approximator (class): the model class to approximate the
Q-function;
**params: parameters dictionary to the regressor.
"""
self.model = approximator(**params)
self._add_save_attr(
model=self._get_serialization_method(approximator)
)
def fit(self, state, action, q, **fit_params):
"""
Fit the model.
Args:
state (np.ndarray): states;
action (np.ndarray): actions;
q (np.ndarray): target q-values;
**fit_params: other parameters used by the fit method of the
regressor.
"""
self.model.fit(state, action, q, **fit_params)
def predict(self, *z, **predict_params):
"""
Predict.
Args:
*z: a list containing states or states and actions depending
on whether the call requires to predict all q-values or only
one q-value corresponding to the provided action;
**predict_params: other parameters used by the predict method
of each regressor.
Returns:
The predictions of the model.
"""
assert len(z) == 1 or len(z) == 2
state = z[0]
q = self.model.predict(state, **predict_params)
if len(z) == 2:
action = z[1].ravel()
if q.ndim == 1:
return q[action]
else:
return q[np.arange(q.shape[0]), action]
else:
return q
def reset(self):
"""
Reset the model parameters.
"""
try:
self.model.reset()
except AttributeError:
raise NotImplementedError('Attempt to reset weights of a'
' non-parametric regressor.')
@property
def weights_size(self):
return self.model.weights_size
def get_weights(self):
return self.model.get_weights()
def set_weights(self, w):
self.model.set_weights(w)
def diff(self, state, action=None):
if action is None:
return self.model.diff(state)
else:
return self.model.diff(state, action).squeeze()
def __len__(self):
return len(self.model)
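# A minimal usage sketch (the approximator class and its parameters below are
# placeholders, not part of this module):
#
#   regressor = QRegressor(MyApproximator, n_actions=4)
#   regressor.fit(states, actions, q_targets)
#   q_all = regressor.predict(states)           # Q-values for every action
#   q_sel = regressor.predict(states, actions)  # Q-value of the taken action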
from Algos.Encrypt import Encrypt
from Algos.Decrypt import Decrypt
"""
[NOTE] Here we demonstrate a use of the Encrypt and Decrypt algorithms.
We also play the audio files.
"""
# You can try with Forest.wav as well
message_path = input("Enter path of Audio file: ")
secret = input("Enter secret message: ")
# Using Encrypt
en = Encrypt(message_path, secret)
en.play_audio()
res, status = en.encrypt_using_lsb("Encrypted", "encrypted.wav")
if status:
print(res)
# Using Decrypt
dec = Decrypt("Encrypted/encrypted.wav")
dec.play_audio()
res, status = dec.decrypt_audio("Decrypted", "decrypted.txt", False)
if status:
print(res)
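# For illustration, the core LSB idea that Encrypt/Decrypt build on: store one
# message bit in the least significant bit of each audio byte. A simplified
# sketch, not the project's actual implementation:
def lsb_embed(frames: bytearray, bits: str) -> bytearray:
    for i, bit in enumerate(bits):
        frames[i] = (frames[i] & 0xFE) | int(bit)  # clear the LSB, set our bit
    return frames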
// File: iPhoneOS11.4.sdk/System/Library/Frameworks/StoreKit.framework/Headers/SKAdNetwork.h
//
// SKAdNetwork.h
// StoreKit
//
// Copyright © 2017 Apple Inc. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <StoreKit/StoreKitDefines.h>
NS_ASSUME_NONNULL_BEGIN
SK_EXTERN_CLASS_AVAILABLE(11_3) @interface SKAdNetwork : NSObject
// Participating apps should call this on launch to complete the install+open action associated with a product view
+ (void)registerAppForAdNetworkAttribution NS_AVAILABLE_IOS(11_3);
@end
// Constants for use with SKStoreProductViewController to associate a product view with an install+open
// Advertising network's cryptographic signature for the attribution params (NSString)
SK_EXTERN NSString * const SKStoreProductParameterAdNetworkAttributionSignature NS_AVAILABLE_IOS(11_3);
// Advertising network campaign identifier (NSNumber)
SK_EXTERN NSString * const SKStoreProductParameterAdNetworkCampaignIdentifier NS_AVAILABLE_IOS(11_3);
// Advertising network identifier (NSString)
SK_EXTERN NSString * const SKStoreProductParameterAdNetworkIdentifier NS_AVAILABLE_IOS(11_3);
// Random entropy value for security (NSUUID)
SK_EXTERN NSString * const SKStoreProductParameterAdNetworkNonce NS_AVAILABLE_IOS(11_3);
// Timestamp for this ad impression (NSNumber)
SK_EXTERN NSString * const SKStoreProductParameterAdNetworkTimestamp NS_AVAILABLE_IOS(11_3);
NS_ASSUME_NONNULL_END
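// Typical usage sketch: register once at launch, then pass the
// SKStoreProductParameterAdNetwork* keys in the parameters dictionary handed
// to -[SKStoreProductViewController loadProductWithParameters:completionBlock:].
//
//   [SKAdNetwork registerAppForAdNetworkAttribution];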
// Repo: Kunple-w/sofa-boot
// File: sofa-boot-project/sofa-boot-core/healthcheck-sofa-boot/src/main/java/com/alipay/sofa/healthcheck/impl/ModuleHealthChecker.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alipay.sofa.healthcheck.impl;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.Status;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import com.alipay.sofa.boot.constant.SofaBootConstants;
import com.alipay.sofa.healthcheck.core.HealthChecker;
import com.alipay.sofa.isle.ApplicationRuntimeModel;
import com.alipay.sofa.isle.deployment.DeploymentDescriptor;
/**
* Abstract Module Health Checker
*
* @author xuanbei 18/5/16
*/
public class ModuleHealthChecker implements ApplicationContextAware, HealthChecker {
@Value("${" + SofaBootConstants.SOFABOOT_MODULE_CHECK_RETRY_COUNT + ":"
+ SofaBootConstants.SOFABOOT_MODULE_CHECK_RETRY_DEFAULT_COUNT + "}")
private int retryCount;
@Value("${" + SofaBootConstants.SOFABOOT_MODULE_CHECK_RETRY_INTERVAL + ":"
+ SofaBootConstants.SOFABOOT_MODULE_CHECK_RETRY_DEFAULT_INTERVAL + "}")
private long retryInterval;
@Value("${" + SofaBootConstants.SOFABOOT_MODULE_CHECK_STRICT_ENABLED + ":"
+ SofaBootConstants.SOFABOOT_MODULE_CHECK_STRICT_DEFAULT_ENABLED + "}")
private boolean strictCheck;
@Value("${" + SofaBootConstants.SOFABOOT_MODULE_HEALTH_CHECK_TIMEOUT + ":"
+ SofaBootConstants.SOFABOOT_MODULE_HEALTH_CHECK_DEFAULT_TIMEOUT + "}")
private int timeout;
private ApplicationContext applicationContext;
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
@Override
public Health isHealthy() {
Health.Builder builder = new Health.Builder();
ApplicationRuntimeModel application = applicationContext.getBean(
SofaBootConstants.APPLICATION, ApplicationRuntimeModel.class);
for (DeploymentDescriptor deploymentDescriptor : application.getInstalled()) {
builder.withDetail(deploymentDescriptor.getName(), "passed");
}
for (DeploymentDescriptor deploymentDescriptor : application.getAllInactiveDeployments()) {
builder.withDetail(deploymentDescriptor.getName(), "inactive");
}
for (DeploymentDescriptor deploymentDescriptor : application.getFailed()) {
builder.withDetail(deploymentDescriptor.getName(), "failed");
}
if (application.getFailed().size() == 0) {
return builder.status(Status.UP).build();
} else {
return builder.status(Status.DOWN).build();
}
}
@Override
public String getComponentName() {
return "SOFABoot-Modules";
}
@Override
public int getRetryCount() {
return retryCount;
}
@Override
public long getRetryTimeInterval() {
return retryInterval;
}
@Override
public boolean isStrictCheck() {
return strictCheck;
}
@Override
public int getTimeout() {
return timeout;
}
}
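// The @Value fields above resolve retry/timeout settings from Spring
// properties with defaults; hypothetical override keys shown for illustration
// only (the real keys live in SofaBootConstants):
//
//   com.alipay.sofa.healthcheck.module.check.retry.count=10
//   com.alipay.sofa.healthcheck.module.check.retry.interval=2000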
// File: vespalib/src/vespa/vespalib/util/array.hpp
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
#include "array.h"
#include <cstdlib>
#include <cstring>
#include <type_traits>
namespace vespalib {
template <typename T>
void construct(T * dest, const T * source, size_t sz, std::false_type)
{
for (size_t i(0); i < sz; i++) {
::new (static_cast<void *>(dest + i)) T(*(source + i));
}
}
template <typename T>
void construct(T * dest, const T * source, size_t sz, std::true_type)
{
memcpy(dest, source, sz*sizeof(T));
}
template <typename T>
void construct(T * dest, size_t sz, std::false_type)
{
for (size_t i(0); i < sz; i++) {
void *ptr = &dest[i];
new(ptr) T();
}
}
template <typename T>
void construct(T * dest, size_t sz, std::true_type)
{
(void) dest;
(void) sz;
}
template <typename T>
void construct(T * dest, size_t sz, T val, std::false_type)
{
for (size_t i(0); i < sz; i++) {
void *ptr = &dest[i];
new(ptr) T(val);
}
}
template <typename T>
void construct(T * dest, size_t sz, T val, std::true_type)
{
for (size_t i(0); i < sz; i++) {
dest[i] = val;
}
}
template <typename T>
Array<T>::Array(const Array & rhs)
: _array(rhs._array.create(rhs.size() * sizeof(T))),
_sz(rhs.size())
{
construct(array(0), rhs.array(0), _sz, std::is_trivially_copyable<T>());
}
template <typename T>
Array<T> & Array<T>::operator =(const Array & rhs)
{
if (&rhs != this) {
Array t(rhs);
swap(t);
}
return *this;
}
template <typename T>
Array<T> & Array<T>::operator =(Array && rhs) noexcept {
if (&rhs != this) {
Array t(std::move(rhs));
swap(t);
}
return *this;
}
template <typename T>
void Array<T>::assign(const_iterator begin_, const_iterator end_) {
Array tmp(begin_, end_);
swap(tmp);
}
template <typename T>
void Array<T>::reserve(size_t n) {
if (capacity() < n) {
increase(n);
}
}
template <typename T>
bool Array<T>::try_unreserve(size_t n)
{
if (n >= capacity()) {
return false;
}
if (n < size()) {
return false;
}
return _array.resize_inplace(n * sizeof(T));
}
template <typename T>
void Array<T>::resize(size_t n)
{
if (n > capacity()) {
reserve(n);
}
if (n > _sz) {
construct(array(_sz), n-_sz, std::is_trivially_default_constructible<T>());
} else if (n < _sz) {
std::destroy(array(n), array(_sz));
}
_sz = n;
}
template <typename T>
void move(T * dest, T * source, size_t sz, std::false_type)
{
for (size_t i(0); i < sz; i++) {
::new (static_cast<void *>(dest + i)) T(std::move(*(source + i)));
std::destroy_at(source + i);
}
}
template <typename T>
void move(T * dest, const T * source, size_t sz, std::true_type)
{
memcpy(dest, source, sz*sizeof(T));
}
template <typename T>
void Array<T>::increase(size_t n)
{
Alloc newArray(_array.create(sizeof(T)*n));
if (capacity() > 0) {
move(static_cast<T *>(newArray.get()), array(0), _sz, std::is_trivially_copyable<T>());
}
_array.swap(newArray);
}
template <typename T>
Array<T>::Array(const Alloc & initial)
: _array(initial.create(0)),
_sz(0)
{ }
template <typename T>
Array<T>::Array(Alloc && buf, size_t sz) :
_array(std::move(buf)),
_sz(sz)
{
}
template <typename T>
Array<T>::Array(Array &&rhs) noexcept
: _array(std::move(rhs._array)),
_sz(rhs._sz)
{
rhs._sz = 0;
}
template <typename T>
Array<T>::Array(size_t sz, const Alloc & initial) :
_array(initial.create(sz * sizeof(T))),
_sz(sz)
{
construct(array(0), _sz, std::is_trivially_default_constructible<T>());
}
template <typename T>
Array<T>::Array(size_t sz, T value, const Alloc & initial) :
_array(initial.create(sz * sizeof(T))),
_sz(sz)
{
construct(array(0), _sz, value, std::is_trivially_copyable<T>());
}
template <typename T>
Array<T>::Array(const_iterator begin_, const_iterator end_, const Alloc & initial) :
_array(initial.create(begin_ != end_ ? sizeof(T) * (end_-begin_) : 0)),
_sz(end_-begin_)
{
construct(array(0), begin_, _sz, std::is_trivially_copyable<T>());
}
template <typename T>
Array<T>::~Array()
{
cleanup();
}
template <typename T>
void Array<T>::cleanup()
{
std::destroy(array(0), array(_sz));
_sz = 0;
Alloc().swap(_array);
}
}
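// A minimal usage sketch based on the constructors and methods above:
//
//   vespalib::Array<int> a(8, 42);  // eight elements, all initialized to 42
//   a.reserve(32);                  // grow capacity, moving elements over
//   a.resize(16);                   // default-construct the new tail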
/*
The MIT License (MIT)
Copyright (c) [2016] [BTC.COM]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include "StratumMinerBeam.h"
#include "StratumSessionBeam.h"
#include "StratumServerBeam.h"
#include "DiffController.h"
#include "CommonBeam.h"
///////////////////////////////// StratumMinerBeam
///////////////////////////////////
StratumMinerBeam::StratumMinerBeam(
StratumSessionBeam &session,
const DiffController &diffController,
const std::string &clientAgent,
const std::string &workerName,
int64_t workerId)
: StratumMinerBase(
session, diffController, clientAgent, workerName, workerId) {
}
void StratumMinerBeam::handleRequest(
const std::string &idStr,
const std::string &method,
const JsonNode &jparams,
const JsonNode &jroot) {
if (method == "solution") {
handleRequest_Submit(idStr, jroot);
}
}
void StratumMinerBeam::handleRequest_Submit(
const string &idStr, const JsonNode &jroot) {
  // JsonNode's string-indexed accessors are non-const, so cast constness away.
JsonNode &jsonRoot = const_cast<JsonNode &>(jroot);
auto &session = getSession();
if (session.getState() != StratumSession::AUTHENTICATED) {
handleShare(idStr, StratumStatus::UNAUTHORIZED, 0, session.getChainId());
return;
}
if (jsonRoot["id"].type() != Utilities::JS::type::Str ||
jsonRoot["nonce"].type() != Utilities::JS::type::Str ||
jsonRoot["output"].type() != Utilities::JS::type::Str) {
handleShare(idStr, StratumStatus::ILLEGAL_PARARMS, 0, session.getChainId());
return;
}
uint32_t jobId = strtoul(jsonRoot["id"].str().c_str(), nullptr, 10);
uint64_t nonce = jsonRoot["nonce"].uint64_hex();
string output = jsonRoot["output"].str();
auto localJob = session.findLocalJob(jobId);
// can't find local job
if (localJob == nullptr) {
handleShare(idStr, StratumStatus::JOB_NOT_FOUND, 0, session.getChainId());
return;
}
auto &server = session.getServer();
auto &worker = session.getWorker();
auto sessionId = session.getSessionId();
shared_ptr<StratumJobEx> exjob = server.GetJobRepository(localJob->chainId_)
->getStratumJobEx(localJob->jobId_);
// can't find stratum job
if (exjob.get() == nullptr) {
handleShare(idStr, StratumStatus::JOB_NOT_FOUND, 0, session.getChainId());
return;
}
auto sjob = std::static_pointer_cast<StratumJobBeam>(exjob->sjob_);
// Used to prevent duplicate shares.
// Note: The same (input, nonce) may have multiple different and valid
// outputs.
uint64_t inputPrefix = stoull(sjob->input_.substr(0, 16), nullptr, 16);
uint32_t outputHash = djb2(output.c_str());
auto iter = jobDiffs_.find(localJob);
if (iter == jobDiffs_.end()) {
handleShare(idStr, StratumStatus::JOB_NOT_FOUND, 0, localJob->chainId_);
LOG(ERROR) << "can't find session's diff, worker: " << worker.fullName_;
return;
}
auto &jobDiff = iter->second;
ShareBeam share;
share.set_version(ShareBeam::CURRENT_VERSION);
share.set_inputprefix(inputPrefix);
share.set_workerhashid(workerId_);
share.set_userid(worker.userId(localJob->chainId_));
share.set_sharediff(jobDiff.currentJobDiff_);
share.set_blockbits(sjob->blockBits_);
share.set_timestamp((uint64_t)time(nullptr));
share.set_status(StratumStatus::REJECT_NO_REASON);
share.set_height(sjob->height_);
share.set_nonce(nonce);
share.set_sessionid(sessionId);
share.set_outputhash(outputHash);
IpAddress ip;
ip.fromIpv4Int(session.getClientIp());
share.set_ip(ip.toString());
// LocalShare localShare(nonce, outputHash, 0);
LocalShareType localShare(nonce, outputHash);
// can't add local share
if (!localJob->addLocalShare(localShare)) {
handleShare(
idStr,
StratumStatus::DUPLICATE_SHARE,
jobDiff.currentJobDiff_,
localJob->chainId_);
// add invalid share to counter
invalidSharesCounter_.insert((int64_t)time(nullptr), 1);
return;
}
uint256 blockHash;
server.checkAndUpdateShare(
localJob->chainId_,
share,
exjob,
output,
jobDiff.jobDiffs_,
worker.fullName_,
blockHash);
if (StratumStatus::isAccepted(share.status())) {
DLOG(INFO) << "share reached the diff: " << share.sharediff();
} else {
DLOG(INFO) << "share not reached the diff: " << share.sharediff();
}
// we send share to kafka by default, but if there are lots of invalid
// shares in a short time, we just drop them.
if (handleShare(
idStr, share.status(), share.sharediff(), localJob->chainId_)) {
if (StratumStatus::isSolved(share.status())) {
server.sendSolvedShare2Kafka(
localJob->chainId_, share, sjob->input_, output, worker, blockHash);
// mark jobs as stale
server.GetJobRepository(localJob->chainId_)
->markAllJobsAsStale(sjob->height());
}
} else {
// check if there is invalid share spamming
int64_t invalidSharesNum = invalidSharesCounter_.sum(
time(nullptr), INVALID_SHARE_SLIDING_WINDOWS_SIZE);
// too much invalid shares, don't send them to kafka
if (invalidSharesNum >= INVALID_SHARE_SLIDING_WINDOWS_MAX_LIMIT) {
LOG(WARNING) << "invalid share spamming, worker: " << worker.fullName_
<< ", " << share.toString();
return;
}
}
DLOG(INFO) << share.toString();
std::string message;
if (!share.SerializeToStringWithVersion(message)) {
LOG(ERROR) << "share SerializeToStringWithVersion failed!"
<< share.toString();
return;
}
server.sendShare2Kafka(localJob->chainId_, message.data(), message.size());
}
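// For reference, djb2 (used above to fingerprint `output` for duplicate-share
// detection) is the classic Bernstein string hash; a sketch of the usual form
// (the project's actual helper may differ in detail):
//
//   uint32_t djb2(const char *s) {
//     uint32_t h = 5381;
//     while (*s) h = h * 33 + (unsigned char)*s++;
//     return h;
//   }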
{
"name": "MTLManagedObjectAdapter",
"version": "1.0",
"license": "MIT",
"summary": "Model framework for Cocoa and Cocoa Touch.",
"homepage": "https://github.com/Mantle/Mantle",
"authors": {
"GitHub": "<EMAIL>"
},
"source": {
"git": "https://github.com/Mantle/MTLManagedObjectAdapter.git",
"tag": "1.0"
},
"requires_arc": true,
"platforms": {
"ios": "8.0",
"osx": "10.9"
},
"source_files": "MTLManagedObjectAdapter/MTLManagedObjectAdapter.{h,m}",
"public_header_files": "MTLManagedObjectAdapter/MTLManagedObjectAdapter.h",
"dependencies": {
"Mantle": [
"~> 2.0"
]
},
"frameworks": [
"Foundation",
"CoreData"
],
"prepare_command": " PREFIX=\"mtl_moa_\"\n # Add prefix to header imports\n ext_header_prefix_src() {\n SOURCE_FILE=$1\n EXT_HEADER_NAME=$2\n sed -i.bak \"s/\"${EXT_HEADER_NAME}\"/\"${PREFIX}${EXT_HEADER_NAME}\"/g\" ${SOURCE_FILE} && rm ${SOURCE_FILE}.bak\n }\n ext_header_prefix_src MTLManagedObjectAdapter/MTLManagedObjectAdapter.m EXTRuntimeExtensions.h\n ext_header_prefix_src MTLManagedObjectAdapter/MTLManagedObjectAdapter.m EXTScope.h\n ext_header_prefix_src MTLManagedObjectAdapter/extobjc/EXTRuntimeExtensions.m EXTRuntimeExtensions.h\n ext_header_prefix_src MTLManagedObjectAdapter/extobjc/EXTScope.m EXTScope.h\n # Change header name\n ext_header_prefix_mv() {\n SOURCE_FILE=$1\n FILE_NAME=`basename ${SOURCE_FILE}`\n DIR_NAME=`dirname ${SOURCE_FILE}`\n mv ${SOURCE_FILE} `dirname ${SOURCE_FILE}`/${PREFIX}`basename ${SOURCE_FILE}`\n }\n export -f ext_header_prefix_mv\n export PREFIX=${PREFIX}\n find MTLManagedObjectAdapter/extobjc -name \"*.h\" -exec bash -c 'ext_header_prefix_mv \"$0\"' {} \\;\n unset ext_header_prefix_mv\n unset PREFIX\n",
"subspecs": [
{
"name": "extobjc",
"source_files": "MTLManagedObjectAdapter/extobjc/*.{h,m}",
"private_header_files": "MTLManagedObjectAdapter/extobjc/*.h"
}
]
}
package com.evacipated.cardcrawl.modthespire.lib;
import org.apache.commons.lang3.NotImplementedException;
public class SpireSuper
{
public static <R> R call(Object... params)
{
throw new NotImplementedException("This shouldn't happen.");
}
}
"""Manage filesystems in temporary locations.
A temporary filesystem is stored in a location defined by your OS
(``/tmp`` on linux). The contents are deleted when the filesystem
is closed.
A `TempFS` is a good way of preparing a directory structure in advance,
that you can later copy. It can also be used as a temporary data store.
"""
from __future__ import print_function
from __future__ import unicode_literals
import shutil
import tempfile
import typing
import six
from . import errors
from .osfs import OSFS
if typing.TYPE_CHECKING:
from typing import Optional, Text
@six.python_2_unicode_compatible
class TempFS(OSFS):
"""A temporary filesystem on the OS.
Temporary filesystems are created using the `tempfile.mkdtemp`
function to obtain a temporary folder in an OS-specific location.
You can provide an alternative location with the ``temp_dir``
argument of the constructor.
Examples:
Create with the constructor::
>>> from fs.tempfs import TempFS
>>> tmp_fs = TempFS()
Or via an FS URL::
>>> import fs
>>> tmp_fs = fs.open_fs("temp://")
Use a specific identifier for the temporary folder to better
illustrate its purpose::
>>> named_tmp_fs = fs.open_fs("temp://local_copy")
>>> named_tmp_fs = TempFS(identifier="local_copy")
"""
def __init__(
self,
identifier="__tempfs__", # type: Text
temp_dir=None, # type: Optional[Text]
auto_clean=True, # type: bool
ignore_clean_errors=True, # type: bool
):
# type: (...) -> None
"""Create a new `TempFS` instance.
Arguments:
identifier (str): A string to distinguish the directory within
the OS temp location, used as part of the directory name.
temp_dir (str, optional): An OS path to your temp directory
(leave as `None` to auto-detect).
auto_clean (bool): If `True` (the default), the directory
contents will be wiped on close.
ignore_clean_errors (bool): If `True` (the default), any errors
in the clean process will be suppressed. If `False`, they
will be raised.
"""
self.identifier = identifier
self._auto_clean = auto_clean
self._ignore_clean_errors = ignore_clean_errors
self._cleaned = False
self.identifier = identifier.replace("/", "-")
self._temp_dir = tempfile.mkdtemp(identifier or "fsTempFS", dir=temp_dir)
super(TempFS, self).__init__(self._temp_dir)
def __repr__(self):
# type: () -> Text
return "TempFS()"
def __str__(self):
# type: () -> Text
return "<tempfs '{}'>".format(self._temp_dir)
def close(self):
# type: () -> None
"""Close the filesystem and release any resources.
It is important to call this method when you have finished
working with the filesystem. Some filesystems may not finalize
changes until they are closed (archives for example). You may
call this method explicitly (it is safe to call close multiple
times), or you can use the filesystem as a context manager to
automatically close.
Hint:
Depending on the value of ``auto_clean`` passed when creating
the `TempFS`, the underlying temporary folder may be removed
or not.
Example:
>>> tmp_fs = TempFS(auto_clean=False)
>>> syspath = tmp_fs.getsyspath("/")
>>> tmp_fs.close()
>>> os.path.exists(syspath)
True
"""
if self._auto_clean:
self.clean()
super(TempFS, self).close()
def clean(self):
# type: () -> None
"""Clean (delete) temporary files created by this filesystem."""
if self._cleaned:
return
try:
shutil.rmtree(self._temp_dir)
except Exception as error:
if not self._ignore_clean_errors:
raise errors.OperationFailed(
msg="failed to remove temporary directory; {}".format(error),
exc=error,
)
self._cleaned = True
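# Usage sketch (illustrative, not part of the original module): used as a
# context manager, `close` runs on exit, and because ``auto_clean`` defaults
# to `True` the temporary directory is removed automatically.
def _demo_tempfs():
    with TempFS(identifier="demo") as tmp_fs:
        tmp_fs.writetext("/hello.txt", "hello")
        assert tmp_fs.exists("/hello.txt")
    # The backing directory is gone once the context exits.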
| 1,749 |
587 |
<filename>seq2seq/tools/__init__.py
import torch
from random import randrange
from math import floor
from torch.nn.utils.rnn import pack_padded_sequence, PackedSequence
from .config import PAD
def _limit_lengths(seqs, max_length=None, max_tokens=None):
max_length = max_length or float('inf')
lengths = [min(s.nelement(), max_length) for s in seqs]
if max_tokens is not None:
num_tokens = sum(lengths)
if num_tokens > max_tokens:
max_length = int(floor(num_tokens / len(seqs)))
lengths = [min(length, max_length) for length in lengths]
return lengths
# def _limit_batch_tokens(seqs, max_length=None, max_tokens=None, log=False):
# """
# seqs: a list of Tensors to be batched together
# max_length: maximum sequence length permitted
# max_tokens: maximum number of tokens (with padding) permitted -- batch will be trimed if exceeded
# """
# max_length = max_length or float('inf')
# lengths = [min(s.nelement(), max_length) for s in seqs]
# if max_tokens is not None:
# num_tokens = max(lengths) * len(seqs)
# if num_tokens > max_tokens: # needs to restrict batch size to fit maximum tokens
# # account for padding in final tensor
# padded_lengths = np.maximum.accumulate(lengths)
# num_tokens_batch = padded_lengths * (np.arange(len(seqs)) + 1)
# # determine new batch size and trim sequence
# B = int((num_tokens_batch > max_tokens).argmax() - 1)
# seqs = seqs[:B]
# lengths = lengths[:B]
# if log:
# logging.debug('Trimmed batch to %s as number of tokens was > %s'
# % (B, max_tokens))
# return seqs, lengths
def batch_sequences(seqs, max_length=None, max_tokens=None, fixed_length=None, batch_first=False, pad_value=PAD,
sort=False, pack=False, augment=False, device=None, dtype=torch.long):
"""
seqs: a list of Tensors to be batched together
max_length: maximum sequence length permitted
max_tokens: maximum number of tokens in batch permitted
"""
batch_dim, time_dim = (0, 1) if batch_first else (1, 0)
    if fixed_length is not None:
        # max_length may be None here; guard against min(None, ...).
        fixed_length = max_length = min(max_length or float('inf'), fixed_length)
if len(seqs) == 1 and not fixed_length:
lengths = _limit_lengths(seqs, max_length, max_tokens)
seq_tensor = seqs[0].view(-1,)[:lengths[0]]
seq_tensor = seq_tensor.unsqueeze(batch_dim)\
.to(dtype=dtype, device=device)
else:
if sort:
seqs.sort(key=len, reverse=True)
lengths = _limit_lengths(seqs, max_length, max_tokens)
batch_length = max(lengths) if fixed_length is None\
else fixed_length
tensor_size = (len(seqs), batch_length) if batch_first \
else (batch_length, len(seqs))
seq_tensor = torch.full(tensor_size, pad_value,
dtype=dtype, device=device)
for i, seq in enumerate(seqs):
start_seq = 0
end_seq = lengths[i]
if augment and end_seq < seq.nelement():
delta = randrange(seq.nelement() - end_seq + 1)
start_seq += delta
end_seq += delta
seq_tensor.narrow(time_dim, 0, lengths[i]).select(batch_dim, i)\
.copy_(seq[start_seq:end_seq])
if pack:
seq_tensor = pack_padded_sequence(
seq_tensor, lengths, batch_first=batch_first)
if device is not None: # batch_sizes is not casted to device by default
seq_tensor = PackedSequence(seq_tensor.data,
seq_tensor.batch_sizes.to(device))
return (seq_tensor, lengths)
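# Usage sketch (illustrative, not part of the original module): pad three
# variable-length sequences into a single time-major LongTensor.
def _demo_batch_sequences():
    seqs = [torch.tensor([1, 2, 3, 4]), torch.tensor([5, 6]), torch.tensor([7])]
    padded, lengths = batch_sequences(seqs, sort=True)
    assert lengths == [4, 2, 1]
    assert tuple(padded.shape) == (4, 3)  # (time, batch); short rows hold PAD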
def batch_nested_sequences(seqs_subseqs, max_length=None, max_tokens=None, fixed_length=None, batch_first=True, pad_value=PAD,
augment=False, device=None, dtype=torch.long):
"""
seqs: a list of Tensors to be batched together
sub_seqs: a list of list of Tensors to be batched together
max_length: maximum sequence length permitted
max_tokens: maximum number of tokens in batch permitted
"""
seqs, sub_seqs = zip(*seqs_subseqs)
batch_dim, time_dim = (0, 1) if batch_first else (1, 0)
    if fixed_length is not None:
        # max_length may be None here; guard against min(None, ...).
        fixed_length = max_length = min(max_length or float('inf'), fixed_length)
lengths = _limit_lengths(seqs, max_length, max_tokens)
sub_seqs = [s[:length] for s, length in zip(sub_seqs, lengths)]
sub_lengths = [[sub.nelement() for sub in s] for s in sub_seqs]
batch_length = max(lengths) if fixed_length is None\
else fixed_length
batch_sub_length = max([max([s2.numel() for s2 in s1]) for s1 in sub_seqs])
sub_tensor_size = (len(seqs), batch_length, batch_sub_length) if batch_first \
else (batch_length, batch_sub_length, len(seqs))
sub_seq_tensor = torch.full(sub_tensor_size, pad_value,
dtype=dtype, device=device)
tensor_size = (len(seqs), batch_length) if batch_first \
else (batch_length, len(seqs))
seq_tensor = torch.full(tensor_size, pad_value,
dtype=dtype, device=device)
for i, seq in enumerate(seqs):
end_seq = lengths[i]
seq_tensor.narrow(time_dim, 0, lengths[i]).select(batch_dim, i)\
.copy_(seq[0:end_seq])
for j, sub_seq in enumerate(sub_seqs[i]):
end_sub_seq = sub_lengths[i][j]
sub_seq_tensor\
.narrow(time_dim+1, 0, end_sub_seq)\
.select(time_dim, j)\
.select(batch_dim, i)\
.copy_(sub_seq[0:end_sub_seq])
return (seq_tensor, lengths), (sub_seq_tensor, sub_lengths)
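# Usage sketch (illustrative, not part of the original module): batch one
# word-level sequence together with its character-level sub-sequences.
def _demo_batch_nested_sequences():
    words = torch.tensor([11, 12])
    chars = [torch.tensor([1, 2, 3]), torch.tensor([4])]
    (seq, lengths), (sub_seq, sub_lengths) = batch_nested_sequences(
        [(words, chars)], batch_first=True)
    assert tuple(seq.shape) == (1, 2)          # (batch, time)
    assert tuple(sub_seq.shape) == (1, 2, 3)   # (batch, time, max_chars)
    assert sub_lengths == [[3, 1]]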
| 2,682 |
575 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_FETCH_BODY_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_FETCH_BODY_H_
#include "third_party/blink/renderer/bindings/core/v8/active_script_wrappable.h"
#include "third_party/blink/renderer/bindings/core/v8/script_promise.h"
#include "third_party/blink/renderer/bindings/core/v8/script_value.h"
#include "third_party/blink/renderer/core/core_export.h"
#include "third_party/blink/renderer/core/execution_context/execution_context_lifecycle_observer.h"
#include "third_party/blink/renderer/platform/bindings/script_wrappable.h"
#include "third_party/blink/renderer/platform/heap/handle.h"
#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
namespace blink {
class BodyStreamBuffer;
class ExceptionState;
class ExecutionContext;
class ReadableStream;
class ScriptState;
// This class represents Body mix-in defined in the fetch spec
// https://fetch.spec.whatwg.org/#body-mixin.
//
// Note: This class has body stream and its predicate whereas in the current
// spec only Response has it and Request has a byte stream defined in the
// Encoding spec. The spec should be fixed shortly to be aligned with this
// implementation.
class CORE_EXPORT Body : public ExecutionContextClient {
public:
explicit Body(ExecutionContext*);
ScriptPromise arrayBuffer(ScriptState*, ExceptionState&);
ScriptPromise blob(ScriptState*, ExceptionState&);
ScriptPromise formData(ScriptState*, ExceptionState&);
ScriptPromise json(ScriptState*, ExceptionState&);
ScriptPromise text(ScriptState*, ExceptionState&);
ReadableStream* body();
virtual BodyStreamBuffer* BodyBuffer() = 0;
virtual const BodyStreamBuffer* BodyBuffer() const = 0;
// This should only be called from the generated bindings. All other code
// should use IsBodyUsed() instead.
bool bodyUsed() const { return IsBodyUsed(); }
// True if the body has been read from.
virtual bool IsBodyUsed() const;
// True if the body is locked.
bool IsBodyLocked() const;
bool HasPendingActivity() const;
private:
// TODO(e_hakkinen): Fix |MimeType()| to always contain parameters and
// remove |ContentType()|.
virtual String ContentType() const = 0;
virtual String MimeType() const = 0;
// Body consumption algorithms will reject with a TypeError in a number of
// error conditions. This method wraps those up into one call which throws
// an exception if consumption cannot proceed. The caller must check
// |exception_state| on return.
void RejectInvalidConsumption(ExceptionState& exception_state) const;
DISALLOW_COPY_AND_ASSIGN(Body);
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_FETCH_BODY_H_
| 872 |
475 |
package org.micro.neural.config.store;
import lombok.extern.slf4j.Slf4j;
import org.micro.neural.common.URL;
import org.micro.neural.common.utils.SerializeUtils;
import org.redisson.Redisson;
import org.redisson.api.*;
import org.redisson.api.listener.PatternMessageListener;
import org.redisson.codec.SerializationCodec;
import org.redisson.config.*;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
/**
* Neural Store
*
* @author lry
*/
@Slf4j
public enum RedisStore {
//===
INSTANCE;
private boolean started;
private RedissonClient redissonClient;
private Map<String, PatternMessageListener> patternListeners = new ConcurrentHashMap<>();
/**
* The initialize store
*
* @param url {@link URL}
*/
public synchronized void initialize(URL url) {
if (started) {
return;
}
Config config = new Config();
String category = url.getParameter(URL.CATEGORY_KEY);
RedisModel redisModel = RedisModel.parse(category);
if (RedisModel.SENTINEL == redisModel) {
SentinelServersConfig sentinelServersConfig = config.useSentinelServers();
sentinelServersConfig.addSentinelAddress(url.getAddresses());
} else if (RedisModel.CLUSTER == redisModel) {
ClusterServersConfig clusterServersConfig = config.useClusterServers();
clusterServersConfig.addNodeAddress(url.getAddresses());
} else if (RedisModel.MASTER_SLAVE == redisModel) {
MasterSlaveServersConfig masterSlaveServersConfig = config.useMasterSlaveServers();
masterSlaveServersConfig.setMasterAddress(url.getAddress());
masterSlaveServersConfig.setSlaveAddresses(new HashSet<>(url.getBackupAddressList()));
} else if (RedisModel.REPLICATED == redisModel) {
ReplicatedServersConfig replicatedServersConfig = config.useReplicatedServers();
replicatedServersConfig.addNodeAddress(url.getAddresses());
} else {
SingleServerConfig singleServerConfig = config.useSingleServer();
singleServerConfig.setAddress(url.getAddress());
}
this.redissonClient = Redisson.create(config);
this.started = true;
}
    /**
     * The batch increment by
     *
     * @param key    map key
     * @param data   entry data: {@link Long} values are added to the counter,
     *               other values are stored as strings
     * @param expire expire time in milliseconds
     */
    public void batchIncrementBy(String key, Map<String, Object> data, long expire) {
for (Map.Entry<String, Object> entry : data.entrySet()) {
if (entry.getValue() instanceof Long) {
redissonClient.getMap(key).addAndGet(entry.getKey(), (Long) entry.getValue());
} else {
redissonClient.getMap(key).put(entry.getKey(), String.valueOf(entry.getValue()));
}
}
redissonClient.getMap(key).expire(expire, TimeUnit.MILLISECONDS);
}
/**
* The put all map
*
* @param space space
* @param data map data
*/
public void putAllMap(String space, Map<String, String> data) {
redissonClient.getMap(space).putAll(data);
}
/**
* The execute lua script
*
* @param script lua script
* @param timeout future timeout
* @param keys key list
* @return return object list
*/
public List<Object> eval(String script, Long timeout, List<Object> keys) {
List<Object> keyArray = new ArrayList<>(keys.size());
for (int i = 0; i < keys.size(); i++) {
Object obj = keys.get(i);
if (obj == null) {
throw new IllegalArgumentException("The key[" + i + "] is null");
}
keyArray.add(obj);
}
try {
RFuture<List<Object>> redisFuture = redissonClient.getScript().evalAsync(
RScript.Mode.READ_WRITE, script, RScript.ReturnType.MULTI, keyArray);
return redisFuture.get(timeout, TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new RuntimeException(e.getMessage(), e);
}
}
/**
* The get all key-value by name
*
* @param name map name
* @return map
*/
public Map<String, String> getMap(String name) {
Map<Object, Object> remoteMap = redissonClient.getMap(name);
if (remoteMap == null || remoteMap.isEmpty()) {
return Collections.emptyMap();
}
Map<String, String> map = new HashMap<>();
for (Map.Entry<Object, Object> entry : remoteMap.entrySet()) {
map.put(String.valueOf(entry.getKey()), String.valueOf(entry.getValue()));
}
return map;
}
/**
* The publish
*
* @param channel channel
* @param data data
*/
public void publish(String channel, String data) {
RTopic topic = redissonClient.getTopic(channel, new SerializationCodec());
topic.publish(SerializeUtils.serialize(data));
}
/**
* The subscribe by pattern
*
* @param pattern pattern
* @param listener {@link IStoreListener}
*/
public void subscribe(String pattern, IStoreListener listener) {
if (patternListeners.containsKey(pattern)) {
log.warn("The repeated subscribe:{}, listener:{}", pattern, listener);
return;
}
PatternMessageListener<String> pmListener = (pattern1, channel, msg) -> {
log.debug("The notify message pattern:{}, channel:{}, msg: {}", pattern1, channel, msg);
listener.notify(channel.toString(), msg);
};
patternListeners.put(pattern, pmListener);
RPatternTopic rPatternTopic = redissonClient.getPatternTopic(pattern);
rPatternTopic.addListener(String.class, pmListener);
}
/**
* The pattern unsubscribe
*
* @param pattern pattern
*/
public void unsubscribe(String pattern) {
PatternMessageListener patternMessageListener = patternListeners.get(pattern);
if (patternMessageListener == null) {
return;
}
RPatternTopic rPatternTopic = redissonClient.getPatternTopic(pattern);
rPatternTopic.removeListener(patternMessageListener);
}
/**
* The destroy
*/
public void destroy() {
for (Map.Entry<String, PatternMessageListener> entry : patternListeners.entrySet()) {
unsubscribe(entry.getKey());
}
if (null != redissonClient) {
redissonClient.shutdown();
}
}
}
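/**
 * Usage sketch (illustrative, not part of the original file). The URL value is
 * made up, and it is assumed that {@link URL} offers a Dubbo-style
 * {@code valueOf} factory and that {@link IStoreListener} is a functional
 * interface; adapt to the real signatures if they differ.
 */
class RedisStoreUsageExample {
    static void demo() {
        RedisStore store = RedisStore.INSTANCE;
        store.initialize(URL.valueOf("redis://127.0.0.1:6379"));
        // React to every message published on channels matching the pattern.
        store.subscribe("neural.*", (channel, data) ->
                System.out.println(channel + " -> " + data));
        store.publish("neural.config", "{\"enable\":true}");
        store.destroy();
    }
}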
| 2,639 |
5,169 |
<filename>Specs/4/7/2/PhraseSDK/2.2.0/PhraseSDK.podspec.json
{
"name": "PhraseSDK",
"version": "2.2.0",
"summary": "iOS SDK for Phrase",
"homepage": "https://phrase.com",
"license": {
"type": "Commercial",
"file": "LICENSE.md"
},
"authors": {
"Dynport GmbH": "<EMAIL>"
},
"source": {
"git": "https://github.com/phrase/ios-sdk.git",
"tag": "2.2.0"
},
"platforms": {
"ios": "9.0"
},
"frameworks": "Foundation",
"preserve_paths": "PhraseSDK.framework",
"public_header_files": "PhraseSDK.framework/Headers/PhraseSDK.h",
"source_files": "PhraseSDK.framework/Headers/PhraseSDK.h",
"vendored_frameworks": "PhraseSDK.framework",
"requires_arc": true
}
| 316 |
305 |
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "video/video_loopback.h"
#include <stdio.h>
#include <memory>
#include <string>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/types/optional.h"
#include "api/test/simulated_network.h"
#include "api/test/video_quality_test_fixture.h"
#include "api/transport/bitrate_settings.h"
#include "api/video_codecs/video_codec.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "system_wrappers/include/field_trial.h"
#include "test/field_trial.h"
#include "test/gtest.h"
#include "test/run_test.h"
#include "video/video_quality_test.h"
// Flags common with screenshare loopback, with different default values.
ABSL_FLAG(int, width, 640, "Video width.");
ABSL_FLAG(int, height, 480, "Video height.");
ABSL_FLAG(int, fps, 30, "Frames per second.");
ABSL_FLAG(int, capture_device_index, 0, "Capture device to select");
ABSL_FLAG(int, min_bitrate, 50, "Call and stream min bitrate in kbps.");
ABSL_FLAG(int, start_bitrate, 300, "Call start bitrate in kbps.");
ABSL_FLAG(int, target_bitrate, 800, "Stream target bitrate in kbps.");
ABSL_FLAG(int, max_bitrate, 800, "Call and stream max bitrate in kbps.");
ABSL_FLAG(bool,
suspend_below_min_bitrate,
false,
"Suspends video below the configured min bitrate.");
ABSL_FLAG(int,
num_temporal_layers,
1,
"Number of temporal layers. Set to 1-4 to override.");
ABSL_FLAG(int,
inter_layer_pred,
2,
"Inter-layer prediction mode. "
"0 - enabled, 1 - disabled, 2 - enabled only for key pictures.");
// Flags common with screenshare loopback, with equal default values.
ABSL_FLAG(std::string, codec, "VP8", "Video codec to use.");
ABSL_FLAG(int,
selected_tl,
-1,
"Temporal layer to show or analyze. -1 to disable filtering.");
ABSL_FLAG(
int,
duration,
0,
"Duration of the test in seconds. If 0, rendered will be shown instead.");
ABSL_FLAG(std::string, output_filename, "", "Target graph data filename.");
ABSL_FLAG(std::string,
graph_title,
"",
"If empty, title will be generated automatically.");
ABSL_FLAG(int, loss_percent, 0, "Percentage of packets randomly lost.");
ABSL_FLAG(int,
avg_burst_loss_length,
-1,
"Average burst length of lost packets.");
ABSL_FLAG(int,
link_capacity,
0,
"Capacity (kbps) of the fake link. 0 means infinite.");
ABSL_FLAG(int, queue_size, 0, "Size of the bottleneck link queue in packets.");
ABSL_FLAG(int,
avg_propagation_delay_ms,
0,
"Average link propagation delay in ms.");
ABSL_FLAG(std::string,
rtc_event_log_name,
"",
"Filename for rtc event log. Two files "
"with \"_send\" and \"_recv\" suffixes will be created.");
ABSL_FLAG(std::string,
rtp_dump_name,
"",
"Filename for dumped received RTP stream.");
ABSL_FLAG(int,
std_propagation_delay_ms,
0,
"Link propagation delay standard deviation in ms.");
ABSL_FLAG(int, num_streams, 0, "Number of streams to show or analyze.");
ABSL_FLAG(int,
selected_stream,
0,
"ID of the stream to show or analyze. "
"Set to the number of streams to show them all.");
ABSL_FLAG(int, num_spatial_layers, 1, "Number of spatial layers to use.");
ABSL_FLAG(int,
selected_sl,
-1,
"Spatial layer to show or analyze. -1 to disable filtering.");
ABSL_FLAG(std::string,
stream0,
"",
"Comma separated values describing VideoStream for stream #0.");
ABSL_FLAG(std::string,
stream1,
"",
"Comma separated values describing VideoStream for stream #1.");
ABSL_FLAG(std::string,
sl0,
"",
"Comma separated values describing SpatialLayer for layer #0.");
ABSL_FLAG(std::string,
sl1,
"",
"Comma separated values describing SpatialLayer for layer #1.");
ABSL_FLAG(std::string,
sl2,
"",
"Comma separated values describing SpatialLayer for layer #2.");
ABSL_FLAG(std::string,
encoded_frame_path,
"",
"The base path for encoded frame logs. Created files will have "
"the form <encoded_frame_path>.<n>.(recv|send.<m>).ivf");
ABSL_FLAG(bool, logs, false, "Print logs to stderr.");
ABSL_FLAG(bool, send_side_bwe, true, "Use send-side bandwidth estimation");
ABSL_FLAG(bool, generic_descriptor, false, "Use the generic frame descriptor.");
ABSL_FLAG(bool, allow_reordering, false, "Allow packet reordering to occur");
ABSL_FLAG(bool, use_ulpfec, false, "Use RED+ULPFEC forward error correction.");
ABSL_FLAG(bool, use_flexfec, false, "Use FlexFEC forward error correction.");
ABSL_FLAG(bool, audio, false, "Add audio stream");
ABSL_FLAG(bool,
use_real_adm,
false,
"Use real ADM instead of fake (no effect if audio is false)");
ABSL_FLAG(bool,
audio_video_sync,
false,
"Sync audio and video stream (no effect if"
" audio is false)");
ABSL_FLAG(bool,
audio_dtx,
false,
"Enable audio DTX (no effect if audio is false)");
ABSL_FLAG(bool, video, true, "Add video stream");
ABSL_FLAG(
std::string,
force_fieldtrials,
"",
"Field trials control experimental feature code which can be forced. "
"E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enabled/"
" will assign the group Enable to field trial WebRTC-FooFeature. Multiple "
"trials are separated by \"/\"");
// Video-specific flags.
ABSL_FLAG(std::string,
clip,
"",
"Name of the clip to show. If empty, using chroma generator.");
namespace webrtc {
namespace {
size_t Width() {
return static_cast<size_t>(absl::GetFlag(FLAGS_width));
}
size_t Height() {
return static_cast<size_t>(absl::GetFlag(FLAGS_height));
}
int Fps() {
return absl::GetFlag(FLAGS_fps);
}
size_t GetCaptureDevice() {
return static_cast<size_t>(absl::GetFlag(FLAGS_capture_device_index));
}
int MinBitrateKbps() {
return absl::GetFlag(FLAGS_min_bitrate);
}
int StartBitrateKbps() {
return absl::GetFlag(FLAGS_start_bitrate);
}
int TargetBitrateKbps() {
return absl::GetFlag(FLAGS_target_bitrate);
}
int MaxBitrateKbps() {
return absl::GetFlag(FLAGS_max_bitrate);
}
int NumTemporalLayers() {
return absl::GetFlag(FLAGS_num_temporal_layers);
}
InterLayerPredMode InterLayerPred() {
if (absl::GetFlag(FLAGS_inter_layer_pred) == 0) {
return InterLayerPredMode::kOn;
} else if (absl::GetFlag(FLAGS_inter_layer_pred) == 1) {
return InterLayerPredMode::kOff;
} else {
RTC_DCHECK_EQ(absl::GetFlag(FLAGS_inter_layer_pred), 2);
return InterLayerPredMode::kOnKeyPic;
}
}
std::string Codec() {
return absl::GetFlag(FLAGS_codec);
}
int SelectedTL() {
return absl::GetFlag(FLAGS_selected_tl);
}
int DurationSecs() {
return absl::GetFlag(FLAGS_duration);
}
std::string OutputFilename() {
return absl::GetFlag(FLAGS_output_filename);
}
std::string GraphTitle() {
return absl::GetFlag(FLAGS_graph_title);
}
int LossPercent() {
return static_cast<int>(absl::GetFlag(FLAGS_loss_percent));
}
int AvgBurstLossLength() {
return static_cast<int>(absl::GetFlag(FLAGS_avg_burst_loss_length));
}
int LinkCapacityKbps() {
return static_cast<int>(absl::GetFlag(FLAGS_link_capacity));
}
int QueueSize() {
return static_cast<int>(absl::GetFlag(FLAGS_queue_size));
}
int AvgPropagationDelayMs() {
return static_cast<int>(absl::GetFlag(FLAGS_avg_propagation_delay_ms));
}
std::string RtcEventLogName() {
return absl::GetFlag(FLAGS_rtc_event_log_name);
}
std::string RtpDumpName() {
return absl::GetFlag(FLAGS_rtp_dump_name);
}
int StdPropagationDelayMs() {
return absl::GetFlag(FLAGS_std_propagation_delay_ms);
}
int NumStreams() {
return absl::GetFlag(FLAGS_num_streams);
}
int SelectedStream() {
return absl::GetFlag(FLAGS_selected_stream);
}
int NumSpatialLayers() {
return absl::GetFlag(FLAGS_num_spatial_layers);
}
int SelectedSL() {
return absl::GetFlag(FLAGS_selected_sl);
}
std::string Stream0() {
return absl::GetFlag(FLAGS_stream0);
}
std::string Stream1() {
return absl::GetFlag(FLAGS_stream1);
}
std::string SL0() {
return absl::GetFlag(FLAGS_sl0);
}
std::string SL1() {
return absl::GetFlag(FLAGS_sl1);
}
std::string SL2() {
return absl::GetFlag(FLAGS_sl2);
}
std::string EncodedFramePath() {
return absl::GetFlag(FLAGS_encoded_frame_path);
}
std::string Clip() {
return absl::GetFlag(FLAGS_clip);
}
} // namespace
void Loopback() {
BuiltInNetworkBehaviorConfig pipe_config;
pipe_config.loss_percent = LossPercent();
pipe_config.avg_burst_loss_length = AvgBurstLossLength();
pipe_config.link_capacity_kbps = LinkCapacityKbps();
pipe_config.queue_length_packets = QueueSize();
pipe_config.queue_delay_ms = AvgPropagationDelayMs();
pipe_config.delay_standard_deviation_ms = StdPropagationDelayMs();
pipe_config.allow_reordering = absl::GetFlag(FLAGS_allow_reordering);
BitrateConstraints call_bitrate_config;
call_bitrate_config.min_bitrate_bps = MinBitrateKbps() * 1000;
call_bitrate_config.start_bitrate_bps = StartBitrateKbps() * 1000;
call_bitrate_config.max_bitrate_bps = -1; // Don't cap bandwidth estimate.
VideoQualityTest::Params params;
params.call = {absl::GetFlag(FLAGS_send_side_bwe),
absl::GetFlag(FLAGS_generic_descriptor), call_bitrate_config,
0};
params.video[0] = {absl::GetFlag(FLAGS_video),
Width(),
Height(),
Fps(),
MinBitrateKbps() * 1000,
TargetBitrateKbps() * 1000,
MaxBitrateKbps() * 1000,
absl::GetFlag(FLAGS_suspend_below_min_bitrate),
Codec(),
NumTemporalLayers(),
SelectedTL(),
0, // No min transmit bitrate.
absl::GetFlag(FLAGS_use_ulpfec),
absl::GetFlag(FLAGS_use_flexfec),
NumStreams() < 2, // Automatic quality scaling.
Clip(),
GetCaptureDevice()};
params.audio = {
absl::GetFlag(FLAGS_audio), absl::GetFlag(FLAGS_audio_video_sync),
absl::GetFlag(FLAGS_audio_dtx), absl::GetFlag(FLAGS_use_real_adm)};
params.logging = {RtcEventLogName(), RtpDumpName(), EncodedFramePath()};
params.screenshare[0].enabled = false;
params.analyzer = {"video", 0.0, 0.0, DurationSecs(),
OutputFilename(), GraphTitle()};
params.config = pipe_config;
if (NumStreams() > 1 && Stream0().empty() && Stream1().empty()) {
params.ss[0].infer_streams = true;
}
std::vector<std::string> stream_descriptors;
stream_descriptors.push_back(Stream0());
stream_descriptors.push_back(Stream1());
std::vector<std::string> SL_descriptors;
SL_descriptors.push_back(SL0());
SL_descriptors.push_back(SL1());
SL_descriptors.push_back(SL2());
VideoQualityTest::FillScalabilitySettings(
¶ms, 0, stream_descriptors, NumStreams(), SelectedStream(),
NumSpatialLayers(), SelectedSL(), InterLayerPred(), SL_descriptors);
auto fixture = std::make_unique<VideoQualityTest>(nullptr);
if (DurationSecs()) {
fixture->RunWithAnalyzer(params);
} else {
fixture->RunWithRenderers(params);
}
}
int RunLoopbackTest(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
absl::ParseCommandLine(argc, argv);
rtc::LogMessage::SetLogToStderr(absl::GetFlag(FLAGS_logs));
// InitFieldTrialsFromString stores the char*, so the char array must outlive
// the application.
const std::string field_trials = absl::GetFlag(FLAGS_force_fieldtrials);
webrtc::field_trial::InitFieldTrialsFromString(field_trials.c_str());
webrtc::test::RunTest(webrtc::Loopback);
return 0;
}
} // namespace webrtc
| 5,268 |
5,169 |
<gh_stars>1000+
{
"name": "libev",
"version": "4.15.0",
"summary": "Unofficial libev mirror for Cocoapods",
"description": " This is a binary distribution of the libev library built for iOS and OSX.\n The library uses configure which makes it a bit tricky to cross compile.\n",
"homepage": "http://software.schmorp.de/pkg/libev.html",
"license": "BSD",
"authors": {
"jaylyerly": "<EMAIL>"
},
"social_media_url": "http://twitter.com/jaylyerly",
"platforms": {
"ios": "6.0",
"osx": "10.7"
},
"source": {
"git": "https://github.com/jaylyerly/libev.git",
"tag": "4.15.0"
},
"source_files": "include/*.h",
"ios": {
"libraries": "ev-ios"
},
"osx": {
"libraries": "ev-osx"
},
"preserve_paths": [
"include",
"lib"
],
"requires_arc": false,
"xcconfig": {
"HEADER_SEARCH_PATHS": "$(PODS_ROOT)/libev/include/",
"LIBRARY_SEARCH_PATHS": "$(PODS_ROOT)/libev/lib"
}
}
| 452 |
356 |
<reponame>EvilPudding/candle
#ifndef MESH_H
#define MESH_H
#include "macros.h"
#include "mafs.h"
#include "material.h"
#include "vector.h"
#include "khash.h"
struct mesh;
struct face;
struct edge;
typedef struct vec3_t(*support_cb)(struct mesh *self, const vec3_t dir);
#ifdef MESH4
# define vecN vec4
# define vecN_t vec4_t
# define uvecN_t uvec4_t
static vec4_t VEC3(float x, float y, float z)
{
return vec4(x, y, z, 0.0f);
}
# define VEC3i(x, y, z) {x, y, z, 0.0f}
# define vecN_xyz(v) vec4_xyz(v)
# define ZN Z4
# define _vecN(a) _vec4(a)
# define _uvecN(a) _uvec4(a)
# define NDIMS 4
# define dN d4
# define dN_t d4_t
# define D3(x, y, z) d4(x, y, z, 0.0f)
#else
# define vecN vec3
# define vecN_t vec3_t
# define uvecN_t uvec3_t
static vec3_t VEC3(float x, float y, float z)
{
return vec3(x, y, z);
}
# define VEC3i(x, y, z) {x, y, z}
# define vecN_xyz(v) v
# define _vecN(a) _vec3(a)
# define _uvecN(a) _uvec3(a)
# define ZN Z3
# define dN d3
# define dN_t d3_t
# define D3(x, y, z) d3(x, y, z)
# define NDIMS 3
#endif
typedef float(*modifier_cb)(struct mesh *mesh, float percent, void *usrptr);
typedef int(*iter_cb)(struct mesh *mesh, void *selection, void *usrptr);
#define EXP(e) e
#define vecN_(eq) CAT2(vecN, _##eq)
#define dN_(eq) CAT2(dN, _##eq)
#define SEL_UNSELECTED 0
#define SEL_EDITING 1
KHASH_MAP_INIT_INT(id, int)
typedef enum
{
MESH_VERT,
MESH_EDGE,
MESH_FACE,
MESH_ANY
} geom_t;
typedef struct vertex_t
{
vecN_t pos;
vec4_t color;
/* vec3_t normal; */
int half;
int tmp;
/* Skin */
vec4_t wei;
vec4_t bid;
} vertex_t;
#define v_half(v, m) (m_edge(m, v->half))
typedef struct edge /* Half edge */
{
int v;
vec3_t n; /* NORMAL OF v */
vec3_t tg; /* TANGENT OF v */
vec2_t t; /* TEXTURE COORD OF v */
int face; /* face_t id */
int pair; /* edge_t id for triangle meshes only */
int next; /* edge_t id */
int prev; /* edge_t id */
int cell_pair; /* edge_t id for tetrahedral meshes only */
int tmp;
} edge_t;
#define e_prev(e, m) (m_edge(m, (e)->prev))
/* Returns the previous edge_t* of edge:e in mesh:m */
#define e_next(e, m) (m_edge(m, (e)->next))
/* Returns the next edge_t* of edge:e in mesh:m */
#define e_cpair(e, m) (m_edge(m, (e)->cell_pair))
/* Returns the cell pair edge_t* of edge:e in mesh:m */
#define e_pair(e, m) (m_edge(m, (e)->pair))
/* Returns the pair edge_t* of edge:e in mesh:m */
#define e_face(e, m) (m_face(m, (e)->face))
/* Returns the face_t* of edge:e in mesh:m */
#define e_vert(e, m) (m_vert(m, (e)->v))
/* Returns the 0th vertex_t* from edge:e in mesh:m */
typedef struct face_t /* Half face */
{
int e_size;
int e[4]; /* edge_t[e_size] id */
vec3_t n; /* flat normal, only applicable to triangles */
#ifdef MESH4
int pair;
int cell;
#endif
int tmp;
} face_t;
#define f_edge(f, i, m) (m_edge(m, f->e[i]))
/* Returns the i'th edge_t* from face:f in mesh:m */
#define f_vert(f, i, m) (e_vert(f_edge(f, i, m), m))
/* Returns the i'th vertex_t* from face:f in mesh:m */
#ifdef MESH4
#define f_cell(f, m) m_cell(m, f->cell)
/* Returns the pair face_t* of face:f in mesh:m */
#define f_pair(f, m) m_face(m, f->pair)
/* Returns the pair face_t* of face:f in mesh:m */
#endif
#ifdef MESH4
typedef struct cell_t /* Cell */
{
int f_size;
int f[5]; /* face_t[f_size] id */
} cell_t;
#define c_face(c, i, m) (m_face(m, c->f[i]))
/* Returns the i'th face_t* from cell:c in mesh:m */
#define c_edge(c, i, j, m) (f_edge(c_face(c, i, m), j, m))
/* Returns the j'th edge_t* of the i'th face_t from cell:c in mesh:m */
#define c_vert(c, i, j, m) (f_vert(c_face(c, i, m), j, m))
/* Returns the j'th vertex4_t* of the i'th face_t from cell:c in mesh:m */
#endif
typedef struct
{
khash_t(id) *faces;
khash_t(id) *edges;
khash_t(id) *verts;
#ifdef MESH4
khash_t(id) *cells;
#endif
} mesh_selection_t;
typedef struct mesh
{
vector_t *faces;
vector_t *verts;
vector_t *edges;
#ifdef MESH4
vector_t *cells;
#endif
mesh_selection_t selections[16];
khash_t(id) *faces_hash;
khash_t(id) *edges_hash;
int has_texcoords;
int triangulated;
int static_normals;
int current_cell;
int current_surface;
mat4_t transformation;
support_cb support;
char name[256];
int locked_write;
int locked_read;
int update_id;
int changes;
void *mtx;
	/* DISPLAY PROPERTIES */
int wireframe;
int cull;
float offset;
float smooth_angle;
int receive_shadows;
int ref_num;
int has_skin;
} mesh_t;
typedef enum
{
DIR_X,
DIR_Y,
DIR_Z
#ifdef MESH4
, DIR_W
#endif
} direction_t;
#ifdef MESH4
#define m_cell(m, i) ((cell_t*)vector_get(m->cells, i))
#endif
#define m_face(m, i) ((face_t*)vector_get(m->faces, i))
#define m_edge(m, i) ((edge_t*)vector_get(m->edges, i))
#define m_vert(m, i) ((vertex_t*)vector_get(m->verts, i))
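/* Usage sketch (illustrative, not part of the original header): walk the
 * half-edge cycle of a face by following ->next until it returns to the
 * first edge. Assumes the face's half-edges form a closed loop, as built
 * by mesh_add_triangle/mesh_add_quad. */
static int mesh_face_edge_count(mesh_t *m, int face_i)
{
	face_t *f = m_face(m, face_i);
	int first = f->e[0];
	int e = first;
	int count = 0;
	do {
		count++;
		e = m_edge(m, e)->next;
	} while (e != first && count <= f->e_size);
	return count;
}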
mesh_t *mesh_new(void);
mesh_t *mesh_clone(mesh_t *self);
void mesh_assign(mesh_t *self, mesh_t *other);
void mesh_destroy(mesh_t *self);
void mesh_load(mesh_t *self, const char *filename);
void mesh_circle(mesh_t *self, float radius, int segments, vecN_t dir);
void mesh_arc(mesh_t *self, float radius, int segments, vecN_t dir,
float start, float end, bool_t closed);
void mesh_frame_cuboid(mesh_t *self, vec3_t p2, vec3_t p1);
void mesh_frame_capsule(mesh_t *self, float radius, vec3_t dir);
void mesh_frame_sphere(mesh_t *self, float radius);
void mesh_quad(mesh_t *self);
mesh_t *mesh_torus(float radius, float inner_radius, int segments,
int inner_segments);
void mesh_disk(mesh_t *self, float radius, float inner_radius, int segments,
vecN_t dir);
void mesh_cube(mesh_t *self, float size, float tex_scale);
void mesh_ico(mesh_t *self, float size);
void mesh_point_grid(mesh_t *self, vecN_t start, vecN_t size, uvecN_t segments);
void mesh_triangle_grid(mesh_t *self, uvec2_t segments);
void mesh_clear(mesh_t *self);
void mesh_subdivide(mesh_t *mesh, int subdivisions);
void mesh_spherize(mesh_t *mesh, float roundness);
mesh_t *mesh_lathe(mesh_t *mesh, float angle, int segments,
float x, float y, float z);
void mesh_cuboid(mesh_t *self, float tex_scale, vec3_t p1, vec3_t p2);
/* mesh_t *mesh_cuboid(float tex_scale, */
/* float x1, float y1, float z1, */
/* float x2, float y2, float z2); */
void mesh_translate_uv(mesh_t *self, vec2_t p);
void mesh_scale_uv(mesh_t *self, float scale);
void mesh_lock(mesh_t *self);
void mesh_lock_write(mesh_t *self);
void mesh_unlock(mesh_t *self);
void mesh_unlock_write(mesh_t *self);
void mesh_update(mesh_t *self);
void mesh_modified(mesh_t *self);
void mesh_get_tg_bt(mesh_t *self);
void mesh_update_smooth_normals(mesh_t *self, float smooth_max);
int mesh_dup_vert(mesh_t *self, int i);
int mesh_add_vert(mesh_t *self, vecN_t p);
int mesh_assert_vert(mesh_t *self, vecN_t pos);
int mesh_append_edge(mesh_t *self, vecN_t p);
int mesh_add_edge_s(mesh_t *self, int v, int next);
int mesh_add_edge(mesh_t *self, int v, int next, int prev, vec3_t vn, vec2_t vt);
void mesh_link_edges(mesh_t *self, int e0, int e1);
int mesh_add_triangle(mesh_t *self,
int v0, vec3_t v0n, vec2_t v0t,
int v1, vec3_t v1n, vec2_t v1t,
int v2, vec3_t v2n, vec2_t v2t);
int mesh_add_tetrahedron(mesh_t *self, int v0, int v1, int v2, int v3);
int mesh_add_tetrahedral_prism(mesh_t *self, int fid, int v0, int v1, int v2);
int mesh_add_triangle_s(mesh_t *self, int v0, int v1, int v2);
void mesh_check_pairs(mesh_t *self);
int mesh_remove_lone_faces(mesh_t *self);
int mesh_remove_lone_edges(mesh_t *self);
void mesh_remove_faces(mesh_t *self);
void mesh_remove_face(mesh_t *self, int face_i, bool_t face_only);
void mesh_remove_edge(mesh_t *self, int edge_i);
void mesh_remove_vert(mesh_t *self, int vert_i);
void mesh_select(mesh_t *self, int selection, geom_t geom, int id);
void mesh_unselect(mesh_t *self, int selection, geom_t geom, int id);
void mesh_paint(mesh_t *self, vec4_t color);
void mesh_weld(mesh_t *self, geom_t geom);
void mesh_for_each_selected(mesh_t *self, geom_t geom, iter_cb cb, void *usrptr);
void mesh_extrude_faces(mesh_t *self, int steps, vecN_t offset,
float scale, modifier_cb scale_cb, modifier_cb offset_cb,
void *usrptr);
void mesh_extrude_edges(mesh_t *self, int steps, vecN_t offset,
float scale, modifier_cb scale_cb, modifier_cb offset_cb,
void *usrptr);
void mesh_translate_points(mesh_t *self, float percent, vecN_t offset,
float scale, modifier_cb scale_cb, modifier_cb offset_cb,
void *usrptr);
void mesh_triangulate(mesh_t *self);
void mesh_invert_normals(mesh_t *self);
int mesh_edge_rotate_to_unpaired(mesh_t *self, int edge_id);
void mesh_add_quad(mesh_t *self,
int v0, vec3_t v0n, vec2_t v0t,
int v1, vec3_t v1n, vec2_t v1t,
int v2, vec3_t v2n, vec2_t v2t,
int v3, vec3_t v3n, vec2_t v3t);
int mesh_add_regular_quad( mesh_t *self,
vecN_t p1, vec3_t n1, vec2_t t1, vecN_t p2, vec3_t n2, vec2_t t2,
vecN_t p3, vec3_t n3, vec2_t t3, vecN_t p4, vec3_t n4, vec2_t t4
);
void mesh_verts_prealloc(mesh_t *self, int size);
void mesh_edges_prealloc(mesh_t *self, int size);
void mesh_faces_prealloc(mesh_t *self, int size);
int mesh_update_unpaired_edges(mesh_t *self);
int mesh_update_unpaired_faces(mesh_t *self);
int mesh_vert_has_face(mesh_t *self, vertex_t *vert, int face_id);
void mesh_translate(mesh_t *self, vec3_t t);
void mesh_rotate(mesh_t *self, float angle, int x, int y, int z);
mat4_t mesh_save(mesh_t *self);
void mesh_restore(mesh_t *self, mat4_t saved);
int mesh_update_flips(mesh_t *self);
vecN_t mesh_get_selection_center(mesh_t *self);
float mesh_get_selection_radius(mesh_t *self, vecN_t center);
vertex_t *mesh_farthest(mesh_t *self, const vec3_t dir);
float mesh_get_margin(const mesh_t *self);
/* COLLISIONS */
int mesh_gjk_intersection(mesh_t *self, mesh_t *other);
void meshes_reg(void);
#endif /* !MESH_H */
| 4,404 |
477 |
/*
* Copyright (C) 2018 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.doctoror.particlesdrawable.opengl;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Bitmap;
import android.graphics.Color;
import android.graphics.drawable.Animatable;
import android.graphics.drawable.BitmapDrawable;
import android.graphics.drawable.ColorDrawable;
import android.graphics.drawable.Drawable;
import android.opengl.GLSurfaceView;
import android.os.Build;
import android.util.AttributeSet;
import android.util.Log;
import com.doctoror.particlesdrawable.contract.SceneConfiguration;
import com.doctoror.particlesdrawable.contract.SceneController;
import com.doctoror.particlesdrawable.contract.SceneScheduler;
import com.doctoror.particlesdrawable.engine.Engine;
import com.doctoror.particlesdrawable.engine.SceneConfigurator;
import com.doctoror.particlesdrawable.model.Scene;
import com.doctoror.particlesdrawable.opengl.chooser.EGLConfigChooserCallback;
import com.doctoror.particlesdrawable.opengl.chooser.FailsafeEGLConfigChooserFactory;
import com.doctoror.particlesdrawable.opengl.renderer.GlSceneRenderer;
import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.opengles.GL10;
import androidx.annotation.ColorInt;
import androidx.annotation.FloatRange;
import androidx.annotation.IntRange;
import androidx.annotation.Keep;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.VisibleForTesting;
/**
* Particles View that draws on {@link GLSurfaceView}.
*/
@Keep
public class GlParticlesView extends GLSurfaceView implements
Animatable,
SceneController,
SceneConfiguration,
SceneScheduler,
GLSurfaceView.Renderer {
private static final int DEFAULT_SAMPLES = 4;
final Scene scene;
private final SceneConfigurator sceneConfigurator;
final GlSceneRenderer renderer;
final Engine engine;
private volatile boolean backgroundColorDirty;
private volatile boolean backgroundTextureDirty;
@ColorInt
private volatile int backgroundColor = Color.DKGRAY;
private volatile Bitmap backgroundTexture;
public GlParticlesView(@NonNull final Context context) {
this(context, null);
}
public GlParticlesView(@NonNull final Context context, @Nullable final AttributeSet attrs) {
this(context, attrs, DEFAULT_SAMPLES, null);
}
public GlParticlesView(
@NonNull final Context context,
@Nullable final AttributeSet attrs,
final int samples,
@Nullable final EGLConfigChooserCallback eglConfigChooserCallback) {
super(context, attrs);
this.scene = new Scene();
this.sceneConfigurator = new SceneConfigurator();
this.renderer = new GlSceneRenderer();
this.engine = new Engine(scene, this, renderer);
init(context, attrs, samples, eglConfigChooserCallback);
}
@VisibleForTesting
GlParticlesView(
@NonNull final Context context,
@Nullable final AttributeSet attrs,
@NonNull final Engine engine,
@NonNull final Scene scene,
@NonNull final SceneConfigurator sceneConfigurator,
@NonNull final GlSceneRenderer sceneRenderer,
final int samples,
@Nullable final EGLConfigChooserCallback eglConfigChooserCallback) {
super(context, attrs);
this.engine = engine;
this.scene = scene;
this.sceneConfigurator = sceneConfigurator;
this.renderer = sceneRenderer;
init(context, attrs, samples, eglConfigChooserCallback);
}
private void init(
@NonNull final Context context,
@Nullable final AttributeSet attrs,
int samples,
@Nullable final EGLConfigChooserCallback configChooserCallback) {
if (attrs != null) {
sceneConfigurator.configureSceneFromAttributes(scene, context.getResources(), attrs);
final TypedArray glAttrs = context
.obtainStyledAttributes(attrs, R.styleable.GlParticlesView);
try {
samples = glAttrs.getInt(R.styleable.GlParticlesView_multisampling, samples);
} finally {
glAttrs.recycle();
}
}
final TypedArray array = context.getTheme()
.obtainStyledAttributes(new int[]{android.R.attr.windowBackground});
try {
backgroundColor = array.getColor(0, Color.DKGRAY);
} catch (Exception e) {
Log.w("GlParticlesView", "Failed to obtain windowBackground", e);
} finally {
array.recycle();
}
setEGLContextClientVersion(2);
setEGLConfigChooser(FailsafeEGLConfigChooserFactory
.newFailsafeEGLConfigChooser(samples, configChooserCallback));
setRenderer(this);
setRenderMode(RENDERMODE_WHEN_DIRTY);
renderer.markParticleTextureDirty();
}
/**
* Sets the background color for this View.
* Default is windowBackground.
* <p>
* Will not affect textures set in {@link #setBackground(Drawable)}.
*
* @param color the background of this View.
*/
@Override
public void setBackgroundColor(@ColorInt final int color) {
backgroundColor = color;
backgroundColorDirty = true;
}
/**
* Applies background. Supported Drawables are {@link BitmapDrawable}, and {@link ColorDrawable}
* since API Level 11.
* <p>
* The background will stretch to fill the entire screen. If you need transformations, like
* center crop, you should do it yourself before passing here.
* <p>
* Setting a {@link ColorDrawable} will apply a background color and remove any previously set
* {@link BitmapDrawable}.
* <p>
* If you want to change the background color without affecting the {@link Bitmap} texture, use
* {@link #setBackgroundColor(int)}
*
* @param background the background to apply
* @throws IllegalArgumentException if the background is not a {@link BitmapDrawable} or
* {@link ColorDrawable} if on API level 11 or higher.
*/
@Override
public void setBackgroundDrawable(@Nullable final Drawable background) {
applyBackground(background);
}
/**
* Applies background. Supported Drawables are {@link BitmapDrawable}, and {@link ColorDrawable}
* since API Level 11.
* <p>
* The background will stretch to fill the entire screen. If you need transformations, like
* center crop, you should do it yourself before passing here.
* <p>
* Setting a {@link ColorDrawable} will apply a background color and remove any previously set
* {@link BitmapDrawable}.
* <p>
* If you want to change the background color without affecting the {@link Bitmap} texture, use
* {@link #setBackgroundColor(int)}
*
* @param background the background to apply
* @throws IllegalArgumentException if the background is not a {@link BitmapDrawable} or
* {@link ColorDrawable} if on API level 11 or higher.
*/
@Override
public void setBackground(@Nullable final Drawable background) {
applyBackground(background);
}
private void applyBackground(@Nullable final Drawable background) {
if (background == null) {
processAndSetBackgroundTexture(null);
} else if (background instanceof BitmapDrawable) {
processAndSetBackgroundTexture(((BitmapDrawable) background).getBitmap());
} else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB
&& background instanceof ColorDrawable) {
processAndSetBackgroundTexture(null);
setBackgroundColor(((ColorDrawable) background).getColor());
} else {
throw new IllegalArgumentException(
"Only BitmapDrawable (sdk >= 9) or ColorDrawable (sdk >= 11) are supported");
}
}
private void processAndSetBackgroundTexture(@Nullable final Bitmap texture) {
backgroundTexture = texture;
backgroundTextureDirty = true;
}
/**
* {@inheritDoc}
*/
@Override
public void nextFrame() {
queueEvent(new Runnable() {
@Override
public void run() {
engine.nextFrame();
}
});
}
/**
* {@inheritDoc}
*/
@Override
public void makeFreshFrame() {
queueEvent(new Runnable() {
@Override
public void run() {
engine.makeFreshFrame();
}
});
}
/**
* {@inheritDoc}
*/
@Override
public void makeFreshFrameWithParticlesOffscreen() {
queueEvent(new Runnable() {
@Override
public void run() {
engine.makeFreshFrameWithParticlesOffscreen();
}
});
}
/**
* {@inheritDoc}
*/
@Override
public void setFrameDelay(@IntRange(from = 0) final int delay) {
queueEvent(new Runnable() {
@Override
public void run() {
scene.setFrameDelay(delay);
}
});
}
/**
* {@inheritDoc}
*/
@Override
public int getFrameDelay() {
return scene.getFrameDelay();
}
/**
* {@inheritDoc}
*/
@Override
public void setSpeedFactor(@FloatRange(from = 0) final float speedFactor) {
queueEvent(new Runnable() {
@Override
public void run() {
scene.setSpeedFactor(speedFactor);
}
});
}
/**
* {@inheritDoc}
*/
@Override
public float getSpeedFactor() {
return scene.getSpeedFactor();
}
/**
* {@inheritDoc}
*/
public void setParticleRadiusRange(
@FloatRange(from = 0.5f) final float minRadius,
@FloatRange(from = 0.5f) final float maxRadius) {
queueEvent(new Runnable() {
@Override
public void run() {
scene.setParticleRadiusRange(minRadius, maxRadius);
renderer.markParticleTextureDirty();
}
});
}
/**
* {@inheritDoc}
*/
@Override
public float getParticleRadiusMin() {
return scene.getParticleRadiusMin();
}
/**
* {@inheritDoc}
*/
@Override
public float getParticleRadiusMax() {
return scene.getParticleRadiusMax();
}
/**
* {@inheritDoc}
*/
public void setLineThickness(@FloatRange(from = 1) final float lineThickness) {
queueEvent(new Runnable() {
@Override
public void run() {
scene.setLineThickness(lineThickness);
}
});
}
/**
* {@inheritDoc}
*/
@Override
public float getLineThickness() {
return scene.getLineThickness();
}
/**
* {@inheritDoc}
*/
public void setLineLength(@FloatRange(from = 0) final float lineLength) {
queueEvent(new Runnable() {
@Override
public void run() {
scene.setLineLength(lineLength);
}
});
}
/**
* {@inheritDoc}
*/
@Override
public float getLineLength() {
return scene.getLineLength();
}
/**
* {@inheritDoc}
*/
public void setDensity(@IntRange(from = 0) final int newNum) {
queueEvent(new Runnable() {
@Override
public void run() {
scene.setDensity(newNum);
}
});
}
/**
* {@inheritDoc}
*/
@Override
public int getDensity() {
return scene.getDensity();
}
/**
* {@inheritDoc}
*/
public void setParticleColor(@ColorInt final int color) {
queueEvent(new Runnable() {
@Override
public void run() {
scene.setParticleColor(color);
renderer.markParticleTextureDirty();
}
});
}
/**
* {@inheritDoc}
*/
@Override
public int getParticleColor() {
return scene.getParticleColor();
}
/**
* {@inheritDoc}
*/
public void setLineColor(@ColorInt final int lineColor) {
queueEvent(new Runnable() {
@Override
public void run() {
scene.setLineColor(lineColor);
}
});
}
/**
* {@inheritDoc}
*/
@Override
public int getLineColor() {
return scene.getLineColor();
}
@Override
public void scheduleNextFrame(final long delay) {
if (delay == 0) {
requestRender();
} else {
removeCallbacks(requestRenderRunnable);
postDelayed(requestRenderRunnable, delay);
}
}
@Override
public void unscheduleNextFrame() {
}
/**
* Start animating. This will clear the explicit control flag if set by {@link #stop()}.
     * Note that if this View's visibility is not {@link #VISIBLE} or it is not attached to a
     * window, this will not start animating until the state changes to meet the requirements above.
*/
@Override
public void start() {
onResume();
engine.start();
}
/**
     * Explicitly stop animating. Once stopped, no animations will start
     * automatically until you call {@link #start()}.
*/
@Override
public void stop() {
engine.stop();
onPause();
queueEvent(new Runnable() {
@Override
public void run() {
renderer.recycle();
}
});
}
@Override
public boolean isRunning() {
return engine.isRunning();
}
@Override
public void onSurfaceCreated(@NonNull final GL10 gl, @NonNull final EGLConfig config) {
renderer.recycle();
renderer.setupGl();
backgroundColorDirty = true;
backgroundTextureDirty = true;
}
@Override
public void onSurfaceChanged(@NonNull final GL10 gl, final int width, final int height) {
engine.setDimensions(width, height);
renderer.setDimensions(width, height);
backgroundColorDirty = true;
backgroundTextureDirty = true;
}
@Override
public void onDrawFrame(@NonNull final GL10 gl) {
if (backgroundColorDirty) {
renderer.setClearColor(backgroundColor);
backgroundColorDirty = false;
}
if (backgroundTextureDirty) {
renderer.setBackgroundTexture(backgroundTexture);
backgroundTextureDirty = false;
}
engine.draw();
engine.run();
}
private final Runnable requestRenderRunnable = new Runnable() {
@Override
public void run() {
requestRender();
}
};
}
| 6,526 |
1,139 |
<reponame>ghiloufibelgacem/jornaldev<gh_stars>1000+
package com.journaldev.springhibernate.dao;
import java.util.List;
import com.journaldev.springhibernate.model.Person;
public interface PersonDAO {
public void addPerson(Person p);
public List<Person> listPersons();
}
| 109 |
647 |
<reponame>Bertware/transitfeed<gh_stars>100-1000
#!/usr/bin/python2.5
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
class GtfsFactoryUser(object):
"""Base class for objects that must store a GtfsFactory in order to
be able to instantiate Gtfs classes.
If a non-default GtfsFactory is to be used, it must be set explicitly."""
_gtfs_factory = None
def GetGtfsFactory(self):
"""Return the object's GTFS Factory.
Returns:
The GTFS Factory that was set for this object. If none was explicitly
set, it first sets the object's factory to transitfeed's GtfsFactory
and returns it"""
if self._gtfs_factory is None:
#TODO(anog): We really need to create a dependency graph and clean things
# up, as the comment in __init__.py says.
# Not having GenericGTFSObject as a leaf (with no other
# imports) creates all sorts of circular import problems.
# This is why the import is here and not at the top level.
# When this runs, gtfsfactory should have already been loaded
# by other modules, avoiding the circular imports.
from . import gtfsfactory
self._gtfs_factory = gtfsfactory.GetGtfsFactory()
return self._gtfs_factory
def SetGtfsFactory(self, factory):
self._gtfs_factory = factory
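# Usage sketch (illustrative, not part of the original module): a subclass
# gets lazy access to the default factory, and tests can inject their own via
# SetGtfsFactory. Assumes the factory exposes schedule classes such as Agency.
class ExampleFactoryUser(GtfsFactoryUser):
  def NewAgency(self):
    return self.GetGtfsFactory().Agency()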
| 665 |
945 |
<reponame>arobert01/ITK
// This is core/vnl/vnl_float_3x3.h
#ifndef vnl_float_3x3_h_
#define vnl_float_3x3_h_
//:
// \file
// \brief 3x3 matrix of float
//
// vnl_float_3x3 is a vnl_matrix<float> of fixed size 3x3.
// It is merely a typedef for vnl_matrix_fixed<float,3,3>
//
// \author <NAME>
// \date 1 April 2003
//
//-----------------------------------------------------------------------------
#include "vnl_matrix_fixed.h"
typedef vnl_matrix_fixed<float,3,3> vnl_float_3x3;
#endif // vnl_float_3x3_h_
| 225 |
2,143 |
<gh_stars>1000+
package com.tngtech.archunit.testutil.assertion;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import com.tngtech.archunit.core.domain.JavaCodeUnit;
import com.tngtech.archunit.core.domain.JavaTypeVariable;
import org.assertj.core.api.AbstractObjectAssert;
import static com.tngtech.archunit.core.domain.properties.HasName.Utils.namesOf;
import static com.tngtech.archunit.testutil.Assertions.assertThat;
import static com.tngtech.archunit.testutil.Assertions.assertThatType;
import static com.tngtech.archunit.testutil.Assertions.assertThatTypeVariable;
import static com.tngtech.archunit.testutil.Assertions.assertThatTypes;
import static com.tngtech.archunit.testutil.ReflectionTestUtils.constructor;
import static com.tngtech.archunit.testutil.ReflectionTestUtils.method;
import static com.tngtech.archunit.testutil.assertion.JavaTypeVariableAssertion.getTypeVariableWithName;
public class JavaCodeUnitAssertion<T extends JavaCodeUnit, SELF extends JavaCodeUnitAssertion<T, SELF>>
extends JavaMemberAssertion<T, SELF> {
public JavaCodeUnitAssertion(T javaMember, Class<SELF> selfType) {
super(javaMember, selfType);
}
public void isEquivalentTo(Method method) {
super.isEquivalentTo(method);
assertThatTypes(actual.getRawParameterTypes()).matchExactly(method.getParameterTypes());
assertThatType(actual.getRawReturnType()).matches(method.getReturnType());
}
public void isEquivalentTo(Constructor<?> constructor) {
super.isEquivalentTo(constructor);
assertThatTypes(actual.getRawParameterTypes()).matchExactly(constructor.getParameterTypes());
assertThatType(actual.getRawReturnType()).matches(void.class);
}
public JavaTypeVariableOfCodeUnitAssertion hasTypeParameter(String name) {
JavaTypeVariable<JavaCodeUnit> typeVariable = getTypeVariableWithName(name, actual.getTypeParameters());
return new JavaTypeVariableOfCodeUnitAssertion(typeVariable);
}
public JavaCodeUnitAssertion<T, SELF> hasTypeParameters(String... names) {
assertThat(namesOf(actual.getTypeParameters())).as("names of type parameters").containsExactly(names);
return this;
}
public JavaTypeVariableOfCodeUnitAssertion hasOnlyTypeParameter(String name) {
assertThat(actual.getTypeParameters()).as("Type parameters").hasSize(1);
return hasTypeParameter(name);
}
public void matchesConstructor(Class<?> owner, Class<?>... parameterTypes) {
isEquivalentTo(constructor(owner, parameterTypes));
}
public void matchesMethod(Class<?> owner, String methodName, Class<?>... parameterTypes) {
isEquivalentTo(method(owner, methodName, parameterTypes));
}
public class JavaTypeVariableOfCodeUnitAssertion extends AbstractObjectAssert<JavaTypeVariableOfCodeUnitAssertion, JavaTypeVariable<JavaCodeUnit>> {
private JavaTypeVariableOfCodeUnitAssertion(JavaTypeVariable<JavaCodeUnit> actual) {
super(actual, JavaTypeVariableOfCodeUnitAssertion.class);
}
public JavaCodeUnitAssertion<T, SELF> withBoundsMatching(Class<?>... bounds) {
assertThatTypeVariable(actual).hasBoundsMatching(bounds);
return JavaCodeUnitAssertion.this;
}
public JavaCodeUnitAssertion<T, SELF> withBoundsMatching(ExpectedConcreteType... bounds) {
assertThatTypeVariable(actual).hasBoundsMatching(bounds);
return JavaCodeUnitAssertion.this;
}
}
}
| 1,227 |
1,288 |
// Copyright 2020 Google LLC. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#include "packager/media/formats/webvtt/webvtt_muxer.h"
#include "packager/file/file.h"
#include "packager/file/file_closer.h"
#include "packager/media/formats/webvtt/webvtt_utils.h"
#include "packager/status_macros.h"
namespace shaka {
namespace media {
namespace webvtt {
WebVttMuxer::WebVttMuxer(const MuxerOptions& options) : TextMuxer(options) {}
WebVttMuxer::~WebVttMuxer() {}
Status WebVttMuxer::InitializeStream(TextStreamInfo* stream) {
stream->set_codec(kCodecWebVtt);
stream->set_codec_string("wvtt");
const std::string preamble = WebVttGetPreamble(*stream);
buffer_.reset(new WebVttFileBuffer(
options().transport_stream_timestamp_offset_ms, preamble));
return Status::OK;
}
Status WebVttMuxer::AddTextSampleInternal(const TextSample& sample) {
if (sample.id().find('\n') != std::string::npos) {
return Status(error::MUXER_FAILURE, "Text id cannot contain newlines");
}
buffer_->Append(sample);
return Status::OK;
}
Status WebVttMuxer::WriteToFile(const std::string& filename, uint64_t* size) {
// Write everything to the file before telling the manifest so that the
// file will exist on disk.
std::unique_ptr<File, FileCloser> file(File::Open(filename.c_str(), "w"));
if (!file) {
return Status(error::FILE_FAILURE, "Failed to open " + filename);
}
buffer_->WriteTo(file.get(), size);
buffer_->Reset();
if (!file.release()->Close()) {
return Status(error::FILE_FAILURE, "Failed to close " + filename);
}
return Status::OK;
}
} // namespace webvtt
} // namespace media
} // namespace shaka
| 644 |
1,329 |
from .easylog import write_config_into_log
from .ramp import ramp_up, ramp_down
| 31 |
1,970 |
<reponame>liufangqi/hudi<filename>hudi-common/src/main/java/org/apache/hudi/common/table/timeline/dto/ClusteringOpDTO.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.common.table.timeline.dto;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hudi.common.model.HoodieFileGroupId;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.util.collection.Pair;
/**
* The data transfer object of clustering.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class ClusteringOpDTO {
@JsonProperty("id")
private String fileId;
@JsonProperty("partition")
private String partitionPath;
@JsonProperty("instantTime")
private String instantTime;
@JsonProperty("instantState")
private String instantState;
@JsonProperty("instantAction")
private String instantAction;
public static ClusteringOpDTO fromClusteringOp(HoodieFileGroupId fileGroupId, HoodieInstant instant) {
ClusteringOpDTO dto = new ClusteringOpDTO();
dto.fileId = fileGroupId.getFileId();
dto.partitionPath = fileGroupId.getPartitionPath();
dto.instantAction = instant.getAction();
dto.instantState = instant.getState().name();
dto.instantTime = instant.getTimestamp();
return dto;
}
public static Pair<HoodieFileGroupId, HoodieInstant> toClusteringOperation(ClusteringOpDTO dto) {
return Pair.of(new HoodieFileGroupId(dto.partitionPath, dto.fileId),
new HoodieInstant(HoodieInstant.State.valueOf(dto.instantState), dto.instantAction, dto.instantTime));
}
}
| 757 |
335 |
{
"word": "Frontier",
"definitions": [
"A line or border separating two countries.",
"The extreme limit of settled land beyond which lies wilderness, especially in reference to the western US before Pacific settlement.",
"The extreme limit of understanding or achievement in a particular area."
],
"parts-of-speech": "Noun"
}
| 112 |
1,144 |
<gh_stars>1000+
/******************************************************************************
*
* Copyright(c) 2016 - 2017 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#if (BT_SUPPORT == 1 && COEX_SUPPORT == 1)
#if (RTL8723D_SUPPORT == 1)
/* *******************************************
 * The following are the 8723D 2-antenna BT coexistence definitions
* ******************************************* */
#define BT_8723D_2ANT_COEX_DBG 0
#define BT_AUTO_REPORT_ONLY_8723D_2ANT 1
#define BT_INFO_8723D_2ANT_B_FTP BIT(7)
#define BT_INFO_8723D_2ANT_B_A2DP BIT(6)
#define BT_INFO_8723D_2ANT_B_HID BIT(5)
#define BT_INFO_8723D_2ANT_B_SCO_BUSY BIT(4)
#define BT_INFO_8723D_2ANT_B_ACL_BUSY BIT(3)
#define BT_INFO_8723D_2ANT_B_INQ_PAGE BIT(2)
#define BT_INFO_8723D_2ANT_B_SCO_ESCO BIT(1)
#define BT_INFO_8723D_2ANT_B_CONNECTION BIT(0)
#define BTC_RSSI_COEX_THRESH_TOL_8723D_2ANT 2
#define BT_8723D_2ANT_WIFI_RSSI_COEXSWITCH_THRES1 80
#define BT_8723D_2ANT_BT_RSSI_COEXSWITCH_THRES1 80
#define BT_8723D_2ANT_WIFI_RSSI_COEXSWITCH_THRES2 80
#define BT_8723D_2ANT_BT_RSSI_COEXSWITCH_THRES2 80
#define BT_8723D_2ANT_DEFAULT_ISOLATION 15
#define BT_8723D_2ANT_WIFI_MAX_TX_POWER 15
#define BT_8723D_2ANT_BT_MAX_TX_POWER 3
#define BT_8723D_2ANT_WIFI_SIR_THRES1 -15
#define BT_8723D_2ANT_WIFI_SIR_THRES2 -30
#define BT_8723D_2ANT_BT_SIR_THRES1 -15
#define BT_8723D_2ANT_BT_SIR_THRES2 -30
/* for Antenna detection */
#define BT_8723D_2ANT_ANTDET_PSDTHRES_BACKGROUND 50
#define BT_8723D_2ANT_ANTDET_PSDTHRES_2ANT_BADISOLATION 70
#define BT_8723D_2ANT_ANTDET_PSDTHRES_2ANT_GOODISOLATION 52
#define BT_8723D_2ANT_ANTDET_PSDTHRES_1ANT 40
#define BT_8723D_2ANT_ANTDET_RETRY_INTERVAL 10
#define BT_8723D_2ANT_ANTDET_SWEEPPOINT_DELAY 60000
#define BT_8723D_2ANT_ANTDET_ENABLE 0
#define BT_8723D_2ANT_ANTDET_BTTXTIME 100
#define BT_8723D_2ANT_ANTDET_BTTXCHANNEL 39
#define BT_8723D_2ANT_ANTDET_PSD_SWWEEPCOUNT 50
#define BT_8723D_2ANT_LTECOEX_INDIRECTREG_ACCESS_TIMEOUT 30000
enum bt_8723d_2ant_signal_state {
BT_8723D_2ANT_SIG_STA_SET_TO_LOW = 0x0,
BT_8723D_2ANT_SIG_STA_SET_BY_HW = 0x0,
BT_8723D_2ANT_SIG_STA_SET_TO_HIGH = 0x1,
BT_8723D_2ANT_SIG_STA_MAX
};
enum bt_8723d_2ant_path_ctrl_owner {
BT_8723D_2ANT_PCO_BTSIDE = 0x0,
BT_8723D_2ANT_PCO_WLSIDE = 0x1,
BT_8723D_2ANT_PCO_MAX
};
enum bt_8723d_2ant_gnt_ctrl_type {
BT_8723D_2ANT_GNT_TYPE_CTRL_BY_PTA = 0x0,
BT_8723D_2ANT_GNT_TYPE_CTRL_BY_SW = 0x1,
BT_8723D_2ANT_GNT_TYPE_MAX
};
enum bt_8723d_2ant_gnt_ctrl_block {
BT_8723D_2ANT_GNT_BLOCK_RFC_BB = 0x0,
BT_8723D_2ANT_GNT_BLOCK_RFC = 0x1,
BT_8723D_2ANT_GNT_BLOCK_BB = 0x2,
BT_8723D_2ANT_GNT_BLOCK_MAX
};
enum bt_8723d_2ant_lte_coex_table_type {
BT_8723D_2ANT_CTT_WL_VS_LTE = 0x0,
BT_8723D_2ANT_CTT_BT_VS_LTE = 0x1,
BT_8723D_2ANT_CTT_MAX
};
enum bt_8723d_2ant_lte_break_table_type {
BT_8723D_2ANT_LBTT_WL_BREAK_LTE = 0x0,
BT_8723D_2ANT_LBTT_BT_BREAK_LTE = 0x1,
BT_8723D_2ANT_LBTT_LTE_BREAK_WL = 0x2,
BT_8723D_2ANT_LBTT_LTE_BREAK_BT = 0x3,
BT_8723D_2ANT_LBTT_MAX
};
enum bt_info_src_8723d_2ant {
BT_INFO_SRC_8723D_2ANT_WIFI_FW = 0x0,
BT_INFO_SRC_8723D_2ANT_BT_RSP = 0x1,
BT_INFO_SRC_8723D_2ANT_BT_ACTIVE_SEND = 0x2,
BT_INFO_SRC_8723D_2ANT_MAX
};
enum bt_8723d_2ant_bt_status {
BT_8723D_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
BT_8723D_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
BT_8723D_2ANT_BT_STATUS_INQ_PAGE = 0x2,
BT_8723D_2ANT_BT_STATUS_ACL_BUSY = 0x3,
BT_8723D_2ANT_BT_STATUS_SCO_BUSY = 0x4,
BT_8723D_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
BT_8723D_2ANT_BT_STATUS_MAX
};
enum bt_8723d_2ant_coex_algo {
BT_8723D_2ANT_COEX_ALGO_UNDEFINED = 0x0,
BT_8723D_2ANT_COEX_ALGO_SCO = 0x1,
BT_8723D_2ANT_COEX_ALGO_HID = 0x2,
BT_8723D_2ANT_COEX_ALGO_A2DP = 0x3,
BT_8723D_2ANT_COEX_ALGO_A2DP_PANHS = 0x4,
BT_8723D_2ANT_COEX_ALGO_PANEDR = 0x5,
BT_8723D_2ANT_COEX_ALGO_PANHS = 0x6,
BT_8723D_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
BT_8723D_2ANT_COEX_ALGO_PANEDR_HID = 0x8,
BT_8723D_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
BT_8723D_2ANT_COEX_ALGO_HID_A2DP = 0xa,
BT_8723D_2ANT_COEX_ALGO_NOPROFILEBUSY = 0xb,
BT_8723D_2ANT_COEX_ALGO_A2DPSINK = 0xc,
BT_8723D_2ANT_COEX_ALGO_MAX
};
enum bt_8723d_2ant_phase {
BT_8723D_2ANT_PHASE_COEX_INIT = 0x0,
BT_8723D_2ANT_PHASE_WLANONLY_INIT = 0x1,
BT_8723D_2ANT_PHASE_WLAN_OFF = 0x2,
BT_8723D_2ANT_PHASE_2G_RUNTIME = 0x3,
BT_8723D_2ANT_PHASE_5G_RUNTIME = 0x4,
BT_8723D_2ANT_PHASE_BTMPMODE = 0x5,
BT_8723D_2ANT_PHASE_ANTENNA_DET = 0x6,
BT_8723D_2ANT_PHASE_COEX_POWERON = 0x7,
BT_8723D_2ANT_PHASE_2G_FREERUN = 0x8,
BT_8723D_2ANT_PHASE_MAX
};
enum bt_8723d_2ant_Scoreboard {
BT_8723D_2ANT_SCOREBOARD_ACTIVE = BIT(0),
BT_8723D_2ANT_SCOREBOARD_ONOFF = BIT(1),
BT_8723D_2ANT_SCOREBOARD_SCAN = BIT(2),
BT_8723D_2ANT_SCOREBOARD_UNDERTEST = BIT(3),
BT_8723D_2ANT_SCOREBOARD_RXGAIN = BIT(4),
BT_8723D_2ANT_SCOREBOARD_WLBUSY = BIT(6),
BT_8723D_2ANT_SCOREBOARD_TDMA = BIT(9),
};
struct coex_dm_8723d_2ant {
/* fw mechanism */
u8 pre_bt_dec_pwr_lvl;
u8 cur_bt_dec_pwr_lvl;
u8 pre_fw_dac_swing_lvl;
u8 cur_fw_dac_swing_lvl;
boolean cur_ignore_wlan_act;
boolean pre_ignore_wlan_act;
u8 cur_ps_tdma;
u8 ps_tdma_para[5];
u8 ps_tdma_du_adj_type;
boolean reset_tdma_adjust;
boolean cur_ps_tdma_on;
boolean pre_bt_auto_report;
boolean cur_bt_auto_report;
/* sw mechanism */
boolean pre_rf_rx_lpf_shrink;
boolean cur_rf_rx_lpf_shrink;
u32 bt_rf_0x1e_backup;
boolean pre_low_penalty_ra;
boolean cur_low_penalty_ra;
boolean pre_dac_swing_on;
u32 pre_dac_swing_lvl;
boolean cur_dac_swing_on;
u32 cur_dac_swing_lvl;
boolean pre_adc_back_off;
boolean cur_adc_back_off;
boolean pre_agc_table_en;
boolean cur_agc_table_en;
u32 cur_val0x6c0;
u32 cur_val0x6c4;
u32 cur_val0x6c8;
u8 cur_val0x6cc;
boolean limited_dig;
/* algorithm related */
u8 pre_algorithm;
u8 cur_algorithm;
u8 bt_status;
u8 wifi_chnl_info[3];
boolean need_recover0x948;
u32 backup0x948;
u8 pre_lps;
u8 cur_lps;
u8 pre_rpwm;
u8 cur_rpwm;
boolean is_switch_to_1dot5_ant;
u8 switch_thres_offset;
u32 arp_cnt;
u8 pre_ant_pos_type;
u8 cur_ant_pos_type;
u32 setting_tdma;
};
struct coex_sta_8723d_2ant {
boolean bt_disabled;
boolean bt_link_exist;
boolean sco_exist;
boolean a2dp_exist;
boolean hid_exist;
boolean pan_exist;
boolean under_lps;
boolean under_ips;
u8 connect_ap_period_cnt;
u8 pnp_awake_period_cnt;
u32 high_priority_tx;
u32 high_priority_rx;
u32 low_priority_tx;
u32 low_priority_rx;
boolean is_hipri_rx_overhead;
u8 bt_rssi;
boolean bt_tx_rx_mask;
u8 pre_bt_rssi_state;
u8 pre_wifi_rssi_state[4];
u8 bt_info_c2h[BT_INFO_SRC_8723D_2ANT_MAX][10];
u32 bt_info_c2h_cnt[BT_INFO_SRC_8723D_2ANT_MAX];
boolean bt_whck_test;
boolean c2h_bt_inquiry_page;
boolean c2h_bt_remote_name_req;
u8 bt_retry_cnt;
u8 bt_info_ext;
u8 bt_info_ext2;
u32 pop_event_cnt;
u8 scan_ap_num;
u32 crc_ok_cck;
u32 crc_ok_11g;
u32 crc_ok_11n;
u32 crc_ok_11n_vht;
u32 crc_err_cck;
u32 crc_err_11g;
u32 crc_err_11n;
u32 crc_err_11n_vht;
u32 acc_crc_ratio;
u32 now_crc_ratio;
u32 cnt_crcok_max_in_10s;
boolean cck_lock;
boolean cck_lock_ever;
boolean cck_lock_warn;
u8 coex_table_type;
boolean force_lps_ctrl;
u8 dis_ver_info_cnt;
u8 a2dp_bit_pool;
u8 kt_ver;
boolean concurrent_rx_mode_on;
u16 score_board;
u8 isolation_btween_wb; /* 0~ 50 */
u8 wifi_coex_thres;
u8 bt_coex_thres;
u8 wifi_coex_thres2;
u8 bt_coex_thres2;
u8 num_of_profile;
boolean acl_busy;
boolean bt_create_connection;
boolean wifi_is_high_pri_task;
u32 specific_pkt_period_cnt;
u32 bt_coex_supported_feature;
u32 bt_coex_supported_version;
u8 bt_ble_scan_type;
u32 bt_ble_scan_para[3];
boolean run_time_state;
boolean freeze_coexrun_by_btinfo;
boolean is_A2DP_3M;
boolean voice_over_HOGP;
u8 bt_info;
u8 forbidden_slot;
u8 hid_busy_num;
u8 hid_pair_cnt;
u32 cnt_remotenamereq;
u32 cnt_setuplink;
u32 cnt_reinit;
u32 cnt_ignwlanact;
u32 cnt_page;
u32 cnt_roleswitch;
u16 bt_reg_vendor_ac;
u16 bt_reg_vendor_ae;
boolean is_setup_link;
boolean wl_noisy_level;
u32 gnt_error_cnt;
u8 bt_afh_map[10];
u8 bt_relink_downcount;
boolean is_tdma_btautoslot;
boolean is_tdma_btautoslot_hang;
boolean is_esco_mode;
boolean is_rf_state_off;
boolean is_hid_low_pri_tx_overhead;
boolean is_bt_multi_link;
boolean is_bt_a2dp_sink;
u8 wl_fw_dbg_info[10];
u8 wl_rx_rate;
u8 wl_rts_rx_rate;
u16 score_board_WB;
boolean is_2g_freerun;
boolean is_hid_rcu;
boolean is_ble_scan_en;
u16 legacy_forbidden_slot;
u16 le_forbidden_slot;
u8 bt_a2dp_vendor_id;
u32 bt_a2dp_device_name;
boolean is_bt_opp_exist;
boolean is_no_wl_5ms_extend;
u16 wl_0x42a_backup;
u32 wl_0x430_backup;
u32 wl_0x434_backup;
u8 wl_0x456_backup;
boolean wl_tx_limit_en;
boolean wl_ampdu_limit_en;
boolean wl_rxagg_limit_en;
u8 wl_rxagg_size;
};
#define BT_8723D_2ANT_ANTDET_PSD_POINTS 256 /* MAX:1024 */
#define BT_8723D_2ANT_ANTDET_PSD_AVGNUM 1 /* MAX:3 */
#define BT_8723D_2ANT_ANTDET_BUF_LEN 16
struct psdscan_sta_8723d_2ant {
u32 ant_det_bt_le_channel; /* BT LE Channel ex:2412 */
u32 ant_det_bt_tx_time;
u32 ant_det_pre_psdscan_peak_val;
boolean ant_det_is_ant_det_available;
u32 ant_det_psd_scan_peak_val;
boolean ant_det_is_btreply_available;
u32 ant_det_psd_scan_peak_freq;
u8 ant_det_result;
u8 ant_det_peak_val[BT_8723D_2ANT_ANTDET_BUF_LEN];
u8 ant_det_peak_freq[BT_8723D_2ANT_ANTDET_BUF_LEN];
u32 ant_det_try_count;
u32 ant_det_fail_count;
u32 ant_det_inteval_count;
u32 ant_det_thres_offset;
u32 real_cent_freq;
s32 real_offset;
u32 real_span;
u32 psd_band_width;
u32 psd_point;
u32 psd_report[1024];
u32 psd_report_max_hold[1024];
u32 psd_start_point;
u32 psd_stop_point;
u32 psd_max_value_point;
u32 psd_max_value;
u32 psd_max_value2;
u32 psd_avg_value;
u32 psd_loop_max_value[BT_8723D_2ANT_ANTDET_PSD_SWWEEPCOUNT];
u32 psd_start_base;
u32 psd_avg_num; /* 1/8/16/32 */
u32 psd_gen_count;
boolean is_antdet_running;
boolean is_psd_show_max_only;
};
/* *******************************************
 * The following is the interface that notifies the coex module.
* ******************************************* */
void ex_halbtc8723d2ant_power_on_setting(IN struct btc_coexist *btcoexist);
void ex_halbtc8723d2ant_pre_load_firmware(IN struct btc_coexist *btcoexist);
void ex_halbtc8723d2ant_init_hw_config(IN struct btc_coexist *btcoexist,
IN boolean wifi_only);
void ex_halbtc8723d2ant_init_coex_dm(IN struct btc_coexist *btcoexist);
void ex_halbtc8723d2ant_ips_notify(IN struct btc_coexist *btcoexist,
IN u8 type);
void ex_halbtc8723d2ant_lps_notify(IN struct btc_coexist *btcoexist,
IN u8 type);
void ex_halbtc8723d2ant_scan_notify(IN struct btc_coexist *btcoexist,
IN u8 type);
void ex_halbtc8723d2ant_connect_notify(IN struct btc_coexist *btcoexist,
IN u8 type);
void ex_halbtc8723d2ant_media_status_notify(IN struct btc_coexist *btcoexist,
IN u8 type);
void ex_halbtc8723d2ant_specific_packet_notify(IN struct btc_coexist *btcoexist,
IN u8 type);
void ex_halbtc8723d2ant_bt_info_notify(IN struct btc_coexist *btcoexist,
IN u8 *tmp_buf, IN u8 length);
void ex_halbtc8723d2ant_wl_fwdbginfo_notify(IN struct btc_coexist *btcoexist,
IN u8 *tmp_buf, IN u8 length);
void ex_halbtc8723d2ant_rx_rate_change_notify(IN struct btc_coexist *btcoexist,
IN BOOLEAN is_data_frame, IN u8 btc_rate_id);
void ex_halbtc8723d2ant_rf_status_notify(IN struct btc_coexist *btcoexist,
IN u8 type);
void ex_halbtc8723d2ant_halt_notify(IN struct btc_coexist *btcoexist);
void ex_halbtc8723d2ant_pnp_notify(IN struct btc_coexist *btcoexist,
IN u8 pnp_state);
void ex_halbtc8723d2ant_set_antenna_notify(IN struct btc_coexist *btcoexist,
IN u8 type);
void ex_halbtc8723d2ant_periodical(IN struct btc_coexist *btcoexist);
void ex_halbtc8723d2ant_display_coex_info(IN struct btc_coexist *btcoexist);
void ex_halbtc8723d2ant_antenna_detection(IN struct btc_coexist *btcoexist,
IN u32 cent_freq, IN u32 offset, IN u32 span, IN u32 seconds);
void ex_halbtc8723d2ant_display_ant_detection(IN struct btc_coexist *btcoexist);
#else
#define ex_halbtc8723d2ant_power_on_setting(btcoexist)
#define ex_halbtc8723d2ant_pre_load_firmware(btcoexist)
#define ex_halbtc8723d2ant_init_hw_config(btcoexist, wifi_only)
#define ex_halbtc8723d2ant_init_coex_dm(btcoexist)
#define ex_halbtc8723d2ant_ips_notify(btcoexist, type)
#define ex_halbtc8723d2ant_lps_notify(btcoexist, type)
#define ex_halbtc8723d2ant_scan_notify(btcoexist, type)
#define ex_halbtc8723d2ant_connect_notify(btcoexist, type)
#define ex_halbtc8723d2ant_media_status_notify(btcoexist, type)
#define ex_halbtc8723d2ant_specific_packet_notify(btcoexist, type)
#define ex_halbtc8723d2ant_bt_info_notify(btcoexist, tmp_buf, length)
#define ex_halbtc8723d2ant_wl_fwdbginfo_notify(btcoexist, tmp_buf, length)
#define ex_halbtc8723d2ant_rx_rate_change_notify(btcoexist, is_data_frame, btc_rate_id)
#define ex_halbtc8723d2ant_rf_status_notify(btcoexist, type)
#define ex_halbtc8723d2ant_halt_notify(btcoexist)
#define ex_halbtc8723d2ant_pnp_notify(btcoexist, pnp_state)
#define ex_halbtc8723d2ant_periodical(btcoexist)
#define ex_halbtc8723d2ant_display_coex_info(btcoexist)
#define ex_halbtc8723d2ant_set_antenna_notify(btcoexist, type)
#define ex_halbtc8723d2ant_display_ant_detection(btcoexist)
#define ex_halbtc8723d2ant_antenna_detection(btcoexist, cent_freq, offset, span, seconds)
#endif
#endif
| 7,570 |
1,056 |
<filename>java/debugger.jpda/src/org/netbeans/modules/debugger/jpda/expr/EvaluatorExpression.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.debugger.jpda.expr;
import java.util.HashMap;
import java.util.Map;
import org.netbeans.api.debugger.jpda.InvalidExpressionException;
import org.netbeans.modules.debugger.jpda.JavaEvaluator;
import org.netbeans.spi.debugger.jpda.Evaluator;
/**
*
* @author <NAME>
*/
public final class EvaluatorExpression implements CompilationInfoHolder {
private final String expression;
private final Map<Evaluator, AssociatedExpression<?>> associatedExpressions = new HashMap<Evaluator, AssociatedExpression<?>>();
private Object parsedData;
public EvaluatorExpression(String expression) {
this.expression = expression;
}
public String getExpression() {
return expression;
}
public Evaluator.Result evaluate(Evaluator<?> e, Evaluator.Context context) throws InvalidExpressionException {
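        // Parsed expressions are cached per evaluator, so repeated
        // evaluations of the same text skip re-parsing.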
AssociatedExpression<?> ae = associatedExpressions.get(e);
if (ae == null) {
            ae = new AssociatedExpression<>(e, expression);
associatedExpressions.put(e, ae);
}
if (e instanceof JavaEvaluator) {
return ((JavaEvaluator) e).evaluate((Evaluator.Expression<JavaExpression>) ae.expr,
context, this);
}
return ae.evaluate(context);
}
@Override
public Object getParsedData() {
return parsedData;
}
@Override
public void setParsedData(Object parsedData) {
this.parsedData = parsedData;
}
private static class AssociatedExpression<PI> {
private final Evaluator<PI> e;
private final Evaluator.Expression<PI> expr;
AssociatedExpression(Evaluator<PI> e, String expression) {
this.e = e;
this.expr = new Evaluator.Expression<>(expression);
}
public Evaluator.Result evaluate(Evaluator.Context context) throws InvalidExpressionException {
return e.evaluate(expr, context);
}
}
}
| 1,039 |
511 |
#include <spng.h>
#include <iostream>
int main(int argc, char **argv)
{
(void)argc; (void)argv;
unsigned char buf[30] = {0};
spng_ctx *ctx = spng_ctx_new(0);
spng_set_png_buffer(ctx, buf, 30);
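  // Assumed intent: querying the PLTE of a zeroed 30-byte buffer should
  // fail, and the snippet just exercises the API and prints the error.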
struct spng_plte plte;
int e = spng_get_plte(ctx, &plte);
std::cout << spng_strerror(e);
spng_ctx_free(ctx);
return 0;
}
| 176 |
1,928 |
<filename>dpark/file_manager/mfs_proxy.py
from __future__ import absolute_import
import os
import grp
import time
import socket
import getpass
from .utils import unpack, pack, uint8, attrToFileInfo, uint64
from .consts import *
import six
from six.moves import range
from dpark.utils.log import get_logger
logger = get_logger(__name__)
class ProtocolError(Exception):
pass
class Chunk:
def __init__(self, index, id_, file_length, version, csdata, ele_width=6):
self.index = index
self.id = id_
self.file_length = file_length
self.length = min(file_length - index * CHUNKSIZE, CHUNKSIZE)
self.version = version
self.ele_width = ele_width
self.addrs = self._parse(csdata)
def _parse(self, csdata):
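        # csdata is a flat byte string of chunk-server entries, ele_width
        # bytes each, starting with ip(4) + port(2); wider entries carry
        # extra per-server fields that are not needed here.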
return [(socket.inet_ntoa(csdata[i:i + 4]),
unpack("H", csdata[i + 4:i + 6])[0])
for i in range(0, len(csdata), self.ele_width)]
def __repr__(self):
return "<Chunk(%d, %d, %d)>" % (self.id, self.version, self.length)
class ProxyConn(object):
def __init__(self, host, port, version=(0, 0, 0)):
self.host = host
self.port = port
self.version = version
self.uid = os.getuid()
self.gids = [g.gr_gid for g in grp.getgrall() if getpass.getuser() in g.gr_mem]
self.gids.insert(0, os.getgid())
self.conn = None
self.msgid = 0
@classmethod
def get_masterinfo(cls, path):
while path != os.path.sep:
mp = os.path.join(path, ".masterinfo")
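            # ".masterinfo" is a virtual file exposed by the MFS mount,
            # recognized by its magic inode, single hard link, root
            # ownership and fixed size (10 or 14 bytes; 14 when version
            # info follows).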
try:
stb = os.lstat(mp)
except OSError:
pass
else:
if stb.st_ino in (0x7FFFFFFF, 0x7FFFFFFE) and \
stb.st_nlink == 1 and \
stb.st_uid == 0 and \
stb.st_gid == 0 and \
stb.st_size in (10, 14):
sz = stb.st_size
with open(mp, 'rb', 0) as f:
proxyinfo = f.read(sz)
if len(proxyinfo) != sz:
raise Exception('fail to read master info from %s' % mp)
ip = socket.inet_ntoa(proxyinfo[:4])
port, = unpack("H", proxyinfo[4:])
if stb.st_size > 10:
major_version, mid_version, minor_version = unpack('HBB', proxyinfo[10:])
if major_version > 1:
minor_version /= 2
return ip, port, (major_version, mid_version, minor_version)
else:
return ip, port, (0, 0, 0)
path = os.path.dirname(path)
def recv_full(self, n):
r = b""
while len(r) < n:
rr = self.conn.recv(n - len(r))
if not rr:
                raise IOError('need %d bytes, got %d' % (n, len(r)))
r += rr
return r
def send_full(self, buf):
n = self.conn.send(buf)
while n < len(buf):
sent = self.conn.send(buf[n:])
if not sent:
raise IOError('Write failed')
n += sent
def _connect(self):
if self.conn is not None:
return
N = 8
for i in range(N):
try:
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((self.host, self.port))
self.conn = conn
return
except socket.error as e:
if i == N - 1:
raise Exception("Fail to connect to mfs proxy %s:%s, %s", self.host, self.port, e)
time.sleep(1.5 ** i) # 1.5**8 = 25.6
def _recv_cmd(self, cmd):
d = self.recv_full(12)
rcmd, size, msgid = unpack("III", d)
data = self.recv_full(size - 4)
if rcmd != cmd + 1:
raise ProtocolError("get incorrect cmd (%s \!= %s)" % (rcmd, cmd + 1))
if msgid != self.msgid:
raise ProtocolError('get incorrect msgid(%s != %s)' % (msgid, self.msgid))
return data
def sendAndReceive(self, cmd, *args):
self.msgid += 1
msg = pack(cmd, self.msgid, *args)
num_retry = 3
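        # Retry with exponential backoff (0.1s, 0.2s, ...); a failed attempt
        # tears down the connection so the next iteration reconnects.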
for i in range(num_retry):
self._connect()
try:
self.send_full(msg)
data = self._recv_cmd(cmd)
return data
except IOError:
self.terminate()
if i == num_retry - 1:
raise
else:
time.sleep(2 ** i * 0.1)
def terminate(self):
self.conn.close()
self.conn = None
def getdirplus(self, inode, max_entries=0xFFFFFFFF, nedgeid=0):
flag = GETDIR_FLAG_WITHATTR
if self.version < (2, 0, 0):
ans = self.sendAndReceive(CLTOMA_FUSE_READDIR, inode, self.uid, self.gids[0], uint8(flag))
else:
gidsize = len(self.gids)
gids = [gid for gid in self.gids]
gids.append(uint8(flag))
gids.append(max_entries)
gids.append(uint64(nedgeid))
ans = self.sendAndReceive(CLTOMA_FUSE_READDIR, inode,
self.uid, gidsize, *gids)
p = 0
infos = {}
# rnedgeid, = unpack('Q', ans[p: p + 8])
p += 8
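        # Each directory entry is packed as name_length(1) + name +
        # inode(4) + attr(35).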
while p < len(ans):
length, = unpack('B', ans[p: p + 1])
p += 1
name = ans[p: p + length]
if not six.PY2:
name = name.decode('utf-8')
p += length
i, = unpack("I", ans[p: p + 4])
p += 4
attr = ans[p: p + 35]
infos[name] = attrToFileInfo(i, attr, name, self.version)
p += 35
return infos
def getattr(self, inode, opened=0):
if self.version < (1, 6, 28):
ans = self.sendAndReceive(CLTOMA_FUSE_GETATTR, inode,
self.uid, self.gids[0])
else:
ans = self.sendAndReceive(CLTOMA_FUSE_GETATTR, inode,
uint8(opened), self.uid, self.gids[0])
return attrToFileInfo(inode, ans[:35], version=self.version)
def readlink(self, inode):
ans = self.sendAndReceive(CLTOMA_FUSE_READLINK, inode)
length, = unpack('I', ans)
if length + 4 != len(ans):
raise Exception('invalid length')
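        # The returned path appears to carry a trailing NUL byte, which the
        # slice below strips.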
return ans[4: -1]
def readchunk(self, inode, index, chunkopflags=0):
"""
// msgid:32 length:64 chunkid:64 version:32 N*[ ip:32 port:16 ]
// msgid:32 protocolid:8 length:64 chunkid:64 version:32 N*[ ip:32 port:16 cs_ver:32 ]
(master and client both versions >= 1.7.32 - protocolid==1)
// msgid:32 protocolid:8 length:64 chunkid:64 version:32 N*[ ip:32 port:16 cs_ver:32 labelmask:32 ]
(master and client both versions >= 3.0.10 - protocolid==2)
"""
cnt = 0
while True:
cnt += 1
if self.version < (3, 0, 4):
ans = self.sendAndReceive(CLTOMA_FUSE_READ_CHUNK, inode, index)
else:
ans = self.sendAndReceive(CLTOMA_FUSE_READ_CHUNK, inode,
index, uint8(chunkopflags))
n = len(ans)
if n == 1:
from .utils import Error
err = ord(ans)
if err == ERROR_LOCKED:
if cnt < 100:
time.sleep(0.1)
continue
logger.warning('Waited too long for locked chunk %s:%s', inode, index)
raise Error(ord(ans))
if n < 20:
                raise Exception('read chunk invalid length: %s (expected at least 20)' % n)
            # self.version is the master's version, not mfsmount's
if self.version >= (1, 7, 32) and ((n - 21) % 14 == 0 or (n - 21) % 10 == 0):
protocolid, flength, id_, version = unpack('BQQI', ans)
if protocolid == 2:
assert (n - 21) % 14 == 0, n
return Chunk(index, id_, flength, version, ans[21:], ele_width=14)
elif protocolid == 1:
assert (n - 21) % 10 == 0, n
return Chunk(index, id_, flength, version, ans[21:], ele_width=10)
assert (n - 20) % 6 == 0, n
flength, id_, version = unpack("QQI", ans)
return Chunk(index, id_, flength, version, ans[20:])
| 4,804 |
393 |
<gh_stars>100-1000
from . import base_config
__all__ = ['base_config']
| 26 |
302 |
package io.github.iamazy.elasticsearch.dsl.jdbc.statement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLWarning;
/**
* @author iamazy
* @date 2019/12/16
**/
public abstract class AbstractStatement extends AbstractFeatureNotSupportedStatement {
private boolean isClosed=false;
private int timeout;
@Override
public void close() throws SQLException {
isClosed=true;
}
@Override
public int getMaxFieldSize() throws SQLException {
return 0;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
//ignore
}
@Override
public int getMaxRows() throws SQLException {
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
//ignore
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
//ignore
}
@Override
public int getQueryTimeout() throws SQLException {
return timeout;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
this.timeout=seconds;
}
@Override
public void cancel() throws SQLException {
//ignore
}
@Override
public SQLWarning getWarnings() throws SQLException {
return null;
}
@Override
public void clearWarnings() throws SQLException {
//ignore
}
@Override
public int getFetchSize() throws SQLException {
return 0;
}
@Override
public void setFetchSize(int rows) throws SQLException {
//ignore
}
@Override
public int getResultSetConcurrency() throws SQLException {
return ResultSet.CONCUR_READ_ONLY;
}
@Override
public int getResultSetType() throws SQLException {
return ResultSet.TYPE_FORWARD_ONLY;
}
@Override
public boolean getMoreResults(int current) throws SQLException {
return false;
}
@Override
public int getResultSetHoldability() throws SQLException {
return ResultSet.CLOSE_CURSORS_AT_COMMIT;
}
@Override
public boolean isClosed() {
return isClosed;
}
@Override
public boolean isPoolable() throws SQLException {
return false;
}
@Override
public void setPoolable(boolean poolable) throws SQLException {
//ignore
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
if(isWrapperFor(iface)){
return (T)this;
}
throw new SQLException(String.format("[%s] cannot be unwrapped as [%s]",getClass().getName(),iface.getName()));
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface.isInstance(this);
}
/**
     * Execute sql for a PreparedStatement,
     * e.g.: select * from user where name = ?
     * @param sql the parameterized sql to execute
     * @param args the bound parameter values
     * @return the query result set
     * @throws SQLException if execution fails
*/
    protected abstract ResultSet executeQuery(String sql, Object[] args) throws SQLException;
}
| 1,215 |
4,535 |
<gh_stars>1000+
// Copyright 2017-2018 Intel Corporation.
#include "tile/platform/local_machine/tdep_scheduler.h"
namespace vertexai {
namespace tile {
namespace local_machine {
TransitiveDepScheduler::TransitiveDepScheduler(const std::shared_ptr<Placer>& placer, std::size_t max_in_flight)
: placer_{placer}, max_in_flight_{max_in_flight} {}
schedule::Schedule TransitiveDepScheduler::BuildSchedule(const tile::proto::Program& program,
const lang::KernelList& kl) {
schedule::Schedule schedule = ToScheduleSteps(program, kl);
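  // Dataflow deps capture the correctness ordering; the optional linear
  // deps additionally bound how many steps may be in flight at once.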
AddDataflowDeps(&schedule);
if (max_in_flight_) {
AddLinearDeps(&schedule, max_in_flight_);
}
placer_->PlaceSchedule(program, &schedule)->Apply();
return schedule;
}
const char* TransitiveDepScheduler::name() const { return "TransitiveDep"; }
} // namespace local_machine
} // namespace tile
} // namespace vertexai
| 355 |
348 |
<reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000
{"nom":"Barésia-sur-l'Ain","circ":"2ème circonscription","dpt":"Jura","inscrits":126,"abs":62,"votants":64,"blancs":8,"nuls":1,"exp":55,"res":[{"nuance":"LR","nom":"<NAME>","voix":34},{"nuance":"MDM","nom":"<NAME>","voix":21}]}
| 123 |
1,144 |
// Generated Model - DO NOT CHANGE
package org.compiere.model;
import java.sql.ResultSet;
import java.util.Properties;
import javax.annotation.Nullable;
/** Generated Model for AD_Window
* @author metasfresh (generated)
*/
@SuppressWarnings("unused")
public class X_AD_Window extends org.compiere.model.PO implements I_AD_Window, org.compiere.model.I_Persistent
{
private static final long serialVersionUID = -1673008926L;
/** Standard Constructor */
public X_AD_Window (final Properties ctx, final int AD_Window_ID, @Nullable final String trxName)
{
super (ctx, AD_Window_ID, trxName);
}
/** Load Constructor */
public X_AD_Window (final Properties ctx, final ResultSet rs, @Nullable final String trxName)
{
super (ctx, rs, trxName);
}
/** Load Meta Data */
@Override
protected org.compiere.model.POInfo initPO(final Properties ctx)
{
return org.compiere.model.POInfo.getPOInfo(Table_Name);
}
@Override
public org.compiere.model.I_AD_Color getAD_Color()
{
return get_ValueAsPO(COLUMNNAME_AD_Color_ID, org.compiere.model.I_AD_Color.class);
}
@Override
public void setAD_Color(final org.compiere.model.I_AD_Color AD_Color)
{
set_ValueFromPO(COLUMNNAME_AD_Color_ID, org.compiere.model.I_AD_Color.class, AD_Color);
}
@Override
public void setAD_Color_ID (final int AD_Color_ID)
{
if (AD_Color_ID < 1)
set_Value (COLUMNNAME_AD_Color_ID, null);
else
set_Value (COLUMNNAME_AD_Color_ID, AD_Color_ID);
}
@Override
public int getAD_Color_ID()
{
return get_ValueAsInt(COLUMNNAME_AD_Color_ID);
}
@Override
public org.compiere.model.I_AD_Element getAD_Element()
{
return get_ValueAsPO(COLUMNNAME_AD_Element_ID, org.compiere.model.I_AD_Element.class);
}
@Override
public void setAD_Element(final org.compiere.model.I_AD_Element AD_Element)
{
set_ValueFromPO(COLUMNNAME_AD_Element_ID, org.compiere.model.I_AD_Element.class, AD_Element);
}
@Override
public void setAD_Element_ID (final int AD_Element_ID)
{
if (AD_Element_ID < 1)
set_Value (COLUMNNAME_AD_Element_ID, null);
else
set_Value (COLUMNNAME_AD_Element_ID, AD_Element_ID);
}
@Override
public int getAD_Element_ID()
{
return get_ValueAsInt(COLUMNNAME_AD_Element_ID);
}
@Override
public org.compiere.model.I_AD_Image getAD_Image()
{
return get_ValueAsPO(COLUMNNAME_AD_Image_ID, org.compiere.model.I_AD_Image.class);
}
@Override
public void setAD_Image(final org.compiere.model.I_AD_Image AD_Image)
{
set_ValueFromPO(COLUMNNAME_AD_Image_ID, org.compiere.model.I_AD_Image.class, AD_Image);
}
@Override
public void setAD_Image_ID (final int AD_Image_ID)
{
if (AD_Image_ID < 1)
set_Value (COLUMNNAME_AD_Image_ID, null);
else
set_Value (COLUMNNAME_AD_Image_ID, AD_Image_ID);
}
@Override
public int getAD_Image_ID()
{
return get_ValueAsInt(COLUMNNAME_AD_Image_ID);
}
@Override
public void setAD_Window_ID (final int AD_Window_ID)
{
if (AD_Window_ID < 1)
set_ValueNoCheck (COLUMNNAME_AD_Window_ID, null);
else
set_ValueNoCheck (COLUMNNAME_AD_Window_ID, AD_Window_ID);
}
@Override
public int getAD_Window_ID()
{
return get_ValueAsInt(COLUMNNAME_AD_Window_ID);
}
@Override
public void setDescription (final @Nullable java.lang.String Description)
{
set_Value (COLUMNNAME_Description, Description);
}
@Override
public java.lang.String getDescription()
{
return get_ValueAsString(COLUMNNAME_Description);
}
/**
* EntityType AD_Reference_ID=389
* Reference name: _EntityTypeNew
*/
public static final int ENTITYTYPE_AD_Reference_ID=389;
@Override
public void setEntityType (final java.lang.String EntityType)
{
set_Value (COLUMNNAME_EntityType, EntityType);
}
@Override
public java.lang.String getEntityType()
{
return get_ValueAsString(COLUMNNAME_EntityType);
}
@Override
public void setHelp (final @Nullable java.lang.String Help)
{
set_Value (COLUMNNAME_Help, Help);
}
@Override
public java.lang.String getHelp()
{
return get_ValueAsString(COLUMNNAME_Help);
}
@Override
public void setInternalName (final @Nullable java.lang.String InternalName)
{
set_Value (COLUMNNAME_InternalName, InternalName);
}
@Override
public java.lang.String getInternalName()
{
return get_ValueAsString(COLUMNNAME_InternalName);
}
@Override
public void setIsBetaFunctionality (final boolean IsBetaFunctionality)
{
set_Value (COLUMNNAME_IsBetaFunctionality, IsBetaFunctionality);
}
@Override
public boolean isBetaFunctionality()
{
return get_ValueAsBoolean(COLUMNNAME_IsBetaFunctionality);
}
@Override
public void setIsDefault (final boolean IsDefault)
{
set_Value (COLUMNNAME_IsDefault, IsDefault);
}
@Override
public boolean isDefault()
{
return get_ValueAsBoolean(COLUMNNAME_IsDefault);
}
@Override
public void setIsEnableRemoteCacheInvalidation (final boolean IsEnableRemoteCacheInvalidation)
{
set_Value (COLUMNNAME_IsEnableRemoteCacheInvalidation, IsEnableRemoteCacheInvalidation);
}
@Override
public boolean isEnableRemoteCacheInvalidation()
{
return get_ValueAsBoolean(COLUMNNAME_IsEnableRemoteCacheInvalidation);
}
@Override
public void setIsExcludeFromZoomTargets (final boolean IsExcludeFromZoomTargets)
{
set_Value (COLUMNNAME_IsExcludeFromZoomTargets, IsExcludeFromZoomTargets);
}
@Override
public boolean isExcludeFromZoomTargets()
{
return get_ValueAsBoolean(COLUMNNAME_IsExcludeFromZoomTargets);
}
@Override
public void setIsOverrideInMenu (final boolean IsOverrideInMenu)
{
set_Value (COLUMNNAME_IsOverrideInMenu, IsOverrideInMenu);
}
@Override
public boolean isOverrideInMenu()
{
return get_ValueAsBoolean(COLUMNNAME_IsOverrideInMenu);
}
@Override
public void setIsSOTrx (final boolean IsSOTrx)
{
set_Value (COLUMNNAME_IsSOTrx, IsSOTrx);
}
@Override
public boolean isSOTrx()
{
return get_ValueAsBoolean(COLUMNNAME_IsSOTrx);
}
@Override
public void setName (final java.lang.String Name)
{
set_Value (COLUMNNAME_Name, Name);
}
@Override
public java.lang.String getName()
{
return get_ValueAsString(COLUMNNAME_Name);
}
@Override
public org.compiere.model.I_AD_Window getOverrides_Window()
{
return get_ValueAsPO(COLUMNNAME_Overrides_Window_ID, org.compiere.model.I_AD_Window.class);
}
@Override
public void setOverrides_Window(final org.compiere.model.I_AD_Window Overrides_Window)
{
set_ValueFromPO(COLUMNNAME_Overrides_Window_ID, org.compiere.model.I_AD_Window.class, Overrides_Window);
}
@Override
public void setOverrides_Window_ID (final int Overrides_Window_ID)
{
if (Overrides_Window_ID < 1)
set_Value (COLUMNNAME_Overrides_Window_ID, null);
else
set_Value (COLUMNNAME_Overrides_Window_ID, Overrides_Window_ID);
}
@Override
public int getOverrides_Window_ID()
{
return get_ValueAsInt(COLUMNNAME_Overrides_Window_ID);
}
@Override
public void setProcessing (final boolean Processing)
{
set_Value (COLUMNNAME_Processing, Processing);
}
@Override
public boolean isProcessing()
{
return get_ValueAsBoolean(COLUMNNAME_Processing);
}
/**
* WindowType AD_Reference_ID=108
* Reference name: AD_Window Types
*/
public static final int WINDOWTYPE_AD_Reference_ID=108;
/** Single Record = S */
public static final String WINDOWTYPE_SingleRecord = "S";
/** Maintain = M */
public static final String WINDOWTYPE_Maintain = "M";
/** Transaktion = T */
public static final String WINDOWTYPE_Transaktion = "T";
/** Query Only = Q */
public static final String WINDOWTYPE_QueryOnly = "Q";
@Override
public void setWindowType (final java.lang.String WindowType)
{
set_Value (COLUMNNAME_WindowType, WindowType);
}
@Override
public java.lang.String getWindowType()
{
return get_ValueAsString(COLUMNNAME_WindowType);
}
@Override
public void setWinHeight (final int WinHeight)
{
set_Value (COLUMNNAME_WinHeight, WinHeight);
}
@Override
public int getWinHeight()
{
return get_ValueAsInt(COLUMNNAME_WinHeight);
}
@Override
public void setWinWidth (final int WinWidth)
{
set_Value (COLUMNNAME_WinWidth, WinWidth);
}
@Override
public int getWinWidth()
{
return get_ValueAsInt(COLUMNNAME_WinWidth);
}
@Override
public void setZoomIntoPriority (final int ZoomIntoPriority)
{
set_Value (COLUMNNAME_ZoomIntoPriority, ZoomIntoPriority);
}
@Override
public int getZoomIntoPriority()
{
return get_ValueAsInt(COLUMNNAME_ZoomIntoPriority);
}
}
| 3,227 |
3,579 |
<gh_stars>1000+
/*
* Copyright 2015, The Querydsl Team (http://www.querydsl.com/team)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.querydsl.jdo;
import static org.junit.Assert.assertEquals;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import javax.jdo.Query;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.Test;
import com.querydsl.jdo.test.domain.Product;
import com.querydsl.jdo.test.domain.QProduct;
import com.querydsl.jdo.test.domain.QStore;
public class FetchPlanTest extends AbstractJDOTest {
private JDOQuery<?> query;
@After
public void tearDown() {
if (query != null) {
query.close();
}
super.tearDown();
}
@SuppressWarnings("unchecked")
@Test
public void listProducts() throws Exception {
QProduct product = QProduct.product;
query = query();
query.from(product)
.where(product.name.startsWith("A"))
.addFetchGroup("myfetchgroup1")
.addFetchGroup("myfetchgroup2")
.setMaxFetchDepth(2)
.select(product).fetch();
// query.close();
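        // The delegate JDO queries live in a private field, so reflection is
        // used to inspect the fetch plan that was actually applied.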
Field queriesField = AbstractJDOQuery.class.getDeclaredField("queries");
queriesField.setAccessible(true);
List<Query> queries = (List<Query>) queriesField.get(query);
Query jdoQuery = queries.get(0);
assertEquals(new HashSet<String>(Arrays.asList("myfetchgroup1","myfetchgroup2")),
jdoQuery.getFetchPlan().getGroups());
assertEquals(2, jdoQuery.getFetchPlan().getMaxFetchDepth());
}
@SuppressWarnings("unchecked")
@Test
public void listStores() throws Exception {
QStore store = QStore.store;
query = query();
query.from(store)
.addFetchGroup("products")
.select(store).fetch();
Field queriesField = AbstractJDOQuery.class.getDeclaredField("queries");
queriesField.setAccessible(true);
List<Query> queries = (List<Query>) queriesField.get(query);
Query jdoQuery = queries.get(0);
assertEquals(new HashSet<String>(Collections.singletonList("products")),
jdoQuery.getFetchPlan().getGroups());
assertEquals(1, jdoQuery.getFetchPlan().getMaxFetchDepth());
}
@BeforeClass
public static void doPersist() {
List<Object> entities = new ArrayList<>();
for (int i = 0; i < 10; i++) {
entities.add(new Product("C" + i, "F", 200.00, 2));
entities.add(new Product("B" + i, "E", 400.00, 4));
entities.add(new Product("A" + i, "D", 600.00, 6));
}
doPersist(entities);
}
}
| 1,331 |
852 |
// -*- C++ -*-
// CMS includes
#include "FWCore/Utilities/interface/InputTag.h"
#include "DataFormats/Common/interface/Handle.h"
#include "DataFormats/JetReco/interface/CaloJet.h"
#include "PhysicsTools/FWLite/interface/EventContainer.h"
#include "PhysicsTools/FWLite/interface/CommandLineParser.h"
// Root includes
#include "TROOT.h"
using namespace std;
///////////////////////////
// ///////////////////// //
// // Main Subroutine // //
// ///////////////////// //
///////////////////////////
int main (int argc, char* argv[])
{
////////////////////////////////
// ////////////////////////// //
// // Command Line Options // //
// ////////////////////////// //
////////////////////////////////
   // Tell people what this analysis code does and set up default options.
optutl::CommandLineParser parser ("Plots Jet Pt");
////////////////////////////////////////////////
// Change any defaults or add any new command //
// line options you would like here. //
////////////////////////////////////////////////
parser.stringValue ("outputFile") = "jetPt"; // .root added automatically
// Parse the command line arguments
parser.parseArguments (argc, argv);
//////////////////////////////////
// //////////////////////////// //
// // Create Event Container // //
// //////////////////////////// //
//////////////////////////////////
   // This object 'eventCont' is used both to get all information from the
   // event and to store histograms, etc.
fwlite::EventContainer eventCont (parser);
////////////////////////////////////////
// ////////////////////////////////// //
// // Begin Run // //
// // (e.g., book histograms, etc) // //
// ////////////////////////////////// //
////////////////////////////////////////
// Setup a style
gROOT->SetStyle ("Plain");
// Book those histograms!
eventCont.add( new TH1F( "jetPt", "jetPt", 1000, 0, 1000) );
//////////////////////
// //////////////// //
// // Event Loop // //
// //////////////// //
//////////////////////
// create labels
edm::InputTag jetLabel ("sisCone5CaloJets");
for (eventCont.toBegin(); ! eventCont.atEnd(); ++eventCont)
{
//////////////////////////////////
// Take What We Need From Event //
//////////////////////////////////
edm::Handle< vector< reco::CaloJet > > jetHandle;
eventCont.getByLabel (jetLabel, jetHandle );
assert ( jetHandle.isValid() );
// Loop over the jets
const vector< reco::CaloJet >::const_iterator kJetEnd = jetHandle->end();
for (vector< reco::CaloJet >::const_iterator jetIter = jetHandle->begin();
kJetEnd != jetIter;
++jetIter)
{
eventCont.hist("jetPt")->Fill (jetIter->pt());
} // for jetIter
} // for eventCont
////////////////////////
// ////////////////// //
// // Clean Up Job // //
// ////////////////// //
////////////////////////
// Histograms will be automatically written to the root file
// specificed by command line options.
// All done! Bye bye.
return 0;
}
| 1,009 |
575 |
<reponame>mghgroup/Glide-Browser
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/events/pointer_event_util.h"
#include <cmath>
#include "third_party/blink/renderer/platform/wtf/math_extras.h"
namespace blink {
// static
double PointerEventUtil::AzimuthFromTilt(int32_t tilt_x_degrees,
int32_t tilt_y_degrees) {
DCHECK(tilt_x_degrees >= -90 && tilt_x_degrees <= 90);
DCHECK(tilt_y_degrees >= -90 && tilt_y_degrees <= 90);
if (tilt_x_degrees == 0) {
if (tilt_y_degrees > 0)
return kPiOverTwoDouble;
if (tilt_y_degrees < 0)
return 3.0 * kPiOverTwoDouble;
return 0.0;
}
if (tilt_y_degrees == 0) {
if (tilt_x_degrees < 0)
return kPiDouble;
return 0.0;
}
if (abs(tilt_x_degrees) == 90 || abs(tilt_y_degrees) == 90)
return 0.0;
DCHECK(tilt_x_degrees != 0.0 && tilt_y_degrees != 0.0 &&
abs(tilt_x_degrees) != 90 && abs(tilt_y_degrees) != 90);
const double tilt_x_radians = kPiDouble / 180.0 * tilt_x_degrees;
const double tilt_y_radians = kPiDouble / 180.0 * tilt_y_degrees;
const double tan_x = tan(tilt_x_radians);
const double tan_y = tan(tilt_y_radians);
double azimuth_radians = atan2(tan_y, tan_x);
azimuth_radians = (azimuth_radians >= 0) ? azimuth_radians
: (azimuth_radians + kTwoPiDouble);
DCHECK(azimuth_radians >= 0 && azimuth_radians <= kTwoPiDouble);
return azimuth_radians;
}
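// Worked example (values assumed for illustration): tilt_x == tilt_y == 45
// gives tan_x == tan_y == 1, so atan2(1, 1) == pi/4, i.e. the azimuth points
// into the first quadrant of the screen plane.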
// static
double PointerEventUtil::AltitudeFromTilt(int32_t tilt_x_degrees,
int32_t tilt_y_degrees) {
DCHECK(tilt_x_degrees >= -90 && tilt_x_degrees <= 90);
DCHECK(tilt_y_degrees >= -90 && tilt_y_degrees <= 90);
const double tilt_x_radians = kPiDouble / 180.0 * tilt_x_degrees;
const double tilt_y_radians = kPiDouble / 180.0 * tilt_y_degrees;
if (abs(tilt_x_degrees) == 90 || abs(tilt_y_degrees) == 90)
return 0;
if (tilt_x_degrees == 0)
return kPiOverTwoDouble - abs(tilt_y_radians);
if (tilt_y_degrees == 0)
return kPiOverTwoDouble - abs(tilt_x_radians);
return atan(1.0 /
sqrt(pow(tan(tilt_x_radians), 2) + pow(tan(tilt_y_radians), 2)));
}
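// Worked example (values assumed for illustration): zero tilt on both axes
// leaves the pen vertical (altitude kPiOverTwoDouble); a 90 degree tilt on
// either axis lays it flat (altitude 0).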
// static
int32_t PointerEventUtil::TiltXFromSpherical(double azimuth_radians,
double altitude_radians) {
DCHECK(azimuth_radians >= 0 && azimuth_radians <= kTwoPiDouble);
DCHECK(altitude_radians >= 0 && altitude_radians <= kPiOverTwoDouble);
if (altitude_radians != 0) {
// Not using std::round because we need Javascript Math.round behaviour
// here which is different
return std::floor(
rad2deg(atan(cos(azimuth_radians) / tan(altitude_radians))) + 0.5);
}
if (azimuth_radians == kPiOverTwoDouble ||
azimuth_radians == 3 * kPiOverTwoDouble) {
return 0;
} else if (azimuth_radians < kPiOverTwoDouble ||
azimuth_radians > 3 * kPiOverTwoDouble) {
// In 1st or 4th quadrant
return 90;
} else {
// In 2nd or 3rd quadrant
return -90;
}
}
// static
int32_t PointerEventUtil::TiltYFromSpherical(double azimuth_radians,
double altitude_radians) {
DCHECK(azimuth_radians >= 0 && azimuth_radians <= kTwoPiDouble);
DCHECK(altitude_radians >= 0 && altitude_radians <= kPiOverTwoDouble);
if (altitude_radians != 0) {
// Not using std::round because we need Javascript Math.round behaviour
// here which is different
return std::floor(
rad2deg(atan(sin(azimuth_radians) / tan(altitude_radians))) + 0.5);
}
if (azimuth_radians == 0 || azimuth_radians == kPiDouble ||
azimuth_radians == kTwoPiDouble) {
return 0;
} else if (azimuth_radians < kPiDouble) {
// 1st and 2nd quadrants
return 90;
} else {
// 3rd and 4th quadrants
return -90;
}
}
// static
int32_t PointerEventUtil::TransformToTiltInValidRange(int32_t tilt_degrees) {
if (tilt_degrees >= -90 && tilt_degrees <= 90)
return tilt_degrees;
// In order to avoid floating point division we'll make the assumption
// that |tilt_degrees| will NOT be far outside the valid range.
// With this assumption we can use loops and integer calculation to transform
// |tilt_degrees| into valid range.
while (tilt_degrees > 90)
tilt_degrees -= 180;
while (tilt_degrees < -90)
tilt_degrees += 180;
DCHECK(tilt_degrees >= -90 && tilt_degrees <= 90);
return tilt_degrees;
}
// static
double PointerEventUtil::TransformToAzimuthInValidRange(
double azimuth_radians) {
if (azimuth_radians >= 0 && azimuth_radians <= kTwoPiDouble)
return azimuth_radians;
// In order to avoid floating point division/multiplication we'll make the
// assumption that |azimuth_radians| will NOT be far outside the valid range.
// With this assumption we can use loops and addition/subtraction to
// transform |azimuth_radians| into valid range.
while (azimuth_radians > kTwoPiDouble)
azimuth_radians -= kTwoPiDouble;
while (azimuth_radians < 0)
azimuth_radians += kTwoPiDouble;
DCHECK(azimuth_radians >= 0 && azimuth_radians <= kTwoPiDouble);
return azimuth_radians;
}
// static
double PointerEventUtil::TransformToAltitudeInValidRange(
double altitude_radians) {
if (altitude_radians >= 0 && altitude_radians <= kPiOverTwoDouble)
return altitude_radians;
// In order to avoid floating point division/multiplication we'll make the
// assumption that |altitude_radians| will NOT be far outside the valid range.
// With this assumption we can use loops and addition/subtraction to
// transform |altitude_radians| into valid range
while (altitude_radians > kPiOverTwoDouble)
altitude_radians -= kPiOverTwoDouble;
while (altitude_radians < 0)
altitude_radians += kPiOverTwoDouble;
DCHECK(altitude_radians >= 0 && altitude_radians <= kPiOverTwoDouble);
return altitude_radians;
}
} // namespace blink
| 2,464 |
453 |
#include <math.h>
#include "headers/cosh.h"
double cosh(double x)
{
return _cosh(x);
}
| 41 |
347 |
<reponame>hbraha/ovirt-engine
package org.ovirt.engine.core.bll.network;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasEntry;
import static org.mockito.ArgumentMatchers.same;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.Map;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.ovirt.engine.core.bll.provider.ProviderProxyFactory;
import org.ovirt.engine.core.bll.provider.network.NetworkProviderProxy;
import org.ovirt.engine.core.common.AuditLogType;
import org.ovirt.engine.core.common.businessentities.Provider;
import org.ovirt.engine.core.common.businessentities.network.Network;
import org.ovirt.engine.core.common.businessentities.network.ProviderNetwork;
import org.ovirt.engine.core.common.businessentities.network.VmNic;
import org.ovirt.engine.core.common.errors.EngineException;
import org.ovirt.engine.core.compat.Guid;
import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector;
import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogable;
import org.ovirt.engine.core.dao.provider.ProviderDao;
import org.ovirt.engine.core.utils.InjectedMock;
import org.ovirt.engine.core.utils.InjectorExtension;
@ExtendWith({MockitoExtension.class, InjectorExtension.class})
public class ExternalNetworkManagerTest {
private static final Guid NIC_ID = Guid.newGuid();
private static final String NIC_NAME = "nic name";
private static final Guid PROVIDER_ID = Guid.newGuid();
private static final String PROVIDER_NAME = "provider name";
@Mock
@InjectedMock
public AuditLogDirector auditLogDirector;
@Mock
@InjectedMock
public ProviderDao providerDao;
@Mock
private ProviderProxyFactory providerProxyFactory;
@Mock
private NetworkProviderProxy networkProviderProxy;
@Captor
private ArgumentCaptor<AuditLogable> auditLogableCaptor;
private VmNic nic = new VmNic();
private Network network = createNetwork();
@InjectMocks
private ExternalNetworkManager underTest = new ExternalNetworkManager(nic, network);
private ProviderNetwork providerNetwork;
private Provider provider;
@BeforeEach
public void setUp() {
provider = new Provider<>();
when(providerDao.get(PROVIDER_ID)).thenReturn(provider);
when(providerProxyFactory.create(provider)).thenReturn(networkProviderProxy);
}
private Network createNetwork() {
final Network network = new Network();
providerNetwork = new ProviderNetwork();
providerNetwork.setProviderId(PROVIDER_ID);
network.setProvidedBy(providerNetwork);
return network;
}
@Test
public void testDeallocateIfExternalPositive() {
underTest.deallocateIfExternal();
verify(networkProviderProxy).deallocate(nic);
}
@Test
public void testDeallocateIfExternalThrowException() {
nic.setName(NIC_NAME);
nic.setId(NIC_ID);
provider.setName(PROVIDER_NAME);
doThrow(new EngineException()).when(networkProviderProxy).deallocate(nic);
underTest.deallocateIfExternal();
verify(auditLogDirector).log(
auditLogableCaptor.capture(),
same(AuditLogType.REMOVE_PORT_FROM_EXTERNAL_PROVIDER_FAILED));
final Map<String, String> capturedCustomValues = auditLogableCaptor.getValue().getCustomValues();
assertThat(capturedCustomValues, hasEntry("nicname", NIC_NAME));
assertThat(capturedCustomValues, hasEntry("nicid", NIC_ID.toString()));
assertThat(capturedCustomValues, hasEntry("providername", PROVIDER_NAME));
}
}
| 1,426 |
10,225 |
<filename>extensions/resteasy-classic/resteasy-multipart/runtime/src/main/java/io/quarkus/resteasy/multipart/runtime/MultipartInputPartConfigContainerRequestFilter.java
package io.quarkus.resteasy.multipart.runtime;
import java.io.IOException;
import javax.enterprise.inject.Instance;
import javax.inject.Inject;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.ext.Provider;
import org.jboss.resteasy.plugins.providers.multipart.InputPart;
import io.quarkus.arc.WithCaching;
@Provider
public class MultipartInputPartConfigContainerRequestFilter implements ContainerRequestFilter {
@WithCaching
@Inject
Instance<ResteasyMultipartRuntimeConfig> resteasyMultipartConfigInstance;
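    // Runs on every request, seeding RESTEasy's multipart defaults (charset
    // and content type) from the runtime configuration.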
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
ResteasyMultipartRuntimeConfig resteasyMultipartConfig = resteasyMultipartConfigInstance.get();
requestContext.setProperty(InputPart.DEFAULT_CHARSET_PROPERTY,
resteasyMultipartConfig.inputPart.defaultCharset.name());
requestContext.setProperty(InputPart.DEFAULT_CONTENT_TYPE_PROPERTY,
resteasyMultipartConfig.inputPart.defaultContentType);
}
}
| 433 |
480 |
/*
* Copyright [2013-2021], Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.polardbx.executor.gsi.utils;
import com.alibaba.polardbx.common.datatype.Decimal;
import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
import com.alibaba.polardbx.common.jdbc.ParameterContext;
import com.alibaba.polardbx.common.jdbc.ParameterMethod;
import com.alibaba.polardbx.common.jdbc.ZeroDate;
import com.alibaba.polardbx.common.jdbc.ZeroTime;
import com.alibaba.polardbx.common.jdbc.ZeroTimestamp;
import com.alibaba.polardbx.common.properties.PropUtil;
import com.alibaba.polardbx.executor.cursor.Cursor;
import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
import com.alibaba.polardbx.optimizer.core.datatype.DataType;
import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil;
import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
import com.alibaba.polardbx.optimizer.core.expression.bean.EnumValue;
import com.alibaba.polardbx.optimizer.core.row.Row;
import com.alibaba.polardbx.statistics.SQLRecorderLogger;
import com.google.common.base.Preconditions;
import com.google.common.io.BaseEncoding;
import io.airlift.slice.Slice;
import org.apache.commons.lang3.StringUtils;
import java.math.BigInteger;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiFunction;
/**
* Convert between extractor's output and loader's input
*/
public class Transformer {
/**
* Build upper bound parameter for data extraction, from the results of select top n
*
* @param cursor Result cursor of select top n
* @param defaultGen Default upper bound generator for empty source table
* @return Parameter list for data extraction
*/
public static List<Map<Integer, ParameterContext>> convertUpperBoundWithDefault(Cursor cursor,
BiFunction<ColumnMeta, Integer,
ParameterContext> defaultGen) {
final List<Map<Integer, ParameterContext>> batchParams = new ArrayList<>();
Row row;
while ((row = cursor.next()) != null) {
final List<ColumnMeta> columns = row.getParentCursorMeta().getColumns();
final Map<Integer, ParameterContext> params = new HashMap<>(columns.size());
for (int i = 0; i < columns.size(); i++) {
ParameterContext pc = buildColumnParam(row, i);
final DataType columnType = columns.get(i).getDataType();
if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.FloatType, DataTypes.DoubleType)) {
if (null != pc.getArgs()[1]) {
// For float value like "-100.003", query like "c_float <= -100.003" returns nothing.
// Should replace upper bound with "c_float <= -100"
pc = new ParameterContext(pc.getParameterMethod(),
new Object[] {pc.getArgs()[0], Math.ceil((Double) pc.getArgs()[1])});
}
}
params.put(i + 1, pc);
}
batchParams.add(params);
}
if (batchParams.isEmpty()) {
// Build default
final List<ColumnMeta> columns = cursor.getReturnColumns();
final int columnCount = columns.size();
final Map<Integer, ParameterContext> params = new HashMap<>(columnCount);
for (int i = 0; i < columnCount; i++) {
final ColumnMeta columnMeta = columns.get(i);
params.put(i + 1, defaultGen.apply(columnMeta, i + 1));
}
batchParams.add(params);
}
return batchParams;
}
/**
* Build batch insert parameter, from the results of select
*
* @param cursor result cursor of select
* @return batch parameters for insert
*/
public static List<Map<Integer, ParameterContext>> buildBatchParam(Cursor cursor) {
final List<Map<Integer, ParameterContext>> batchParams = new ArrayList<>();
Row row;
while ((row = cursor.next()) != null) {
final List<ColumnMeta> columns = row.getParentCursorMeta().getColumns();
final Map<Integer, ParameterContext> params = new HashMap<>(columns.size());
for (int i = 0; i < columns.size(); i++) {
final ParameterContext parameterContext = buildColumnParam(row, i);
params.put(i + 1, parameterContext);
}
batchParams.add(params);
}
return batchParams;
}
/**
* Build column parameter for insert,from the results of select
*
* @param row result set of select
* @param i column index, start from 0
* @return ParameterContext for specified column
*/
public static ParameterContext buildColumnParam(Row row, int i) {
DataType columnType = DataTypes.BinaryType;
Object value = null;
ParameterMethod method = ParameterMethod.setObject1;
try {
columnType = row.getParentCursorMeta().getColumnMeta(i).getDataType();
value = row.getObject(i);
if (value instanceof ZeroDate || value instanceof ZeroTimestamp || value instanceof ZeroTime || value instanceof Decimal) {
                // setObject fails for zero time values such as 0000-00-00; setString works fine
value = value.toString();
method = ParameterMethod.setString;
} else if (value instanceof Slice) {
value = ((Slice) value).toStringUtf8();
method = ParameterMethod.setString;
} else if (value instanceof EnumValue) {
value = ((EnumValue) value).value;
method = ParameterMethod.setString;
} else if (value != null) {
if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.DateType, DataTypes.TimestampType,
DataTypes.DatetimeType, DataTypes.TimeType, DataTypes.YearType)) {
                    // For time values like 0000-00-00 01:01:01.12 or a YEAR value of 0000,
                    // getObject returns a wrong result; getBytes converted to String is safe
value = new String(row.getBytes(i));
method = ParameterMethod.setString;
} else if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.BitType, DataTypes.BigBitType)) {
                    // Use a wider type to avoid losing data when serializing/deserializing the bounds
value = new BigInteger(row.getString(i));
method = ParameterMethod.setBit;
} else if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.FloatType, DataTypes.DoubleType)) {
                    // Use a wider type to avoid losing data when serializing/deserializing the bounds
value = row.getDouble(i);
method = ParameterMethod.setDouble;
} else if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.ULongType)) {
// BIGINT(64) UNSIGNED
value = row.getString(i);
method = ParameterMethod.setString;
} else if (DataTypeUtil
.anyMatchSemantically(columnType, DataTypes.BinaryType, DataTypes.BlobType,
DataTypes.BinaryStringType)) {
                    // Mark with setBytes; serialized as a hex string
value = row.getBytes(i);
method = ParameterMethod.setBytes;
}
}
} catch (TddlNestableRuntimeException e) {
SQLRecorderLogger.ddlLogger.warn("Convert data type failed, use getBytes. message: " + e.getMessage());
            // For time values like -01:01:01, getObject throws an exception; getBytes works
// Ignore exception, use getBytes instead
value = row.getBytes(i);
method = ParameterMethod.setBytes;
}
return new ParameterContext(method, new Object[] {i + 1, value, columnType});
}
/**
 * This must stay compatible with buildColumnParam(Row row, int i)
*/
public static Map<Integer, ParameterContext> buildColumnParam(
List<ColumnMeta> columnMetaList, List<String> values, Charset charset, PropUtil.LOAD_NULL_MODE defaultMode) {
        Preconditions.checkArgument(
            values.size() == columnMetaList.size(), "The number of values must equal the number of columns");
final Map<Integer, ParameterContext> parameterContexts = new HashMap<>();
ParameterMethod method = null;
for (int i = 0; i < columnMetaList.size(); i++) {
String stringVal = null;
ColumnMeta meta = columnMetaList.get(i);
if (StringUtils.isEmpty(values.get(i)) &&
(defaultMode == PropUtil.LOAD_NULL_MODE.DEFAULT_VALUE_MODE ||
defaultMode == PropUtil.LOAD_NULL_MODE.DEFAULT_VALUE_AND_N_MODE)) {
if (meta.getField().getDefault() != null) {
stringVal = meta.getField().getDefault();
} else {
if (meta.isNullable()) {
stringVal = null;
} else {
stringVal = values.get(i);
}
}
} else {
if ((PropUtil.LOAD_NULL_MODE.N_MODE == defaultMode ||
defaultMode == PropUtil.LOAD_NULL_MODE.DEFAULT_VALUE_AND_N_MODE)
&& "\\N".equalsIgnoreCase(values.get(i))) {
if (!meta.isNullable()) {
stringVal = values.get(i);
} else {
stringVal = null;
}
} else {
stringVal = values.get(i);
}
}
Object value = null;
try {
DataType columnType = columnMetaList.get(i).getDataType();
if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.DateType, DataTypes.TimestampType,
DataTypes.DatetimeType, DataTypes.TimeType, DataTypes.YearType)) {
                // For temporal values like 0000-00-00 01:01:01.12 or a YEAR of 0000, getObject
                // returns a wrong result; getBytes converted to a String works
value = stringVal;
method = ParameterMethod.setString;
} else if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.BitType, DataTypes.BigBitType)
&& stringVal != null) {
                // Use a wider type to avoid losing data when serializing/deserializing the bounds
value = new BigInteger(stringVal);
method = ParameterMethod.setBit;
} else if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.FloatType, DataTypes.DoubleType)
&& stringVal != null) {
                // Use a wider type to avoid losing data when serializing/deserializing the bounds
value = Double.valueOf(stringVal);
method = ParameterMethod.setDouble;
} else if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.ULongType)) {
// BIGINT(64) UNSIGNED
value = stringVal;
method = ParameterMethod.setString;
} else if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.BinaryType, DataTypes.BlobType,
DataTypes.BinaryStringType) && stringVal != null) {
                // Mark with setBytes; serialized as a hex string
value = stringVal.getBytes(charset);
method = ParameterMethod.setBytes;
} else if (DataTypeUtil.isStringType(columnType)) {
value = stringVal;
method = ParameterMethod.setString;
} else {
value = stringVal;
method = ParameterMethod.setObject1;
}
} catch (TddlNestableRuntimeException e) {
SQLRecorderLogger.ddlLogger.warn(
"Convert backfill data failed, use getBytes. message: " + e.getMessage());
                // For time values like -01:01:01, getObject throws an exception; getBytes works
// Ignore exception, use getBytes instead
value = stringVal.getBytes(charset);
method = ParameterMethod.setBytes;
}
parameterContexts.put(i + 1, new ParameterContext(method, new Object[] {i + 1, value}));
}
return parameterContexts;
}
public static ParameterContext buildParamByType(long index, String method, String value) {
return buildParamByType(index, ParameterMethod.valueOf(method), value);
}
public static ParameterContext buildParamByType(long index, ParameterMethod method, String value) {
return new ParameterContext(method, new Object[] {index, deserializeParam(method, value)});
}
public static String serializeParam(ParameterContext pc) {
final ParameterMethod method = pc.getParameterMethod();
switch (method) {
case setBytes:
return BaseEncoding.base16().encode((byte[]) pc.getArgs()[1]);
default:
return pc.getArgs()[1].toString();
}
}
public static Object deserializeParam(ParameterMethod method, String value) {
switch (method) {
case setInt:
return Integer.valueOf(value);
case setLong:
return Long.valueOf(value);
case setDouble:
return Double.valueOf(value);
case setBytes:
return BaseEncoding.base16().decode(value);
default:
return value;
}
}
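    // Round-trip sketch (values illustrative): a setBytes parameter survives
    // serialization as a base16 string:
    //   ParameterContext pc = new ParameterContext(ParameterMethod.setBytes,
    //       new Object[] {1, new byte[] {(byte) 0xAB, 0x01}});
    //   serializeParam(pc);                                  // "AB01"
    //   deserializeParam(ParameterMethod.setBytes, "AB01");  // {0xAB, 0x01}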
}
| 6,986 |
6,497 |
<gh_stars>1000+
package com.sohu.cache.web.enums;
import com.sohu.cache.util.NumberUtil;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Machine memory usage / allocation distribution ranges
*/
public enum MachineMemoryDistriEnum {
BETWEEN_0_TO_1_PERCENT("0_1", "0-1%", 2),
BETWEEN_1_TO_10_PERCENT("1_10", "1-10%", 3),
BETWEEN_10_TO_25_PERCENT("10_25", "10-25%", 4),
BETWEEN_25_TO_50_PERCENT("25_50", "25-50%", 5),
BETWEEN_50_TO_75_PERCENT("50_75", "50-75%", 6),
    BETWEEN_75_TO_90_PERCENT("75_90", "75-90%", 7),
BETWEEN_90_TO_100_PERCENT("90_100", "90-100%", 8),
BETWEEN_100_TO_1000_PERCENT("100_1000", "100%以上", 9);
public final static Map<String, MachineMemoryDistriEnum> MAP;
static {
Map<String, MachineMemoryDistriEnum> tmpMap = new HashMap<>();
for (MachineMemoryDistriEnum enumObject : MachineMemoryDistriEnum.values()) {
tmpMap.put(enumObject.getValue(), enumObject);
}
MAP = Collections.unmodifiableMap(tmpMap);
}
private String value;
private String info;
private int type;
private MachineMemoryDistriEnum(String value, String info, int type) {
this.value = value;
this.info = info;
this.type = type;
}
public static MachineMemoryDistriEnum getByValue(String targetValue) {
return MAP.get(targetValue);
}
    /**
     * @param percent machine memory usage (or allocation) percentage
     * @return the matching distribution range, or null if no range matches
     */
public static MachineMemoryDistriEnum getRightPercentDistri(int percent) {
MachineMemoryDistriEnum[] enumArr = MachineMemoryDistriEnum.values();
for (MachineMemoryDistriEnum enumObject : enumArr) {
if (isInSize(enumObject, percent)) {
return enumObject;
}
}
return null;
}
    /**
     * @param enumObject the candidate range, whose value encodes "start_end"
     * @param percent machine memory usage (or allocation) percentage
     * @return whether percent lies in the half-open range [start, end)
     */
    private static boolean isInSize(MachineMemoryDistriEnum enumObject, long percent) {
        String value = enumObject.getValue();
        int index = value.indexOf("_");
        int start = NumberUtil.toInt(value.substring(0, index));
        int end = NumberUtil.toInt(value.substring(index + 1));
        if (percent >= start && percent < end) {
return true;
}
return false;
}
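    // Example (sketch): getRightPercentDistri(30) parses each value such as
    // "25_50" into the half-open range [25, 50) and returns
    // BETWEEN_25_TO_50_PERCENT; a percent of exactly 90 falls into [90, 100).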
public String getValue() {
return value;
}
public String getInfo() {
return info;
}
public int getType() {
return type;
}
}
| 1,084 |
360 |
/* -------------------------------------------------------------------------
*
* EUC_JIS_2004 <--> UTF8
*
* Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/utils/mb/conversion_procs/utf8_and_euc2004/utf8_and_euc2004.c
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "knl/knl_variable.h"
#include "fmgr.h"
#include "mb/pg_wchar.h"
#include "../../Unicode/euc_jis_2004_to_utf8.map"
#include "../../Unicode/utf8_to_euc_jis_2004.map"
#include "../../Unicode/euc_jis_2004_to_utf8_combined.map"
#include "../../Unicode/utf8_to_euc_jis_2004_combined.map"
PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(euc_jis_2004_to_utf8);
PG_FUNCTION_INFO_V1(utf8_to_euc_jis_2004);
extern "C" Datum euc_jis_2004_to_utf8(PG_FUNCTION_ARGS);
extern "C" Datum utf8_to_euc_jis_2004(PG_FUNCTION_ARGS);
/* ----------
* conv_proc(
* INTEGER, -- source encoding id
* INTEGER, -- destination encoding id
* CSTRING, -- source string (null terminated C string)
* CSTRING, -- destination string (null terminated C string)
* INTEGER -- source string length
* ) returns VOID;
* ----------
*/
Datum euc_jis_2004_to_utf8(PG_FUNCTION_ARGS)
{
unsigned char* src = (unsigned char*)PG_GETARG_CSTRING(2);
unsigned char* dest = (unsigned char*)PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
CHECK_ENCODING_CONVERSION_ARGS(PG_EUC_JIS_2004, PG_UTF8);
LocalToUtf(src,
dest,
LUmapEUC_JIS_2004,
LUmapEUC_JIS_2004_combined,
sizeof(LUmapEUC_JIS_2004) / sizeof(pg_local_to_utf),
sizeof(LUmapEUC_JIS_2004_combined) / sizeof(pg_local_to_utf_combined),
PG_EUC_JIS_2004,
len);
PG_RETURN_VOID();
}
Datum utf8_to_euc_jis_2004(PG_FUNCTION_ARGS)
{
unsigned char* src = (unsigned char*)PG_GETARG_CSTRING(2);
unsigned char* dest = (unsigned char*)PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
CHECK_ENCODING_CONVERSION_ARGS(PG_UTF8, PG_EUC_JIS_2004);
UtfToLocal(src,
dest,
ULmapEUC_JIS_2004,
ULmapEUC_JIS_2004_combined,
sizeof(ULmapEUC_JIS_2004) / sizeof(pg_utf_to_local),
sizeof(ULmapEUC_JIS_2004_combined) / sizeof(pg_utf_to_local_combined),
PG_EUC_JIS_2004,
len);
PG_RETURN_VOID();
}
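/* Usage sketch (assumes the conversion is registered in pg_conversion): these
 * functions are invoked by the server's encoding machinery, e.g. when a client
 * using EUC_JIS_2004 talks to a UTF8 database, rather than called directly. */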
| 1,081 |
603 |
# -*- coding: utf-8 -*-
"""API module v1."""
# from .views import blueprint
| 29 |
2,001 |
package deadlock;
public class ThreadLockFixedDemo {
    /* Both methods now request the locks in the same order:
     * first Integer.class, then String.class.
     * This prevents the deadlock, as long as every method
     * acquires the locks in a consistent order.
     * */
public void method1() {
synchronized (Integer.class) {
System.out.println("Acquired lock on Integer.class object");
}
synchronized (String.class) {
System.out.println("Acquired lock on String.class object");
}
}
public void method2() {
synchronized (Integer.class) {
System.out.println("Acquired lock on Integer.class object");
}
synchronized (String.class) {
System.out.println("Acquired lock on String.class object");
}
}
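    /* For contrast (sketch, not part of the original demo): the deadlock-prone
     * variant would take String.class first in one method and Integer.class
     * first in the other, letting two threads each hold one lock while
     * waiting forever for the other. */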
}
| 318 |
490 |
import torch
import torch.nn as nn
class SimulTransOracle(nn.Module):
"""
Forward-backward based controller
"""
def __init__(self, penalty):
super().__init__()
self.penalty = penalty
def forward(self, scores):
"""
Inputs: Scores : log p(y_t | x<j) : B, Tt, Ts
"""
B, Tt, Ts = scores.size()
# Take negative-likelihood:
scores = - scores
        # Estimate the best decoding path, i.e. the context sizes
        # Forward pass costs
fs = scores.new_zeros(B, Tt+1, Ts)
# First column:
fs[:, 1:, 0] = torch.cumsum(scores[..., 0], dim=-1)
# First row:
fs[:, 0] = self.penalty * torch.arange(1, Ts+1).cumsum(dim=-1).type_as(fs).unsqueeze(0).repeat(B, 1) / Ts
for t in range(1, Tt+1):
for j in range(1, Ts):
ifwrite = fs[:, t-1, j] + scores[:, t-1, j] # Write (t-1, j) -> (t, j)
ifread = fs[:, t, j-1] + self.penalty * (j+1)/ Ts # (t, j-1) -> (t,j)
fs[:, t, j] = torch.min(
ifwrite, ifread
)
bs = scores.new_zeros(B, Tt+1, Ts)
        # Last column: suffix sums of the last source column's write costs
        bs[:, :-1, -1] = scores[..., -1].flip(-1).cumsum(dim=-1).flip(-1)
# Last row:
bs[:, -1] = self.penalty * torch.arange(1, Ts+1).type_as(bs).unsqueeze(0).repeat(B, 1).flip(-1) / Ts
for t in range(Tt-1, -1, -1):
for j in range(Ts-2, -1, -1):
ifwrite = bs[:, t+1, j] + scores[:, t,j] # Write (t,j) -> (t+1, j)
ifread = bs[:, t, j+1] + self.penalty * (j+1)/Ts # Read (t,j) -> (t, j+1)
bs[:, t,j] = torch.min(
ifwrite, ifread
)
# accumulate the scores
cs = fs + bs
best_context = []
for b in range(B):
t = 0
j = 0
best = []
while t < Tt and j < Ts-1:
if cs[b, t+1, j] < cs[b, t, j+1]: # write
best.append(j)
t += 1
else: # read
j += 1
while len(best) < Tt:
best.append(Ts-1)
best_context.append(best)
best_context = torch.stack([torch.Tensor(ctx) for ctx in best_context], dim=0).to(scores.device).long()
return best_context
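# Minimal usage sketch (shapes and penalty value are illustrative, not from the
# original repo). scores hold log p(y_t | x<j), so any negative tensor
# exercises the oracle.
if __name__ == "__main__":
    B, Tt, Ts = 2, 5, 7
    oracle = SimulTransOracle(penalty=0.1)
    scores = torch.log(torch.rand(B, Tt, Ts))  # negative values, like log-probs
    contexts = oracle(scores)                  # LongTensor of shape (B, Tt)
    print(contexts.shape)  # each row is a non-decreasing list of context sizes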
| 1,363 |
929 |
<filename>automation/scripts/testnet-validation/logs.py
from google.cloud import logging as glogging
from kubernetes import client, config
import logging
from datetime import datetime, timedelta
import sys
import time
def fetch_logs(namespace="default", hours_ago=1, log_filter="", max_entries=1000):
# Python Logging Config
logger = logging.getLogger()
    # Stackdriver client
stackdriver_client = glogging.Client()
# Create some filter expressions
earliest_timestamp = datetime.now() - timedelta(hours=int(hours_ago))
earliest_timestamp_formatted = earliest_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
FILTER_COMMON = """
timestamp >= "{}"
resource.type="k8s_container"
resource.labels.namespace_name="{}"
""".format(earliest_timestamp_formatted, namespace)
logger.debug("Common Filter: \n{}".format(FILTER_COMMON))
logger.info("Checking Logs for {} -- Past {} Hours".format(namespace, hours_ago))
logger.info(f"Fetching {max_entries} Log Entries")
    # Fetch log entries matching the combined filter
logger.info("Filter: \n{}".format(log_filter))
log_iterator = stackdriver_client.list_entries(filter_=FILTER_COMMON + log_filter)
logs = []
for index, log in enumerate(log_iterator):
logger.debug(log)
logs.append(log)
if (index+1) % max_entries == 0:
break
if (index+1) % 100 == 0:
logger.debug(f"Fetched {index+1} logs")
# Google API Rate-Limit
time.sleep(.04)
logger.debug(f"{len(logs)} logs retrieved")
return logs
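# Usage sketch (namespace and filter are illustrative; the filter string
# follows Cloud Logging's query syntax and is appended to FILTER_COMMON):
#
#   logs = fetch_logs(namespace="my-testnet", hours_ago=2,
#                     log_filter='severity>=ERROR', max_entries=500)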
| 532 |
14,668 |
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/webui/help_app_ui/help_app_manager_factory.h"
#include "ash/webui/help_app_ui/help_app_manager.h"
#include "chromeos/components/local_search_service/public/cpp/local_search_service_proxy_factory.h"
#include "components/keyed_service/content/browser_context_dependency_manager.h"
namespace ash {
namespace help_app {
// static
HelpAppManager* HelpAppManagerFactory::GetForBrowserContext(
content::BrowserContext* context) {
return static_cast<HelpAppManager*>(
HelpAppManagerFactory::GetInstance()->GetServiceForBrowserContext(
context, /*create=*/true));
}
// static
HelpAppManagerFactory* HelpAppManagerFactory::GetInstance() {
return base::Singleton<HelpAppManagerFactory>::get();
}
HelpAppManagerFactory::HelpAppManagerFactory()
: BrowserContextKeyedServiceFactory(
"HelpAppManager",
BrowserContextDependencyManager::GetInstance()) {
DependsOn(
local_search_service::LocalSearchServiceProxyFactory::GetInstance());
}
HelpAppManagerFactory::~HelpAppManagerFactory() = default;
content::BrowserContext* HelpAppManagerFactory::GetBrowserContextToUse(
content::BrowserContext* context) const {
// The service should exist in incognito mode.
return context;
}
KeyedService* HelpAppManagerFactory::BuildServiceInstanceFor(
content::BrowserContext* context) const {
return new HelpAppManager(
local_search_service::LocalSearchServiceProxyFactory::
GetForBrowserContext(context));
}
bool HelpAppManagerFactory::ServiceIsNULLWhileTesting() const {
return true;
}
} // namespace help_app
} // namespace ash
| 551 |
8,657 |
<reponame>sharksforarms/h2o
#include "h2o.h"
#include "h2o/configurator.h"
struct headers_util_add_arg_t {
yoml_t *node;
h2o_iovec_t *name;
h2o_iovec_t value;
};
struct headers_util_configurator_t {
h2o_configurator_t super;
h2o_configurator_t *child;
h2o_configurator_get_headers_commands_cb get_commands;
};
static int extract_name(const char *src, size_t len, h2o_iovec_t **_name)
{
h2o_iovec_t name;
const h2o_token_t *name_token;
name = h2o_str_stripws(src, len);
if (name.len == 0)
return -1;
name = h2o_strdup(NULL, name.base, name.len);
h2o_strtolower(name.base, name.len);
if ((name_token = h2o_lookup_token(name.base, name.len)) != NULL) {
*_name = (h2o_iovec_t *)&name_token->buf;
free(name.base);
} else {
*_name = h2o_mem_alloc(sizeof(**_name));
**_name = name;
}
return 0;
}
static int extract_name_value(const char *src, h2o_iovec_t **name, h2o_iovec_t *value)
{
const char *colon = strchr(src, ':');
if (colon == NULL)
return -1;
if (extract_name(src, colon - src, name) != 0)
return -1;
*value = h2o_str_stripws(colon + 1, strlen(colon + 1));
*value = h2o_strdup(NULL, value->base, value->len);
return 0;
}
static int is_list_cmd(int cmd_id)
{
return cmd_id == H2O_HEADERS_CMD_UNSET || cmd_id == H2O_HEADERS_CMD_UNSETUNLESS || cmd_id == H2O_HEADERS_CMD_COOKIE_UNSET ||
cmd_id == H2O_HEADERS_CMD_COOKIE_UNSETUNLESS;
}
static int add_cmd(h2o_configurator_command_t *cmd, int cmd_id, struct headers_util_add_arg_t *args, size_t num_args,
h2o_headers_command_when_t when, h2o_headers_command_t **cmds)
{
for (size_t i = 0; i < num_args; i++) {
if (h2o_iovec_is_token(args[i].name)) {
const h2o_token_t *token = (void *)args[i].name;
if (h2o_headers_is_prohibited_name(token)) {
h2o_configurator_errprintf(cmd, args[i].node, "the named header cannot be rewritten");
return -1;
}
}
if (!is_list_cmd(cmd_id))
h2o_headers_append_command(cmds, cmd_id, &(h2o_headers_command_arg_t){args[i].name, args[i].value}, 1, when);
}
if (is_list_cmd(cmd_id)) {
h2o_headers_command_arg_t cmdargs[num_args];
for (size_t i = 0; i < num_args; ++i)
cmdargs[i] = (h2o_headers_command_arg_t){args[i].name, args[i].value};
h2o_headers_append_command(cmds, cmd_id, cmdargs, num_args, when);
}
return 0;
}
static int parse_header_node(h2o_configurator_command_t *cmd, yoml_t **node, yoml_t ***headers, size_t *num_headers,
h2o_headers_command_when_t *when)
{
if ((*node)->type == YOML_TYPE_SCALAR) {
*headers = node;
*num_headers = 1;
*when = H2O_HEADERS_CMD_WHEN_FINAL;
} else if ((*node)->type == YOML_TYPE_SEQUENCE) {
*headers = (*node)->data.sequence.elements;
*num_headers = (*node)->data.sequence.size;
*when = H2O_HEADERS_CMD_WHEN_FINAL;
} else {
yoml_t **header_node;
yoml_t **when_node = NULL;
if (h2o_configurator_parse_mapping(cmd, *node, "header:sa", "when:*", &header_node, &when_node) != 0)
return -1;
if ((*header_node)->type == YOML_TYPE_SEQUENCE) {
*headers = (*header_node)->data.sequence.elements;
*num_headers = (*header_node)->data.sequence.size;
} else {
*headers = header_node;
*num_headers = 1;
}
if (when_node == NULL) {
*when = H2O_HEADERS_CMD_WHEN_FINAL;
} else {
switch (h2o_configurator_get_one_of(cmd, *when_node, "final,early,all")) {
case 0:
*when = H2O_HEADERS_CMD_WHEN_FINAL;
break;
case 1:
*when = H2O_HEADERS_CMD_WHEN_EARLY;
break;
case 2:
*when = H2O_HEADERS_CMD_WHEN_ALL;
break;
default:
return -1;
}
}
}
return 0;
}
static int on_config_header_2arg(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, int cmd_id, yoml_t *node,
h2o_headers_command_t **headers_cmds)
{
yoml_t **headers;
size_t num_headers;
h2o_headers_command_when_t when;
if (parse_header_node(cmd, &node, &headers, &num_headers, &when) != 0)
return -1;
struct headers_util_add_arg_t args[num_headers];
int i;
for (i = 0; i != num_headers; ++i) {
args[i].node = headers[i];
if (extract_name_value(args[i].node->data.scalar, &args[i].name, &args[i].value) != 0) {
h2o_configurator_errprintf(cmd, args[i].node, "failed to parse the value; should be in form of `name: value`");
return -1;
}
}
if (add_cmd(cmd, cmd_id, args, num_headers, when, headers_cmds) != 0) {
for (i = 0; i != num_headers; i++) {
if (!h2o_iovec_is_token(args[i].name))
free(args[i].name->base);
free(args[i].value.base);
}
return -1;
}
return 0;
}
static int on_config_unset_core(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node, int cmd_id)
{
yoml_t **headers;
size_t num_headers;
h2o_headers_command_when_t when;
struct headers_util_configurator_t *self = (void *)cmd->configurator;
if (parse_header_node(cmd, &node, &headers, &num_headers, &when) != 0)
return -1;
struct headers_util_add_arg_t args[num_headers];
int i;
for (i = 0; i != num_headers; ++i) {
args[i].node = headers[i];
if (cmd_id == H2O_HEADERS_CMD_UNSET || cmd_id == H2O_HEADERS_CMD_UNSETUNLESS) {
if (extract_name(args[i].node->data.scalar, strlen(args[i].node->data.scalar), &args[i].name) != 0) {
h2o_configurator_errprintf(cmd, args[i].node, "invalid header name");
return -1;
}
} else {
h2o_iovec_t tmp;
tmp = h2o_str_stripws(args[i].node->data.scalar, strlen(args[i].node->data.scalar));
if (tmp.len == 0) {
h2o_configurator_errprintf(cmd, args[i].node, "invalid header name");
return -1;
}
args[i].name = h2o_mem_alloc(sizeof(*args[0].name));
*args[i].name = h2o_strdup(NULL, tmp.base, tmp.len);
}
}
if (add_cmd(cmd, cmd_id, args, num_headers, when, self->get_commands(self->child)) != 0) {
for (i = 0; i != num_headers; i++) {
if (!h2o_iovec_is_token(args[i].name))
free(args[i].name->base);
}
return -1;
}
return 0;
}
static int on_config_header_unset(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
return on_config_unset_core(cmd, ctx, node, H2O_HEADERS_CMD_UNSET);
}
static int on_config_header_unsetunless(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
return on_config_unset_core(cmd, ctx, node, H2O_HEADERS_CMD_UNSETUNLESS);
}
static int on_config_cookie_unset(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
return on_config_unset_core(cmd, ctx, node, H2O_HEADERS_CMD_COOKIE_UNSET);
}
static int on_config_cookie_unsetunless(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
return on_config_unset_core(cmd, ctx, node, H2O_HEADERS_CMD_COOKIE_UNSETUNLESS);
}
#define DEFINE_2ARG(fn, cmd_id) \
static int fn(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node) \
{ \
struct headers_util_configurator_t *self = (void *)cmd->configurator; \
return on_config_header_2arg(cmd, ctx, cmd_id, node, self->get_commands(self->child)); \
}
DEFINE_2ARG(on_config_header_add, H2O_HEADERS_CMD_ADD)
DEFINE_2ARG(on_config_header_append, H2O_HEADERS_CMD_APPEND)
DEFINE_2ARG(on_config_header_merge, H2O_HEADERS_CMD_MERGE)
DEFINE_2ARG(on_config_header_set, H2O_HEADERS_CMD_SET)
DEFINE_2ARG(on_config_header_setifempty, H2O_HEADERS_CMD_SETIFEMPTY)
#undef DEFINE_2ARG
void h2o_configurator_define_headers_commands(h2o_globalconf_t *global_conf, h2o_configurator_t *conf, const char *prefix,
h2o_configurator_get_headers_commands_cb get_commands)
{
struct headers_util_configurator_t *c = (void *)h2o_configurator_create(global_conf, sizeof(*c));
c->child = conf;
c->get_commands = get_commands;
size_t prefix_len = strlen(prefix);
#define DEFINE_CMD_NAME(name, suffix) \
char *name = h2o_mem_alloc(prefix_len + sizeof(suffix)); \
memcpy(name, prefix, prefix_len); \
memcpy(name + prefix_len, suffix, sizeof(suffix))
DEFINE_CMD_NAME(add_directive, ".add");
DEFINE_CMD_NAME(append_directive, ".append");
DEFINE_CMD_NAME(merge_directive, ".merge");
DEFINE_CMD_NAME(set_directive, ".set");
DEFINE_CMD_NAME(setifempty_directive, ".setifempty");
DEFINE_CMD_NAME(unset_directive, ".unset");
DEFINE_CMD_NAME(unsetunless_directive, ".unsetunless");
DEFINE_CMD_NAME(cookie_unset_directive, ".cookie.unset");
DEFINE_CMD_NAME(cookie_unsetunless_directive, ".cookie.unsetunless");
#undef DEFINE_CMD_NAME
#define DEFINE_CMD(name, cb) \
h2o_configurator_define_command(&c->super, name, \
H2O_CONFIGURATOR_FLAG_ALL_LEVELS | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR | \
H2O_CONFIGURATOR_FLAG_EXPECT_SEQUENCE | H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING, \
cb)
DEFINE_CMD(add_directive, on_config_header_add);
DEFINE_CMD(append_directive, on_config_header_append);
DEFINE_CMD(merge_directive, on_config_header_merge);
DEFINE_CMD(set_directive, on_config_header_set);
DEFINE_CMD(setifempty_directive, on_config_header_setifempty);
DEFINE_CMD(unset_directive, on_config_header_unset);
DEFINE_CMD(unsetunless_directive, on_config_header_unsetunless);
DEFINE_CMD(cookie_unset_directive, on_config_cookie_unset);
DEFINE_CMD(cookie_unsetunless_directive, on_config_cookie_unsetunless);
#undef DEFINE_CMD
}
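/* Usage sketch: the directive names registered above are derived from the
 * caller-supplied prefix plus the DEFINE_CMD_NAME suffixes, so a configurator
 * registered with prefix "header" accepts configuration entries such as
 *   header.add: "x-foo: bar"
 *   header.unset: "x-internal"
 * (YAML shown is illustrative; exact semantics are defined by h2o's docs.) */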
| 6,009 |
1,671 |
/*
* Copyright 2016 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.store;
import com.github.ambry.config.StoreConfig;
import com.github.ambry.utils.CrcInputStream;
import com.github.ambry.utils.CrcOutputStream;
import com.github.ambry.utils.Time;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Represents a record of the compaction process and helps in recovery in case of crashes.
*/
class CompactionLog implements Closeable {
static final short VERSION_0 = 0;
static final short VERSION_1 = 1;
static final short CURRENT_VERSION = VERSION_1;
static final String COMPACTION_LOG_SUFFIX = "_compactionLog";
private static final byte[] ZERO_LENGTH_ARRAY = new byte[0];
private static final long UNINITIALIZED_TIMESTAMP = -1;
private static final Logger logger = LoggerFactory.getLogger(CompactionLog.class);
/**
* The {@link Phase} of the current compaction cycle.
*/
enum Phase {
PREPARE, COPY, COMMIT, CLEANUP, DONE
}
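  // Lifecycle sketch, per the mark* methods below:
  // PREPARE -> markCopyStart() -> COPY -> markCommitStart() -> COMMIT
  //   -> markCleanupStart() -> CLEANUP -> markCycleComplete()
  //   -> next cycle's PREPARE, or DONE once all cycles are complete.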
final Long startTime;
final List<CycleLog> cycleLogs;
private final File file;
private final Time time;
private int currentIdx = 0;
private Offset startOffsetOfLastIndexSegmentForDeleteCheck = null;
/**
* Used to determine whether compaction is in progress.
* @param dir the directory where the compaction log is expected to exist (if any).
* @param storeId the ID of the store under compaction.
* @return whether compaction is in progress for the store.
*/
static boolean isCompactionInProgress(String dir, String storeId) {
return new File(dir, storeId + COMPACTION_LOG_SUFFIX).exists();
}
/**
* Creates a new compaction log.
* @param dir the directory at which the compaction log must be created.
* @param storeId the ID of the store.
* @param time the {@link Time} instance to use.
* @param compactionDetails the details about the compaction.
* @param config the store config to use in this compaction log.
*/
CompactionLog(String dir, String storeId, Time time, CompactionDetails compactionDetails, StoreConfig config)
throws IOException {
this.time = time;
file = new File(dir, storeId + COMPACTION_LOG_SUFFIX);
if (!file.createNewFile()) {
throw new IllegalArgumentException(file.getAbsolutePath() + " already exists");
}
startTime = time.milliseconds();
cycleLogs = new ArrayList<>();
cycleLogs.add(new CycleLog(compactionDetails));
flush();
if (config.storeSetFilePermissionEnabled) {
Files.setPosixFilePermissions(file.toPath(), config.storeOperationFilePermission);
}
logger.trace("Created compaction log: {}", file);
}
/**
* Loads an existing compaction log.
* @param dir the directory at which the log exists.
* @param storeId the ID of the store.
* @param storeKeyFactory the {@link StoreKeyFactory} that is used for keys in the {@link BlobStore} being compacted.
* @param time the {@link Time} instance to use.
* @param config the store config to use in this compaction log.
*/
CompactionLog(String dir, String storeId, StoreKeyFactory storeKeyFactory, Time time, StoreConfig config)
throws IOException {
this(new File(dir, storeId + COMPACTION_LOG_SUFFIX), storeKeyFactory, time, config);
}
/**
* Loads an existing compaction log from {@link File}.
* @param compactionLogFile The compaction log {@link File}.
* @param storeKeyFactory The {@link StoreKeyFactory} that is used for keys in the {@link BlobStore} being compacted.
* @param time The {@link Time} instance to use.
* @param config The store config to use in this compaction log.
* @throws IOException
*/
CompactionLog(File compactionLogFile, StoreKeyFactory storeKeyFactory, Time time, StoreConfig config)
throws IOException {
if (!compactionLogFile.exists()) {
throw new IllegalArgumentException(compactionLogFile.getAbsolutePath() + " does not exist");
}
this.file = compactionLogFile;
this.time = time;
try (FileInputStream fileInputStream = new FileInputStream(file)) {
CrcInputStream crcInputStream = new CrcInputStream(fileInputStream);
DataInputStream stream = new DataInputStream(crcInputStream);
short version = stream.readShort();
switch (version) {
case VERSION_0:
startTime = stream.readLong();
currentIdx = stream.readInt();
int cycleLogsSize = stream.readInt();
cycleLogs = new ArrayList<>(cycleLogsSize);
while (cycleLogs.size() < cycleLogsSize) {
cycleLogs.add(CycleLog.fromBytes(stream, storeKeyFactory));
}
long crc = crcInputStream.getValue();
if (crc != stream.readLong()) {
throw new IllegalStateException("CRC of data read does not match CRC in file");
}
break;
case VERSION_1:
startTime = stream.readLong();
if (stream.readByte() == (byte) 1) {
startOffsetOfLastIndexSegmentForDeleteCheck = Offset.fromBytes(stream);
}
currentIdx = stream.readInt();
cycleLogsSize = stream.readInt();
cycleLogs = new ArrayList<>(cycleLogsSize);
while (cycleLogs.size() < cycleLogsSize) {
cycleLogs.add(CycleLog.fromBytes(stream, storeKeyFactory));
}
crc = crcInputStream.getValue();
if (crc != stream.readLong()) {
throw new IllegalStateException("CRC of data read does not match CRC in file");
}
break;
default:
throw new IllegalArgumentException("Unrecognized version");
}
if (config.storeSetFilePermissionEnabled) {
Files.setPosixFilePermissions(file.toPath(), config.storeOperationFilePermission);
}
logger.trace("Loaded compaction log: {}", file);
}
}
/**
* @return the current phase of compaction.
*/
Phase getCompactionPhase() {
return currentIdx >= cycleLogs.size() ? Phase.DONE : getCurrentCycleLog().getPhase();
}
/**
* @return the index of the {@link CompactionDetails} being provided.
*/
int getCurrentIdx() {
return currentIdx < cycleLogs.size() ? currentIdx : -1;
}
/**
* @return the {@link CompactionDetails} for the compaction cycle in progress.
*/
CompactionDetails getCompactionDetails() {
return getCurrentCycleLog().compactionDetails;
}
/**
* @return the {@link StoreFindToken} until which data has been copied and flushed. Returns {@code null} if nothing
* has been set yet.
*/
StoreFindToken getSafeToken() {
return getCurrentCycleLog().safeToken;
}
/**
* Sets the {@link StoreFindToken} until which data is copied and flushed.
* @param safeToken the {@link StoreFindToken} until which data is copied and flushed.
*/
void setSafeToken(StoreFindToken safeToken) {
CycleLog cycleLog = getCurrentCycleLog();
if (!cycleLog.getPhase().equals(Phase.COPY)) {
throw new IllegalStateException("Cannot set a safe token - not in COPY phase");
}
cycleLog.safeToken = safeToken;
flush();
logger.trace("{}: Set safe token to {} during compaction of {}", file, cycleLog.safeToken,
cycleLog.compactionDetails);
}
/**
* @return the start {@link Offset} of the last index segment for checking deletes. This is initially {@code null} and
* has to be set.
*/
Offset getStartOffsetOfLastIndexSegmentForDeleteCheck() {
return startOffsetOfLastIndexSegmentForDeleteCheck;
}
/**
* Sets the start {@link Offset} of the last index segment for checking deletes.
   * @param startOffsetOfLastIndexSegmentForDeleteCheck the start {@link Offset} of the last index segment for checking
   *                                                     deletes.
*/
void setStartOffsetOfLastIndexSegmentForDeleteCheck(Offset startOffsetOfLastIndexSegmentForDeleteCheck) {
this.startOffsetOfLastIndexSegmentForDeleteCheck = startOffsetOfLastIndexSegmentForDeleteCheck;
flush();
logger.trace("{}: Set startOffsetOfLastIndexSegmentForDeleteCheck to {}", file,
this.startOffsetOfLastIndexSegmentForDeleteCheck);
}
/**
* Marks the start of the copy phase.
*/
void markCopyStart() {
CycleLog cycleLog = getCurrentCycleLog();
if (!cycleLog.getPhase().equals(Phase.PREPARE)) {
throw new IllegalStateException("Should be in PREPARE phase to transition to COPY phase");
}
cycleLog.copyStartTime = time.milliseconds();
flush();
logger.trace("{}: Marked copy as started for {}", file, cycleLog.compactionDetails);
}
/**
* Splits the current cycle at {@code nextCycleStartSegment}. This means that a new next cycle will be created that
* starts at {@code nextCycleStartSegment} and ends at the end segment of the current cycle and the new current cycle
* starts at the first segment in the current cycle and ends at the segment just before {@code nextCycleStartSegment}.
   * For example, if the current cycle is 0_1.log,0_2.log,0_3.log,0_4.log,0_5.log and {@code nextCycleStartSegment} is
   * 0_4.log, the new next cycle will be 0_4.log,0_5.log and the new current cycle will be 0_1.log,0_2.log,0_3.log.
* @param nextCycleStartSegment the segment to split the current cycle at.
*/
void splitCurrentCycle(LogSegmentName nextCycleStartSegment) {
CompactionDetails currentDetails = getCurrentCycleLog().compactionDetails;
List<LogSegmentName> updatedList = new ArrayList<>();
List<LogSegmentName> newList = new ArrayList<>();
boolean encounteredSplitPoint = false;
for (LogSegmentName segmentUnderCompaction : currentDetails.getLogSegmentsUnderCompaction()) {
if (!encounteredSplitPoint && !segmentUnderCompaction.equals(nextCycleStartSegment)) {
updatedList.add(segmentUnderCompaction);
} else {
encounteredSplitPoint = true;
newList.add(segmentUnderCompaction);
}
}
getCurrentCycleLog().compactionDetails = new CompactionDetails(currentDetails.getReferenceTimeMs(), updatedList,
null);
cycleLogs.add(new CycleLog(new CompactionDetails(currentDetails.getReferenceTimeMs(), newList, null)));
flush();
logger.trace("{}: Split current cycle into two lists: {} and {}", file, updatedList, newList);
}
/**
* Marks the start of the commit phase.
*/
void markCommitStart() {
CycleLog cycleLog = getCurrentCycleLog();
if (!cycleLog.getPhase().equals(Phase.COPY)) {
throw new IllegalStateException("Should be in COPY phase to transition to SWITCH phase");
}
cycleLog.commitStartTime = time.milliseconds();
flush();
logger.trace("{}: Marked commit as started for {}", file, cycleLog.compactionDetails);
}
/**
* Marks the start of the cleanup phase.
*/
void markCleanupStart() {
CycleLog cycleLog = getCurrentCycleLog();
if (!cycleLog.getPhase().equals(Phase.COMMIT)) {
throw new IllegalStateException("Should be in SWITCH phase to transition to CLEANUP phase");
}
cycleLog.cleanupStartTime = time.milliseconds();
flush();
logger.trace("{}: Marked cleanup as started for {}", file, cycleLog.compactionDetails);
}
/**
* Marks the current compaction cycle as complete.
*/
void markCycleComplete() {
CycleLog cycleLog = getCurrentCycleLog();
if (!cycleLog.getPhase().equals(Phase.CLEANUP)) {
throw new IllegalStateException("Should be in CLEANUP phase to complete cycle");
}
cycleLog.cycleEndTime = time.milliseconds();
currentIdx++;
flush();
logger.trace("{}: Marked cycle as complete for {}", file, cycleLog.compactionDetails);
}
/**
* Closes the compaction log. If compaction is complete, renames the log to keep a permanent record.
*/
@Override
public void close() {
if (file.exists() && getCompactionPhase().equals(Phase.DONE)) {
String dateString = new Date(startTime).toString();
File savedLog =
new File(file.getAbsolutePath() + BlobStore.SEPARATOR + startTime + BlobStore.SEPARATOR + dateString);
if (!file.renameTo(savedLog)) {
throw new IllegalStateException("Compaction log could not be renamed after completion of compaction");
}
}
}
/**
* @return the {@link CycleLog} for the current compaction cycle.
*/
private CycleLog getCurrentCycleLog() {
if (currentIdx >= cycleLogs.size()) {
throw new IllegalStateException("Operation not possible because there are no more compaction cycles left");
}
return cycleLogs.get(currentIdx);
}
/**
* Flushes all changes to the file backing this compaction log.
*/
private void flush() {
/*
Description of serialized format
Version 0:
version
startTime
index of current cycle's log
size of cycle log list
cycleLog1 (see CycleLog#toBytes())
cycleLog2
...
crc
Version 1:
version
startTime
byte to indicate whether startOffsetOfLastIndexSegmentForDeleteCheck is present (1) or not (0)
startOffsetOfLastIndexSegmentForDeleteCheck if not null
index of current cycle's log
size of cycle log list
cycleLog1 (see CycleLog#toBytes())
cycleLog2
...
crc
*/
File tempFile = new File(file.getAbsolutePath() + ".tmp");
try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
CrcOutputStream crcOutputStream = new CrcOutputStream(fileOutputStream);
DataOutputStream stream = new DataOutputStream(crcOutputStream);
stream.writeShort(CURRENT_VERSION);
stream.writeLong(startTime);
if (startOffsetOfLastIndexSegmentForDeleteCheck == null) {
stream.writeByte(0);
} else {
stream.writeByte(1);
stream.write(startOffsetOfLastIndexSegmentForDeleteCheck.toBytes());
}
stream.writeInt(currentIdx);
stream.writeInt(cycleLogs.size());
for (CycleLog cycleLog : cycleLogs) {
stream.write(cycleLog.toBytes());
}
stream.writeLong(crcOutputStream.getValue());
fileOutputStream.getChannel().force(true);
} catch (IOException e) {
throw new IllegalStateException(e);
}
if (!tempFile.renameTo(file)) {
throw new IllegalStateException("Newly written compaction log could not be saved");
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Version:")
.append(CURRENT_VERSION)
.append("\n")
.append("StartTime:")
.append(startTime)
.append("\n")
.append("StartOffsetOfLastIndexSegmentForDeleteCheck");
if (startOffsetOfLastIndexSegmentForDeleteCheck == null) {
sb.append("null");
} else {
sb.append(startOffsetOfLastIndexSegmentForDeleteCheck.toString());
}
sb.append("\n");
sb.append("CurrentIndex:")
.append(currentIdx)
.append("\n")
.append("CycleLogSize:")
.append(cycleLogs.size())
.append("\n")
.append("CycleLogs:");
for (CycleLog cycleLog : cycleLogs) {
sb.append(cycleLog.toString()).append("\n\n");
}
return sb.toString();
}
/**
* Details and log for a single compaction cycle.
*/
static class CycleLog {
private static final int TIMESTAMP_SIZE = 8;
private static final int STORE_TOKEN_PRESENT_FLAG_SIZE = 1;
private static final byte STORE_TOKEN_PRESENT = 1;
private static final byte STORE_TOKEN_ABSENT = 0;
// details about the cycle
CompactionDetails compactionDetails;
// start time of the copy phase
long copyStartTime = UNINITIALIZED_TIMESTAMP;
// start time of the commit phase
long commitStartTime = UNINITIALIZED_TIMESTAMP;
// start time of the cleanup phase
long cleanupStartTime = UNINITIALIZED_TIMESTAMP;
// end time of the current cycle
long cycleEndTime = UNINITIALIZED_TIMESTAMP;
// point until which copying is complete
StoreFindToken safeToken = null;
/**
* Create a log for a single cycle of compaction.
* @param compactionDetails the details for the compaction cycle.
*/
CycleLog(CompactionDetails compactionDetails) {
this.compactionDetails = compactionDetails;
}
/**
* Create a log for a compaction cycle from a {@code stream}.
* @param stream the {@link DataInputStream} that represents the serialized object.
* @param storeKeyFactory the {@link StoreKeyFactory} used to generate the {@link StoreFindToken}.
* @return a {@link CycleLog} that represents a cycle.
* @throws IOException if there is an I/O error while reading the stream.
*/
static CycleLog fromBytes(DataInputStream stream, StoreKeyFactory storeKeyFactory) throws IOException {
CompactionDetails compactionDetails = CompactionDetails.fromBytes(stream);
CycleLog cycleLog = new CycleLog(compactionDetails);
cycleLog.copyStartTime = stream.readLong();
cycleLog.commitStartTime = stream.readLong();
cycleLog.cleanupStartTime = stream.readLong();
cycleLog.cycleEndTime = stream.readLong();
cycleLog.safeToken =
stream.readByte() == STORE_TOKEN_PRESENT ? StoreFindToken.fromBytes(stream, storeKeyFactory) : null;
return cycleLog;
}
/**
* @return the current phase of this cycle of compaction.
*/
Phase getPhase() {
Phase phase;
if (copyStartTime == UNINITIALIZED_TIMESTAMP) {
phase = Phase.PREPARE;
} else if (commitStartTime == UNINITIALIZED_TIMESTAMP) {
phase = Phase.COPY;
} else if (cleanupStartTime == UNINITIALIZED_TIMESTAMP) {
phase = Phase.COMMIT;
} else if (cycleEndTime == UNINITIALIZED_TIMESTAMP) {
phase = Phase.CLEANUP;
} else {
phase = Phase.DONE;
}
return phase;
}
/**
* @return serialized version of the {@link CycleLog}.
*/
byte[] toBytes() {
/*
Description of serialized format
compactionDetails (see CompactionDetails#toBytes())
copyStartTime
commitStartTime
cleanupStartTime
cycleEndTime
storeTokenPresent flag
safeToken if not null (see StoreFindToken#toBytes())
*/
byte[] compactionDetailsBytes = compactionDetails.toBytes();
byte[] safeTokenBytes = safeToken != null ? safeToken.toBytes() : ZERO_LENGTH_ARRAY;
int size =
compactionDetailsBytes.length + 4 * TIMESTAMP_SIZE + STORE_TOKEN_PRESENT_FLAG_SIZE + safeTokenBytes.length;
byte[] buf = new byte[size];
ByteBuffer bufWrap = ByteBuffer.wrap(buf);
bufWrap.put(compactionDetailsBytes);
bufWrap.putLong(copyStartTime);
bufWrap.putLong(commitStartTime);
bufWrap.putLong(cleanupStartTime);
bufWrap.putLong(cycleEndTime);
bufWrap.put(safeToken != null ? STORE_TOKEN_PRESENT : STORE_TOKEN_ABSENT);
bufWrap.put(safeTokenBytes);
return buf;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("CompactionDetails:")
.append(compactionDetails.toString())
.append("\n")
.append("CopyStartTime:")
.append(copyStartTime)
.append("\n")
.append("CommitStartTime:")
.append(commitStartTime)
.append("\n")
.append("CleanupStartTime:")
.append(cleanupStartTime)
.append("\n")
.append("CycleEndTime:")
.append(cycleEndTime)
.append("\n");
if (safeToken != null) {
sb.append("SafeToken:").append(safeToken.toString()).append("\n");
}
return sb.toString();
}
}
}
| 7,465 |
3,182 |
// Generated by delombok at Sat Jun 11 11:12:44 CEST 2016
class LoggerSlf4jOnNonType {
void foo() {
}
}
| 44 |
1,487 |
package com.frogermcs.androiddevmetrics.internal.ui;
import android.content.res.Resources;
import android.text.Html;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseExpandableListAdapter;
import android.widget.TextView;
import com.frogermcs.androiddevmetrics.R;
import com.frogermcs.androiddevmetrics.internal.MetricDescription;
import com.frogermcs.androiddevmetrics.internal.MetricDescriptionTreeItem;
import java.util.ArrayList;
import java.util.List;
/**
* Created by <NAME> on 30.01.2016.
*/
public class ExpandableMetricsListAdapter extends BaseExpandableListAdapter {
private final List<MetricDescription> metricDescriptionList = new ArrayList<>();
public void updateMetrics(List<MetricDescription> metricDescriptions) {
metricDescriptionList.clear();
metricDescriptionList.addAll(metricDescriptions);
}
@Override
public int getGroupCount() {
return metricDescriptionList.size();
}
@Override
public int getChildrenCount(int groupPosition) {
return metricDescriptionList.get(groupPosition).descriptionTreeItems.size();
}
@Override
public MetricDescription getGroup(int groupPosition) {
return metricDescriptionList.get(groupPosition);
}
@Override
public MetricDescriptionTreeItem getChild(int groupPosition, int childPosition) {
return metricDescriptionList.get(groupPosition).descriptionTreeItems.get(childPosition);
}
@Override
public long getGroupId(int groupPosition) {
return 0;
}
@Override
public long getChildId(int groupPosition, int childPosition) {
return 0;
}
@Override
public boolean hasStableIds() {
return false;
}
@Override
public View getGroupView(int groupPosition, boolean isExpanded, View convertView, ViewGroup parent) {
HeaderViewHolder viewHolder;
if (convertView == null) {
convertView = LayoutInflater.from(parent.getContext()).inflate(R.layout.adm_list_item_metrics_header, parent, false);
viewHolder = new HeaderViewHolder(convertView);
convertView.setTag(viewHolder);
} else {
viewHolder = (HeaderViewHolder) convertView.getTag();
}
MetricDescription metricDescription = getGroup(groupPosition);
viewHolder.bindView(metricDescription);
return convertView;
}
@Override
public View getChildView(int groupPosition, int childPosition, boolean isLastChild, View convertView, ViewGroup parent) {
DescriptionViewHolder viewHolder;
if (convertView == null) {
convertView = LayoutInflater.from(parent.getContext()).inflate(R.layout.adm_list_item_metrics_description, parent, false);
viewHolder = new DescriptionViewHolder(convertView);
convertView.setTag(viewHolder);
} else {
viewHolder = (DescriptionViewHolder) convertView.getTag();
}
MetricDescriptionTreeItem metricDescription = getChild(groupPosition, childPosition);
viewHolder.bindView(metricDescription);
return convertView;
}
@Override
public boolean isChildSelectable(int groupPosition, int childPosition) {
return false;
}
@Override
public void onGroupCollapsed(int groupPosition) {
super.onGroupCollapsed(groupPosition);
}
@Override
public void onGroupExpanded(int groupPosition) {
super.onGroupExpanded(groupPosition);
}
private class HeaderViewHolder {
View root;
TextView tvClassName;
TextView tvInitTime;
public HeaderViewHolder(View view) {
this.root = view;
tvClassName = (TextView) view.findViewById(R.id.tvClassName);
tvInitTime = (TextView) view.findViewById(R.id.tvInitTime);
}
public void bindView(MetricDescription metricDescription) {
tvClassName.setText(metricDescription.className);
tvInitTime.setText(Html.fromHtml(metricDescription.formattedInitTime));
final Resources resources = tvClassName.getContext().getResources();
if (metricDescription.warningLevel == 1) {
root.setBackgroundResource(R.color.d2m_bg_warning_1);
tvClassName.setTextColor(resources.getColor(R.color.d2m_font_warning_1_and_2));
tvInitTime.setTextColor(resources.getColor(R.color.d2m_font_warning_1_and_2));
} else if (metricDescription.warningLevel == 2) {
root.setBackgroundResource(R.color.d2m_bg_warning_2);
tvClassName.setTextColor(resources.getColor(R.color.d2m_font_warning_1_and_2));
tvInitTime.setTextColor(resources.getColor(R.color.d2m_font_warning_1_and_2));
} else if (metricDescription.warningLevel == 3) {
root.setBackgroundResource(R.color.d2m_bg_warning_3);
tvClassName.setTextColor(resources.getColor(R.color.d2m_font_warning_3));
tvInitTime.setTextColor(resources.getColor(R.color.d2m_font_warning_3));
} else {
root.setBackgroundResource(R.color.d2m_transparent);
tvInitTime.setTextColor(resources.getColor(R.color.d2m_font_default_description));
tvClassName.setTextColor(resources.getColor(R.color.d2m_font_default_title));
}
}
}
private class DescriptionViewHolder {
TextView tvTreeDescription;
public DescriptionViewHolder(View view) {
tvTreeDescription = (TextView) view.findViewById(R.id.tvTreeDescription);
}
public void bindView(MetricDescriptionTreeItem metricDescription) {
final Resources resources = tvTreeDescription.getContext().getResources();
tvTreeDescription.setText(Html.fromHtml(metricDescription.description));
if (metricDescription.warningLevel == 1) {
tvTreeDescription.setBackgroundResource(R.color.d2m_bg_warning_1);
tvTreeDescription.setTextColor(resources.getColor(R.color.d2m_font_warning_1_and_2));
} else if (metricDescription.warningLevel == 2) {
tvTreeDescription.setBackgroundResource(R.color.d2m_bg_warning_2);
tvTreeDescription.setTextColor(resources.getColor(R.color.d2m_font_warning_1_and_2));
} else if (metricDescription.warningLevel == 3) {
tvTreeDescription.setBackgroundResource(R.color.d2m_bg_warning_3);
tvTreeDescription.setTextColor(resources.getColor(R.color.d2m_font_warning_3));
} else {
tvTreeDescription.setBackgroundResource(R.color.d2m_transparent);
tvTreeDescription.setTextColor(resources.getColor(R.color.d2m_font_default_description));
}
}
}
}
| 2,773 |
4,857 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.conf.Configuration;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public class MetricsUserAggregateFactory {
private MetricsUserAggregateFactory() {
}
public static final String METRIC_USER_ENABLED_CONF = "hbase.regionserver.user.metrics.enabled";
public static final boolean DEFAULT_METRIC_USER_ENABLED_CONF = false;
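  // Usage sketch: per-user metrics are disabled by default and are switched on
  // in hbase-site.xml, e.g.
  //   <property>
  //     <name>hbase.regionserver.user.metrics.enabled</name>
  //     <value>true</value>
  //   </property>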
public static MetricsUserAggregate getMetricsUserAggregate(Configuration conf) {
if (conf.getBoolean(METRIC_USER_ENABLED_CONF, DEFAULT_METRIC_USER_ENABLED_CONF)) {
return new MetricsUserAggregateImpl(conf);
} else {
//NoOpMetricUserAggregate
return new MetricsUserAggregate() {
@Override public MetricsUserAggregateSource getSource() {
return null;
}
@Override public void updatePut(long t) {
}
@Override public void updateDelete(long t) {
}
@Override public void updateGet(long t) {
}
@Override public void updateIncrement(long t) {
}
@Override public void updateAppend(long t) {
}
@Override public void updateReplay(long t) {
}
@Override public void updateScanTime(long t) {
}
@Override public void updateFilteredReadRequests() {
}
@Override public void updateReadRequestCount() {
}
};
}
}
}
| 762 |
879 |
package org.zstack.test.storage.volume;
import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import org.zstack.core.cloudbus.CloudBus;
import org.zstack.core.componentloader.ComponentLoader;
import org.zstack.core.db.DatabaseFacade;
import org.zstack.header.configuration.DiskOfferingInventory;
import org.zstack.header.identity.AccountInventory;
import org.zstack.header.identity.SessionInventory;
import org.zstack.header.storage.primary.PrimaryStorageInventory;
import org.zstack.header.vm.VmInstanceInventory;
import org.zstack.header.volume.VolumeInventory;
import org.zstack.test.Api;
import org.zstack.test.ApiSenderException;
import org.zstack.test.DBUtil;
import org.zstack.test.deployer.Deployer;
import org.zstack.test.identity.IdentityCreator;
import java.util.List;
import static org.zstack.utils.CollectionDSL.list;
public class TestVmGetAttachableVolume1 {
Deployer deployer;
Api api;
ComponentLoader loader;
CloudBus bus;
DatabaseFacade dbf;
@Before
public void setUp() throws Exception {
DBUtil.reDeployDB();
deployer = new Deployer("deployerXml/volume/TestVmGetAttachableVolume.xml");
deployer.build();
api = deployer.getApi();
loader = deployer.getComponentLoader();
bus = loader.getComponent(CloudBus.class);
dbf = loader.getComponent(DatabaseFacade.class);
}
@Test
public void test() throws ApiSenderException, InterruptedException {
DiskOfferingInventory dinv = deployer.diskOfferings.get("TestDataDiskOffering");
VmInstanceInventory vm = deployer.vms.get("TestVm");
VmInstanceInventory adminVm = deployer.vms.get("TestVm1");
VolumeInventory adminVol1 = api.createDataVolume("data1", dinv.getUuid());
VolumeInventory adminVol2 = api.createDataVolume("data2", dinv.getUuid());
api.attachVolumeToVm(adminVm.getUuid(), adminVol1.getUuid());
api.detachVolumeFromVm(adminVol1.getUuid());
IdentityCreator identityCreator = new IdentityCreator(api);
AccountInventory test = identityCreator.useAccount("test");
SessionInventory session = identityCreator.getAccountSession();
// for account test
List<VolumeInventory> vols = api.getVmAttachableVolume(vm.getUuid(), session);
Assert.assertTrue(vols.isEmpty());
api.shareResource(list(adminVol1.getUuid(), adminVol2.getUuid()), list(test.getUuid()), false);
vols = api.getVmAttachableVolume(vm.getUuid(), session);
Assert.assertEquals(2, vols.size());
VolumeInventory userVol1 = api.createDataVolume("user-data1", dinv.getUuid(), session);
vols = api.getVmAttachableVolume(vm.getUuid(), session);
Assert.assertEquals(3, vols.size());
VolumeInventory userVol2 = api.createDataVolume("user-data2", dinv.getUuid(), session);
api.attachVolumeToVm(vm.getUuid(), userVol2.getUuid(), session);
api.detachVolumeFromVm(userVol2.getUuid(), session);
vols = api.getVmAttachableVolume(vm.getUuid(), session);
Assert.assertEquals(4, vols.size());
api.revokeAllResourceSharing(list(adminVol1.getUuid(), adminVol2.getUuid()), null);
vols = api.getVmAttachableVolume(vm.getUuid(), session);
Assert.assertEquals(2, vols.size());
// for admin
vols = api.getVmAttachableVolume(vm.getUuid());
Assert.assertEquals(4, vols.size());
        // create a data volume on a primary storage the vm is not attached to;
        // confirm it's not in the attachable candidate list
PrimaryStorageInventory ps3 = deployer.primaryStorages.get("TestPrimaryStorage2");
VolumeInventory data3 = api.createDataVolume("user-data3", dinv.getUuid(), ps3.getUuid(), null);
vols = api.getVmAttachableVolume(vm.getUuid());
for (VolumeInventory vol : vols) {
if (vol.getUuid().equals(data3.getUuid())) {
Assert.fail(String.format("volume %s should not be attachable", data3.getUuid()));
}
}
}
}
| 1,594 |
307 |
# Databricks notebook source
import os
import datetime
# For testing
base_path = 'dbfs:/mnt/datalake/data/lnd/2019_10_06_05_54_25'
parkingbay_filepath = os.path.join(base_path, "MelbParkingBayData.json")
sensors_filepath = os.path.join(base_path, "MelbParkingSensorData.json")
# COMMAND ----------
parkingbay_sdf = spark.read\
.option("multiLine", True)\
.json(parkingbay_filepath)
sensordata_sdf = spark.read\
.option("multiLine", True)\
.json(sensors_filepath)
# COMMAND ----------
display(parkingbay_sdf)
# COMMAND ----------
display(sensordata_sdf)
| 241 |
892 |
{
"schema_version": "1.2.0",
"id": "GHSA-65gc-65c8-rc75",
"modified": "2022-05-13T01:01:38Z",
"published": "2022-05-13T01:01:38Z",
"aliases": [
"CVE-2017-12114"
],
"details": "An exploitable improper authorization vulnerability exists in admin_peers API of cpp-ethereum's JSON-RPC (commit 4e1015743b95821849d001618a7ce82c7c073768). A JSON request can cause an access to the restricted functionality resulting in authorization bypass. An attacker can send JSON to trigger this vulnerability.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:N/A:N"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2017-12114"
},
{
"type": "WEB",
"url": "https://www.talosintelligence.com/vulnerability_reports/TALOS-2017-0466"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/102475"
}
],
"database_specific": {
"cwe_ids": [
"CWE-863"
],
"severity": "MODERATE",
"github_reviewed": false
}
}
| 511 |
386 |
package com.github.unclecatmyself.support;
import com.github.unclecatmyself.bootstrap.channel.protocol.Response;
import com.github.unclecatmyself.core.constant.Constants;
import java.util.HashMap;
import java.util.Map;
/**
 * Default Response implementation bundled with the project.
* Created by MySelf on 2018/11/23.
*/
public class InChatResponse implements Response {
public Map<String, String> loginSuccess() {
Map<String,String> backMap = new HashMap<>();
backMap.put(Constants.TYPE, Constants.LOGIN);
backMap.put(Constants.SUCCESS, Constants.TRUE);
return backMap;
}
public Map<String, String> loginError() {
Map<String,String> backMap = new HashMap<>();
backMap.put(Constants.TYPE, Constants.LOGIN);
backMap.put(Constants.SUCCESS, Constants.FALSE);
return backMap;
}
public Map<String, String> sendMe(String value) {
Map<String,String> backMap = new HashMap<>();
backMap.put(Constants.TYPE, Constants.SEND_ME);
backMap.put(Constants.VALUE,value);
return backMap;
}
public Map<String, String> sendBack(String otherOne, String value) {
Map<String,String> backMap = new HashMap<>();
backMap.put(Constants.TYPE, Constants.SEND_TO);
backMap.put(Constants.VALUE,value);
backMap.put(Constants.ONE,otherOne);
return backMap;
}
public Map<String, String> getMessage(String token, String value) {
Map<String,String> backMap = new HashMap<>();
backMap.put(Constants.TYPE, Constants.SEND_TO);
backMap.put(Constants.FROM,token);
backMap.put(Constants.VALUE,value);
return backMap;
}
public Map<String, String> sendGroup(String token, String value, String groupId) {
Map<String,String> backMap = new HashMap<>();
backMap.put(Constants.TYPE, Constants.SEND_GROUP);
backMap.put(Constants.FROM,token);
backMap.put(Constants.VALUE,value);
backMap.put(Constants.GROUP_ID,groupId);
return backMap;
}
}
| 861 |
897 |
<gh_stars>100-1000
/*
Given an N * N square matrix. A matrix has two diagonals: the principal diagonal starts at the first row, first column;
the secondary diagonal starts at the first row, last column.
Find the sum of its principal and secondary diagonal elements.
*/
#include <bits/stdc++.h>
using namespace std;
int ar[500][500];
/* get_principal_secondary_diagonal_sum() computes the sum of the matrix's principal and secondary diagonal elements */
int get_principal_secondary_diagonal_sum(int N)
{
int principal_secondary_diagonal_sum = 0;
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
{
if(i == j)
{
/* Principal diagonal cells are [0][0], [1][1], [2][2], and so on,
   hence the condition i == j.
*/
principal_secondary_diagonal_sum += ar[i][j];
}
else if( (i + j == (N - 1)) and i != j )
{
/* Secondary diagonal cells are [0][N-1], [1][N-2], [2][N-3], and so on,
   hence i + j == N - 1; the i != j check keeps the centre cell of an
   odd-sized matrix out of this branch (the else already guarantees it).
*/
principal_secondary_diagonal_sum += ar[i][j];
}
}
}
return principal_secondary_diagonal_sum;
}
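/* Illustrative alternative (not part of the original file): both diagonals can
   be summed in a single O(N) pass, since each one holds exactly N cells. */
int get_diagonal_sum_linear(int N)
{
    int sum = 0;
    for(int i = 0; i < N; i++)
    {
        sum += ar[i][i]; // principal diagonal cell
        if(i != N - 1 - i) // for odd N, skip double-counting the centre cell
        {
            sum += ar[i][N - 1 - i]; // secondary diagonal cell
        }
    }
    return sum;
}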
int main()
{
cout << "Enter the size of the matrix : \n";
int N;
cin >> N;
cout << "Enter matrix elements :\n";
for (int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
{
cin >> ar[i][j];
}
}
int principal_secondary_diagonal_sum = get_principal_secondary_diagonal_sum(N);
cout << "Sum of Principal and Secondary Diagonal of this matrix is : ";
cout << principal_secondary_diagonal_sum << endl;
}
/*
Standard Input and Output
Enter the size of the matrix :
4
Enter matrix elements :
1 2 3 4
5 6 7 8
9 10 11 12
13 14 15 16
Sum of Principal and Secondary Diagonal of this matrix is : 68
Time Complexity : O( N * N )
Space Complexity : O( 1 )
*/
| 992 |
572 |
from collections import ChainMap
# problem: merge two dictionaries
a = {1: 'a', 2: 'b', 3: 'c'}
b = {2: 'x', 3: 'z', 4: 'w'}
c = ChainMap(a, b)
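# Illustrative (not part of the original snippet): a ChainMap is a single view
# over both dicts; lookups search the maps in order, so keys in `a` win.
print(c[2])  # 'b' -- found in a first, shadowing b's 'x'
print(c[4])  # 'w' -- falls through to b
print(sorted(c.items()))  # [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'w')]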
| 86 |
460 |
package org.flhy.ext.utils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
// NOTE (assumption): the original file was missing imports for JSONObject and
// JSONArray; net.sf.json is assumed here, any equivalent JSON library would do.
import net.sf.json.JSONArray;
import net.sf.json.JSONObject;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
public class JsonUtils {
public static void success(String message) throws IOException {
success("System prompt", message);
}
public static void success(String title, String message) throws IOException {
response(true, title, message);
}
public static void fail(String message) throws IOException {
fail("System prompt", message);
}
public static void fail(String title, String message) throws IOException {
response(false, title, message);
}
public static void response(boolean success, String title, String message) throws IOException {
JSONObject jsonObject = new JSONObject();
jsonObject.put("success", success);
jsonObject.put("title", title);
jsonObject.put("message", message);
response(jsonObject);
}
public static void response(JSONObject jsonObject) throws IOException {
HttpServletResponse response = tl.get();
response.setContentType("text/html; charset=utf-8");
response.getWriter().write(jsonObject.toString());
}
public static void response(JSONArray jsonArray) throws IOException {
HttpServletResponse response = tl.get();
response.setContentType("text/html; charset=utf-8");
response.getWriter().write(jsonArray.toString());
}
public static void responseXml(String xml) throws IOException {
HttpServletResponse response = tl.get();
response.setContentType("text/xml; charset=utf-8");
response.getWriter().write(xml);
}
public static void download(File file) throws IOException {
HttpServletResponse response = tl.get();
response.setContentType("multipart/form-data");
response.setHeader("Content-Disposition", "attachment;fileName=" + file.getName());
InputStream is = null;
try {
is = FileUtils.openInputStream(file);
IOUtils.copy(is, response.getOutputStream());
} finally {
IOUtils.closeQuietly(is);
}
}
private static ThreadLocal<HttpServletResponse> tl = new ThreadLocal<HttpServletResponse>();
public static void putResponse(HttpServletResponse response) {
tl.set(response);
}
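    // Illustrative usage from a servlet/controller (not part of the original file):
    //   JsonUtils.putResponse(response); // bind the response to this thread first
    //   JsonUtils.success("Saved");      // then write a JSON payload to it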
}
| 752 |
14,668 |
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/css/properties/css_parsing_utils.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/core/css/parser/css_parser_context.h"
#include "third_party/blink/renderer/core/css/parser/css_parser_token_stream.h"
#include "third_party/blink/renderer/core/css/parser/css_tokenizer.h"
#include "third_party/blink/renderer/core/html/html_html_element.h"
#include "third_party/blink/renderer/core/testing/dummy_page_holder.h"
#include "third_party/blink/renderer/platform/instrumentation/use_counter.h"
namespace blink {
namespace {
using css_parsing_utils::AtDelimiter;
using css_parsing_utils::AtIdent;
using css_parsing_utils::ConsumeAngle;
using css_parsing_utils::ConsumeIdSelector;
using css_parsing_utils::ConsumeIfDelimiter;
using css_parsing_utils::ConsumeIfIdent;
CSSParserContext* MakeContext() {
return MakeGarbageCollected<CSSParserContext>(
kHTMLStandardMode, SecureContextMode::kInsecureContext);
}
TEST(CSSParsingUtilsTest, BasicShapeUseCount) {
auto dummy_page_holder =
std::make_unique<DummyPageHolder>(gfx::Size(800, 600));
Page::InsertOrdinaryPageForTesting(&dummy_page_holder->GetPage());
Document& document = dummy_page_holder->GetDocument();
WebFeature feature = WebFeature::kCSSBasicShape;
EXPECT_FALSE(document.IsUseCounted(feature));
document.documentElement()->setInnerHTML(
"<style>span { shape-outside: circle(); }</style>");
EXPECT_TRUE(document.IsUseCounted(feature));
}
TEST(CSSParsingUtilsTest, Revert) {
EXPECT_TRUE(css_parsing_utils::IsCSSWideKeyword(CSSValueID::kRevert));
EXPECT_TRUE(css_parsing_utils::IsCSSWideKeyword("revert"));
}
TEST(CSSParsingUtilsTest, ConsumeIdSelector) {
{
String text = "#foo";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_EQ("#foo", ConsumeIdSelector(range)->CssText());
}
{
String text = "#bar ";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_EQ("#bar", ConsumeIdSelector(range)->CssText());
EXPECT_TRUE(range.AtEnd())
<< "ConsumeIdSelector cleans up trailing whitespace";
}
{
String text = "#123";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
ASSERT_TRUE(range.Peek().GetType() == kHashToken &&
range.Peek().GetHashTokenType() == kHashTokenUnrestricted);
EXPECT_FALSE(ConsumeIdSelector(range))
<< "kHashTokenUnrestricted is not a valid <id-selector>";
}
{
String text = "#";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_FALSE(ConsumeIdSelector(range));
}
{
String text = " #foo";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_FALSE(ConsumeIdSelector(range))
<< "ConsumeIdSelector does not accept preceding whitespace";
EXPECT_EQ(kWhitespaceToken, range.Peek().GetType());
}
{
String text = "foo";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_FALSE(ConsumeIdSelector(range));
}
{
String text = "##";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_FALSE(ConsumeIdSelector(range));
}
{
String text = "10px";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_FALSE(ConsumeIdSelector(range));
}
}
double ConsumeAngleValue(String target) {
auto tokens = CSSTokenizer(target).TokenizeToEOF();
CSSParserTokenRange range(tokens);
return ConsumeAngle(range, *MakeContext(), absl::nullopt)->ComputeDegrees();
}
double ConsumeAngleValue(String target, double min, double max) {
auto tokens = CSSTokenizer(target).TokenizeToEOF();
CSSParserTokenRange range(tokens);
return ConsumeAngle(range, *MakeContext(), absl::nullopt, min, max)
->ComputeDegrees();
}
TEST(CSSParsingUtilsTest, ConsumeAngles) {
const double kMaxDegreeValue = 2867080569122160;
EXPECT_EQ(10.0, ConsumeAngleValue("10deg"));
EXPECT_EQ(-kMaxDegreeValue, ConsumeAngleValue("-3.40282e+38deg"));
EXPECT_EQ(kMaxDegreeValue, ConsumeAngleValue("3.40282e+38deg"));
EXPECT_EQ(kMaxDegreeValue, ConsumeAngleValue("calc(infinity * 1deg)"));
EXPECT_EQ(-kMaxDegreeValue, ConsumeAngleValue("calc(-infinity * 1deg)"));
EXPECT_EQ(kMaxDegreeValue, ConsumeAngleValue("calc(NaN * 1deg)"));
// Math function with min and max ranges
EXPECT_EQ(-100, ConsumeAngleValue("calc(-3.40282e+38deg)", -100, 100));
EXPECT_EQ(100, ConsumeAngleValue("calc(3.40282e+38deg)", -100, 100));
}
TEST(CSSParsingUtilsTest, AtIdent_Range) {
String text = "foo,bar,10px";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_FALSE(AtIdent(range.Consume(), "bar")); // foo
EXPECT_FALSE(AtIdent(range.Consume(), "bar")); // ,
EXPECT_TRUE(AtIdent(range.Consume(), "bar")); // bar
EXPECT_FALSE(AtIdent(range.Consume(), "bar")); // ,
EXPECT_FALSE(AtIdent(range.Consume(), "bar")); // 10px
EXPECT_FALSE(AtIdent(range.Consume(), "bar")); // EOF
}
TEST(CSSParsingUtilsTest, AtIdent_Stream) {
String text = "foo,bar,10px";
CSSTokenizer tokenizer(text);
CSSParserTokenStream stream(tokenizer);
EXPECT_FALSE(AtIdent(stream.Consume(), "bar")); // foo
EXPECT_FALSE(AtIdent(stream.Consume(), "bar")); // ,
EXPECT_TRUE(AtIdent(stream.Consume(), "bar")); // bar
EXPECT_FALSE(AtIdent(stream.Consume(), "bar")); // ,
EXPECT_FALSE(AtIdent(stream.Consume(), "bar")); // 10px
EXPECT_FALSE(AtIdent(stream.Consume(), "bar")); // EOF
}
TEST(CSSParsingUtilsTest, ConsumeIfIdent_Range) {
String text = "foo,bar,10px";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_TRUE(AtIdent(range.Peek(), "foo"));
EXPECT_FALSE(ConsumeIfIdent(range, "bar"));
EXPECT_TRUE(AtIdent(range.Peek(), "foo"));
EXPECT_TRUE(ConsumeIfIdent(range, "foo"));
EXPECT_EQ(kCommaToken, range.Peek().GetType());
}
TEST(CSSParsingUtilsTest, ConsumeIfIdent_Stream) {
String text = "foo,bar,10px";
CSSTokenizer tokenizer(text);
CSSParserTokenStream stream(tokenizer);
EXPECT_TRUE(AtIdent(stream.Peek(), "foo"));
EXPECT_FALSE(ConsumeIfIdent(stream, "bar"));
EXPECT_TRUE(AtIdent(stream.Peek(), "foo"));
EXPECT_TRUE(ConsumeIfIdent(stream, "foo"));
EXPECT_EQ(kCommaToken, stream.Peek().GetType());
}
TEST(CSSParsingUtilsTest, AtDelimiter_Range) {
String text = "foo,<,10px";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_FALSE(AtDelimiter(range.Consume(), '<')); // foo
EXPECT_FALSE(AtDelimiter(range.Consume(), '<')); // ,
EXPECT_TRUE(AtDelimiter(range.Consume(), '<')); // <
EXPECT_FALSE(AtDelimiter(range.Consume(), '<')); // ,
EXPECT_FALSE(AtDelimiter(range.Consume(), '<')); // 10px
EXPECT_FALSE(AtDelimiter(range.Consume(), '<')); // EOF
}
TEST(CSSParsingUtilsTest, AtDelimiter_Stream) {
String text = "foo,<,10px";
CSSTokenizer tokenizer(text);
CSSParserTokenStream stream(tokenizer);
EXPECT_FALSE(AtDelimiter(stream.Consume(), '<')); // foo
EXPECT_FALSE(AtDelimiter(stream.Consume(), '<')); // ,
EXPECT_TRUE(AtDelimiter(stream.Consume(), '<')); // <
EXPECT_FALSE(AtDelimiter(stream.Consume(), '<')); // ,
EXPECT_FALSE(AtDelimiter(stream.Consume(), '<')); // 10px
EXPECT_FALSE(AtDelimiter(stream.Consume(), '<')); // EOF
}
TEST(CSSParsingUtilsTest, ConsumeIfDelimiter_Range) {
String text = "<,=,10px";
auto tokens = CSSTokenizer(text).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_TRUE(AtDelimiter(range.Peek(), '<'));
EXPECT_FALSE(ConsumeIfDelimiter(range, '='));
EXPECT_TRUE(AtDelimiter(range.Peek(), '<'));
EXPECT_TRUE(ConsumeIfDelimiter(range, '<'));
EXPECT_EQ(kCommaToken, range.Peek().GetType());
}
TEST(CSSParsingUtilsTest, ConsumeIfDelimiter_Stream) {
String text = "<,=,10px";
CSSTokenizer tokenizer(text);
CSSParserTokenStream stream(tokenizer);
EXPECT_TRUE(AtDelimiter(stream.Peek(), '<'));
EXPECT_FALSE(ConsumeIfDelimiter(stream, '='));
EXPECT_TRUE(AtDelimiter(stream.Peek(), '<'));
EXPECT_TRUE(ConsumeIfDelimiter(stream, '<'));
EXPECT_EQ(kCommaToken, stream.Peek().GetType());
}
TEST(CSSParsingUtilsTest, ConsumeAnyValue) {
struct {
// The input string to parse as <any-value>.
const char* input;
// The expected result from ConsumeAnyValue.
bool expected;
// The serialization of the tokens remaining in the range.
const char* remainder;
} tests[] = {
{"1", true, ""},
{"1px", true, ""},
{"1px ", true, ""},
{"ident", true, ""},
{"(([ident]))", true, ""},
{" ( ( 1 ) ) ", true, ""},
{"rgb(1, 2, 3)", true, ""},
{"rgb(1, 2, 3", true, ""},
{"!!!;;;", true, ""},
{"asdf)", false, ")"},
{")asdf", false, ")asdf"},
{"(ab)cd) e", false, ") e"},
{"(as]df) e", false, " e"},
{"(a b [ c { d ) e } f ] g h) i", false, " i"},
{"a url(() b", false, "url(() b"},
};
for (const auto& test : tests) {
String input(test.input);
SCOPED_TRACE(input);
auto tokens = CSSTokenizer(input).TokenizeToEOF();
CSSParserTokenRange range(tokens);
EXPECT_EQ(test.expected, css_parsing_utils::ConsumeAnyValue(range));
EXPECT_EQ(String(test.remainder), range.Serialize());
}
}
} // namespace
} // namespace blink
| 3,889 |
468 |
{
"name": "metamask",
"author": "<NAME>",
"license": "CC BY-NC-SA 4.0",
"raster": "http://hexb.in/hexagons/metamask.png"
}
| 62 |
369 |
//*****************************************************************************
//
// diag.h - Prototypes for the diagnostic functions.
//
// Copyright (c) 2005,2006 Luminary Micro, Inc. All rights reserved.
//
// Software License Agreement
//
// Luminary Micro, Inc. (LMI) is supplying this software for use solely and
// exclusively on LMI's Stellaris Family of microcontroller products.
//
// The software is owned by LMI and/or its suppliers, and is protected under
// applicable copyright laws. All rights are reserved. Any use in violation
// of the foregoing restrictions may subject the user to criminal sanctions
// under applicable laws, as well as to civil liability for the breach of the
// terms and conditions of this license.
//
// THIS SOFTWARE IS PROVIDED "AS IS". NO WARRANTIES, WHETHER EXPRESS, IMPLIED
// OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
// LMI SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
// CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
//
// This is part of revision 635 of the Stellaris Driver Library.
//
//*****************************************************************************
#ifndef __DIAG_H__
#define __DIAG_H__
#ifdef __cplusplus
extern "C"
{
#endif
//*****************************************************************************
//
// Values that can be passed as the iMode parameter to DiagOpen, DiagRead, and
// DiagWrite.
//
//*****************************************************************************
#define OPEN_R 0 // read access
#define OPEN_W 4 // write access
#define OPEN_A 8 // append to file
#define OPEN_B 1 // binary access
#define OPEN_PLUS 2 // read and write access
//*****************************************************************************
//
// Prototypes for the APIs.
//
//*****************************************************************************
extern int DiagOpenStdio(void);
extern int DiagOpen(const char *pcName, int iMode);
extern int DiagClose(int iHandle);
extern int DiagWrite(int iHandle, const char *pcBuf, unsigned long ulLen,
int iMode);
extern int DiagRead(int iHandle, char *pcBuf, unsigned long ulLen, int iMode);
extern long DiagFlen(int iHandle);
extern void DiagExit(int iRet);
extern char *DiagCommandString(char *pcBuf, unsigned long ulLen);
#ifdef __cplusplus
}
#endif
#endif // __DIAG_H__
| 840 |
3,861 |
#ifndef GRUB_OFPATH_MACHINE_UTIL_HEADER
#define GRUB_OFPATH_MACHINE_UTIL_HEADER 1
char *grub_util_devname_to_ofpath (const char *devname);
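/* Illustrative usage (not part of the original header): maps a device name to
   its Open Firmware path.

     char *ofpath = grub_util_devname_to_ofpath ("/dev/sda");
     if (ofpath)
       printf ("%s\n", ofpath);
*/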
#endif /* ! GRUB_OFPATH_MACHINE_UTIL_HEADER */
| 88 |
983 |
package org.xm;
/**
* @author xuming
*/
public class PhraseSimilarityDemo {
public static void main(String[] args) {
String phrase1 = "继续努力";
String phrase2 = "持续发展";
double result = Similarity.phraseSimilarity(phrase1, phrase2);
System.out.println(phrase1 + " vs " + phrase2 + " phrase similarity score: " + result);
}
}
| 168 |
522 |
from unittest import mock
import pytest
from briefcase.platforms.macOS.app import macOSAppBuildCommand
@pytest.fixture
def build_command(tmp_path):
command = macOSAppBuildCommand(base_path=tmp_path)
command.select_identity = mock.MagicMock()
command.sign_app = mock.MagicMock()
command.sign_file = mock.MagicMock()
return command
def test_build_app(build_command, first_app_with_binaries):
"""A macOS App is adhoc signed as part of the build process."""
# Build the app
build_command.build_app(first_app_with_binaries)
# A request has been made to sign the app
build_command.sign_app.assert_called_once_with(
app=first_app_with_binaries, identity="-"
)
# No request to select a signing identity was made
build_command.select_identity.assert_not_called()
# No attempt was made to sign a specific file;
# This ignores the calls that would have been made transitively
# by calling sign_app()
build_command.sign_file.assert_not_called()
| 349 |
684 |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.activiti.explorer.ui.task.listener;
import org.activiti.engine.IdentityService;
import org.activiti.engine.ProcessEngines;
import org.activiti.engine.TaskService;
import org.activiti.engine.identity.User;
import org.activiti.engine.task.IdentityLink;
import org.activiti.engine.task.Task;
import org.activiti.explorer.ExplorerApp;
import org.activiti.explorer.I18nManager;
import org.activiti.explorer.Messages;
import org.activiti.explorer.ViewManager;
import org.activiti.explorer.ui.custom.ConfirmationDialogPopupWindow;
import org.activiti.explorer.ui.event.ConfirmationEvent;
import org.activiti.explorer.ui.event.ConfirmationEventListener;
import org.activiti.explorer.ui.task.TaskDetailPanel;
import com.vaadin.ui.Button.ClickEvent;
import com.vaadin.ui.Button.ClickListener;
/**
* @author <NAME>
*/
public class RemoveInvolvedPersonListener implements ClickListener {
private static final long serialVersionUID = 1L;
protected IdentityLink identityLink;
protected Task task;
protected TaskDetailPanel taskDetailPanel;
protected I18nManager i18nManager;
protected ViewManager viewManager;
protected transient IdentityService identityService;
protected transient TaskService taskService;
public RemoveInvolvedPersonListener(IdentityLink identityLink, Task task, TaskDetailPanel taskDetailPanel) {
this.identityLink = identityLink;
this.task = task;
this.taskDetailPanel = taskDetailPanel;
this.i18nManager = ExplorerApp.get().getI18nManager();
this.viewManager = ExplorerApp.get().getViewManager();
this.taskService = ProcessEngines.getDefaultProcessEngine().getTaskService();
this.identityService = ProcessEngines.getDefaultProcessEngine().getIdentityService();
}
public void buttonClick(ClickEvent event) {
User user = identityService.createUserQuery().userId(identityLink.getUserId()).singleResult();
String name = user.getFirstName() + " " + user.getLastName();
ConfirmationDialogPopupWindow confirmationPopup = new ConfirmationDialogPopupWindow(
i18nManager.getMessage(Messages.TASK_INVOLVED_REMOVE_CONFIRMATION_TITLE, name),
i18nManager.getMessage(Messages.TASK_INVOLVED_REMOVE_CONFIRMATION_DESCRIPTION, name, task.getName()));
confirmationPopup.addListener(new ConfirmationEventListener() {
private static final long serialVersionUID = 1L;
protected void rejected(ConfirmationEvent event) {
}
protected void confirmed(ConfirmationEvent event) {
taskService.deleteUserIdentityLink(identityLink.getTaskId(), identityLink.getUserId(), identityLink.getType());
taskDetailPanel.notifyPeopleInvolvedChanged();
}
});
viewManager.showPopupWindow(confirmationPopup);
}
}
| 1,032 |
431 |
def remove_prefix(text, prefix):
    if text.startswith(prefix):
        return text[len(prefix):]
    return text  # prefix absent: return the text unchanged
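if __name__ == "__main__":
    # Illustrative checks (not part of the original snippet).
    assert remove_prefix("unhappy", "un") == "happy"
    assert remove_prefix("happy", "un") == "happy"  # no prefix: unchanged
    # On Python 3.9+, the built-in str.removeprefix() does the same thing.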
| 49 |
5,169 |
{
"name": "Leto",
"version": "2.0.0",
"summary": "掌通无线旗下的小游戏工具库",
"description": "Leto使用了双WebView的结构, 在iOS/Android上都是这样. 一个WebView可以叫Service WebView, 这个就用来提供微信api的功能. 一个叫做Page WebView, 用来提供ui组件的那些功能. 在Page WebView里想要调用微信api的时候, 是通过释放一个PAGE EVENT这种间接的方式去调用Service WebView",
"homepage": "https://github.com/ztwireless",
"license": {
"type": "Copyright",
"text": "Ztwireless Inc. 2019"
},
"authors": {
"ztwireless": "<EMAIL>"
},
"platforms": {
"ios": "9.0"
},
"source": {
"http": "https://down.mgc-games.com/sdk/iosframework.zip"
},
"resources": [
"framework/baidumobadsdk.bundle",
"framework/BUAdSDK.bundle"
],
"vendored_frameworks": "framework/Leto.framework",
"requires_arc": true,
"xcconfig": {
"ENABLE_BITCODE": "No"
}
}
| 494 |
2,106 |
from __future__ import absolute_import, division, print_function
from .serialization import *
from .json_dumps import json_dumps
from .json_dumps_trusted import json_dumps_trusted
from .object_hook import object_hook
from .object_hook_trusted import object_hook_trusted
| 79 |
831 |
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.idea.ui.wizard;
import com.intellij.ui.components.JBScrollPane;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import java.awt.*;
import static javax.swing.ScrollPaneConstants.HORIZONTAL_SCROLLBAR_NEVER;
import static javax.swing.ScrollPaneConstants.VERTICAL_SCROLLBAR_AS_NEEDED;
/**
* A panel that provides a standard look and feel across wizard steps used in Android Studio.
*/
public final class StudioWizardStepPanel extends JPanel {
private JPanel myRootPanel;
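  // NOTE (assumption): myRootPanel is bound by the IntelliJ GUI Designer form
  // for this class; it is never assigned explicitly in this source file.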
public StudioWizardStepPanel(@NotNull JPanel innerPanel) {
super(new BorderLayout());
myRootPanel.add(innerPanel);
add(myRootPanel);
}
  /**
   * When creating a StudioWizardStepPanel that may be tall enough to require vertical scrolling,
   * use this helper method to wrap it automatically in an appropriate JScrollPane.
   */
@NotNull
public static JBScrollPane wrappedWithVScroll(@NotNull JPanel innerPanel) {
JBScrollPane sp = new JBScrollPane(new StudioWizardStepPanel(innerPanel), VERTICAL_SCROLLBAR_AS_NEEDED, HORIZONTAL_SCROLLBAR_NEVER);
sp.setBorder(BorderFactory.createEmptyBorder());
return sp;
}
}
| 572 |
1,562 |
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/transport/status_metadata.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/transport/static_metadata.h"
/* we offset status by a small amount when storing it into transport metadata,
   as metadata cannot store a 0 value (0 is used as OK for grpc_status_codes)
 */
#define STATUS_OFFSET 1
static void destroy_status(void* /*ignored*/) {}
grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md) {
if (grpc_mdelem_static_value_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) {
return GRPC_STATUS_OK;
}
if (grpc_mdelem_static_value_eq(md, GRPC_MDELEM_GRPC_STATUS_1)) {
return GRPC_STATUS_CANCELLED;
}
if (grpc_mdelem_static_value_eq(md, GRPC_MDELEM_GRPC_STATUS_2)) {
return GRPC_STATUS_UNKNOWN;
}
void* user_data = grpc_mdelem_get_user_data(md, destroy_status);
if (user_data != nullptr) {
return static_cast<grpc_status_code>((intptr_t)user_data - STATUS_OFFSET);
}
uint32_t status;
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(md), &status)) {
status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
}
grpc_mdelem_set_user_data(
md, destroy_status, (void*)static_cast<intptr_t>(status + STATUS_OFFSET));
return static_cast<grpc_status_code>(status);
}
grpc_mdelem grpc_get_reffed_status_elem_slowpath(int status_code) {
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(status_code, tmp);
return grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_STATUS,
grpc_core::UnmanagedMemorySlice(tmp));
}
| 834 |
383 |
{
"prettier.tabWidth": 2,
"editor.formatOnSave": true,
"tslint.enable": true,
"tslint.autoFixOnSave": true,
"files.autoSave": "off"
}
| 95 |
892 |
{
"schema_version": "1.2.0",
"id": "GHSA-j433-wpfx-cxg3",
"modified": "2022-05-13T01:04:15Z",
"published": "2022-05-13T01:04:15Z",
"aliases": [
"CVE-2018-6508"
],
"details": "Puppet Enterprise 2017.3.x prior to 2017.3.3 are vulnerable to a remote execution bug when a specially crafted string was passed into the facter_task or puppet_conf tasks. This vulnerability only affects tasks in the affected modules, if you are not using puppet tasks you are not affected by this vulnerability.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:H/A:H"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2018-6508"
},
{
"type": "WEB",
"url": "https://puppet.com/security/cve/CVE-2018-6508"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/103020"
}
],
"database_specific": {
"cwe_ids": [
"CWE-134"
],
"severity": "HIGH",
"github_reviewed": false
}
}
| 494 |
2,436 |
/******************************************************************************
*
* Copyright (c) 2017, the Perspective Authors.
*
* This file is part of the Perspective library, distributed under the terms of
* the Apache License 2.0. The full license can be found in the LICENSE file.
*
*/
#pragma once
#include <perspective/first.h>
#include <perspective/scalar.h>
#include <perspective/exports.h>
#include <vector>
namespace perspective {
struct PERSPECTIVE_EXPORT t_path {
t_path();
t_path(const std::vector<t_tscalar>& path);
const std::vector<t_tscalar>& path() const;
std::vector<t_tscalar>& path();
std::vector<t_tscalar> m_path;
};
} // end namespace perspective
| 258 |
310 |
//
// AppDelegate.h
// PropertyCross
//
// Created by <NAME> on 11/10/2012.
// Copyright (c) 2012 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
#import <CoreData/CoreData.h>
@class PropertyCrossViewController;
@class PersistentDataStore;
@interface AppDelegate : UIResponder <UIApplicationDelegate>
@property (strong, nonatomic) UIWindow *window;
// gets the persistent datastore for the application
@property (readonly) PersistentDataStore* persistentDataStore;
@end
| 158 |
1,209 |
/*
Fontname: -Adobe-Courier-Bold-R-Normal--11-80-100-100-M-60-ISO10646-1
Copyright: Copyright (c) 1984, 1987 Adobe Systems Incorporated. All Rights Reserved. Copyright (c) 1988, 1991 Digital Equipment Corporation. All Rights Reserved.
Capital A Height: 6, '1' Height: 7
Calculated Max Values w= 7 h=10 x= 2 y= 6 dx= 6 dy= 0 ascent= 9 len=10
Font Bounding box w=12 h=16 x=-3 y=-4
Calculated Min Values x=-1 y=-2 dx= 0 dy= 0
Pure Font ascent = 6 descent=-2
X Font ascent = 7 descent=-2
Max Font ascent = 9 descent=-2
*/
#include "u8g.h"
const u8g_fntpgm_uint8_t u8g_font_courB08[2425] U8G_FONT_SECTION("u8g_font_courB08") = {
0,12,16,253,252,6,1,146,3,13,32,255,254,9,254,7,
254,0,0,0,6,0,1,2,6,6,6,1,0,192,192,192,
192,0,192,3,3,3,6,1,3,160,160,160,5,8,8,6,
0,255,80,80,248,80,80,248,80,80,5,9,9,6,0,255,
32,120,200,240,120,24,216,240,32,5,7,7,6,0,0,224,
168,240,32,120,168,56,6,6,6,6,255,0,56,96,48,124,
216,124,1,3,3,6,2,3,128,128,128,3,8,8,6,1,
255,32,64,192,192,192,192,64,32,3,8,8,6,1,255,128,
64,96,96,96,96,64,128,4,4,4,6,0,3,32,240,96,
144,5,5,5,6,0,1,32,32,248,32,32,3,3,3,6,
1,254,96,64,128,5,1,1,6,0,3,248,2,1,1,6,
1,0,192,4,8,8,6,1,255,16,16,32,32,64,64,128,
128,5,7,7,6,0,0,112,216,216,216,216,216,112,6,7,
7,6,0,0,48,240,48,48,48,48,252,5,7,7,6,0,
0,112,216,24,48,96,216,248,5,7,7,6,0,0,112,216,
24,112,24,216,112,6,7,7,6,0,0,24,56,88,216,252,
24,24,5,7,7,6,0,0,248,192,240,216,24,152,240,5,
7,7,6,0,0,112,216,192,240,216,216,112,5,7,7,6,
0,0,248,216,24,48,48,96,96,5,7,7,6,0,0,112,
216,216,112,216,216,112,5,7,7,6,0,0,112,216,216,120,
24,216,112,2,4,4,6,1,0,192,0,0,192,3,6,6,
6,0,254,96,0,0,96,64,128,4,5,5,6,0,1,48,
96,192,96,48,4,3,3,6,0,2,240,0,240,4,5,5,
6,1,1,192,96,48,96,192,5,6,6,6,0,0,112,152,
48,96,0,96,6,8,8,6,0,255,112,200,152,168,168,156,
192,112,7,6,6,6,255,0,120,56,40,124,108,238,6,6,
6,6,255,0,248,108,120,108,108,248,5,6,6,6,0,0,
120,216,192,192,216,112,6,6,6,6,255,0,248,108,108,108,
108,248,6,6,6,6,255,0,252,96,120,96,108,252,6,6,
6,6,255,0,252,96,120,96,96,240,5,6,6,6,0,0,
112,216,192,248,216,120,7,6,6,6,255,0,238,108,124,108,
108,238,4,6,6,6,0,0,240,96,96,96,96,240,6,6,
6,6,255,0,60,24,24,216,216,112,7,6,6,6,255,0,
236,104,112,120,108,246,6,6,6,6,255,0,240,96,96,96,
108,252,6,6,6,6,255,0,196,108,108,124,84,212,7,6,
6,6,255,0,238,116,116,108,108,228,5,6,6,6,0,0,
112,216,216,216,216,112,6,6,6,6,255,0,248,108,108,120,
96,240,5,7,7,6,0,255,112,216,216,216,216,112,24,7,
6,6,6,255,0,248,108,108,120,108,246,5,6,6,6,0,
0,120,200,240,56,152,240,6,6,6,6,255,0,252,180,48,
48,48,120,7,6,6,6,255,0,238,108,108,108,108,56,7,
6,6,6,255,0,238,108,40,56,56,16,7,6,6,6,255,
0,214,84,84,124,56,40,6,6,6,6,0,0,204,120,48,
48,120,204,7,6,6,6,255,0,230,102,60,24,24,60,5,
6,6,6,0,0,248,216,48,96,216,248,3,8,8,6,1,
255,224,192,192,192,192,192,192,224,4,8,8,6,0,255,128,
128,64,64,32,32,16,16,3,8,8,6,1,255,224,96,96,
96,96,96,96,224,5,3,3,6,0,4,32,112,216,6,1,
1,6,0,254,252,2,2,2,6,2,6,128,64,6,5,5,
6,0,0,112,216,120,216,252,6,7,7,6,255,0,224,96,
120,108,108,108,248,5,5,5,6,0,0,112,216,192,216,112,
6,7,7,6,0,0,56,24,120,216,216,216,124,5,5,5,
6,0,0,112,216,248,192,120,5,7,7,6,0,0,56,96,
248,96,96,96,248,6,7,7,6,0,254,108,216,216,216,120,
24,240,6,7,7,6,255,0,224,96,120,108,108,108,108,6,
7,7,6,0,0,48,0,240,48,48,48,252,4,9,9,6,
0,254,48,0,240,48,48,48,48,48,224,7,7,7,6,255,
0,224,96,108,120,112,120,110,6,7,7,6,0,0,240,48,
48,48,48,48,252,6,5,5,6,255,0,248,124,84,84,84,
6,5,5,6,255,0,216,108,108,108,108,5,5,5,6,0,
0,112,216,216,216,112,6,7,7,6,255,254,248,108,108,108,
120,96,240,6,7,7,6,0,254,108,216,216,216,120,24,60,
6,5,5,6,0,0,220,116,96,96,240,6,5,5,6,0,
0,120,224,120,28,248,6,7,7,6,0,0,96,96,248,96,
96,108,56,7,5,5,6,255,0,236,108,108,108,62,6,5,
5,6,255,0,236,108,56,56,16,7,5,5,6,255,0,214,
84,124,60,40,6,5,5,6,0,0,236,120,48,120,220,7,
7,7,6,255,254,238,108,108,40,56,48,224,5,5,5,6,
0,0,248,176,96,216,248,4,8,8,6,1,255,48,96,96,
192,96,96,96,48,1,7,7,6,2,255,128,128,128,128,128,
128,128,4,8,8,6,0,255,192,96,96,48,96,96,96,192,
5,2,2,6,0,3,104,176,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,0,0,0,6,0,1,2,
7,7,6,1,254,192,0,192,192,192,192,192,5,8,8,6,
0,255,32,32,120,200,192,120,32,32,6,7,7,6,0,0,
56,104,32,248,32,100,248,5,5,5,6,0,1,136,112,80,
112,136,6,7,7,6,0,0,204,72,252,48,252,48,120,1,
9,9,6,2,254,128,128,128,128,0,128,128,128,128,5,8,
8,6,0,255,120,72,96,144,72,48,144,240,3,1,1,6,
1,5,160,6,7,7,6,0,0,48,72,180,164,180,72,48,
4,5,5,6,1,2,224,16,208,0,240,7,5,5,6,255,
0,54,108,216,108,54,5,3,3,6,0,2,248,8,8,5,
1,1,6,0,3,248,7,7,7,6,0,0,56,68,186,178,
170,68,56,4,1,1,6,0,5,240,4,3,3,6,0,4,
96,144,96,5,6,6,6,0,0,32,32,248,32,0,248,3,
4,4,6,1,3,96,160,64,224,3,4,4,6,1,3,224,
64,32,192,2,2,2,6,2,5,64,128,7,7,7,6,255,
254,236,108,108,108,126,64,64,6,8,8,6,0,255,124,168,
168,104,40,40,40,108,2,1,1,6,1,3,192,3,3,3,
6,1,254,64,32,192,3,4,4,6,1,3,192,64,64,224,
4,5,5,6,1,2,96,144,96,0,240,7,5,5,6,255,
0,216,108,54,108,216,7,7,7,6,255,0,192,68,72,244,
44,94,4,7,7,7,6,255,0,192,68,72,246,42,68,14,
7,7,7,6,255,0,224,68,40,212,44,94,4,5,7,7,
6,0,254,48,0,48,48,96,200,112,7,9,9,6,255,0,
32,16,0,120,56,40,124,108,238,7,9,9,6,255,0,16,
32,0,120,56,40,124,108,238,7,9,9,6,255,0,16,40,
0,120,56,40,124,108,238,7,9,9,6,255,0,52,72,0,
120,56,40,124,108,238,7,8,8,6,255,0,40,0,120,56,
40,124,108,238,7,9,9,6,255,0,48,72,48,120,56,40,
124,108,238,7,6,6,6,255,0,126,58,108,120,218,222,5,
8,8,6,0,254,120,216,192,192,216,112,16,96,6,9,9,
6,255,0,32,16,0,252,100,120,96,108,252,6,9,9,6,
255,0,16,32,0,252,100,120,96,108,252,6,9,9,6,255,
0,32,80,0,252,100,120,96,108,252,6,8,8,6,255,0,
80,0,252,100,120,96,108,252,4,9,9,6,0,0,64,32,
0,240,96,96,96,96,240,4,9,9,6,0,0,32,64,0,
240,96,96,96,96,240,4,9,9,6,0,0,64,160,0,240,
96,96,96,96,240,4,8,8,6,0,0,160,0,240,96,96,
96,96,240,6,6,6,6,255,0,248,108,244,100,108,248,7,
9,9,6,255,0,52,72,0,238,100,116,124,108,236,5,9,
9,6,0,0,64,32,0,112,216,216,216,216,112,5,9,9,
6,0,0,32,64,0,112,216,216,216,216,112,5,9,9,6,
0,0,32,80,0,112,216,216,216,216,112,5,9,9,6,0,
0,104,144,0,112,216,216,216,216,112,5,8,8,6,0,0,
80,0,112,216,216,216,216,112,5,5,5,6,0,1,136,80,
32,80,136,7,6,6,6,255,0,58,108,124,108,108,184,7,
9,9,6,255,0,32,16,0,238,108,108,108,108,56,7,9,
9,6,255,0,8,16,0,238,108,108,108,108,56,7,9,9,
6,255,0,16,40,0,238,108,108,108,108,56,7,8,8,6,
255,0,40,0,238,108,108,108,108,56,7,9,9,6,255,0,
4,8,0,230,102,60,24,24,60,6,6,6,6,255,0,224,
120,108,108,120,224,7,6,6,6,255,0,56,104,124,102,102,
236,6,8,8,6,0,0,32,16,0,112,152,120,216,252,6,
8,8,6,0,0,16,32,0,112,152,120,216,252,6,8,8,
6,0,0,32,80,0,112,152,120,216,252,6,8,8,6,0,
0,104,144,0,112,152,120,216,252,6,7,7,6,0,0,80,
0,112,152,120,216,252,6,9,9,6,0,0,48,72,48,0,
112,152,120,216,252,6,5,5,6,255,0,108,180,124,176,220,
5,7,7,6,0,254,112,216,192,216,112,16,96,5,8,8,
6,0,0,64,32,0,112,216,248,192,120,5,8,8,6,0,
0,32,64,0,112,216,248,192,120,5,8,8,6,0,0,32,
80,0,112,216,248,192,120,5,7,7,6,0,0,80,0,112,
216,248,192,120,6,8,8,6,0,0,32,16,0,112,48,48,
48,252,6,8,8,6,0,0,16,32,0,112,48,48,48,252,
6,8,8,6,0,0,32,80,0,112,48,48,48,252,6,7,
7,6,0,0,80,0,112,48,48,48,252,5,8,8,6,0,
0,208,96,176,120,216,216,216,112,7,8,8,6,255,0,52,
72,0,216,108,108,108,110,5,8,8,6,0,0,64,32,0,
112,216,216,216,112,5,8,8,6,0,0,32,64,0,112,216,
216,216,112,5,8,8,6,0,0,32,80,0,112,216,216,216,
112,5,8,8,6,0,0,104,144,0,112,216,216,216,112,5,
7,7,6,0,0,80,0,112,216,216,216,112,5,5,5,6,
0,1,32,0,248,0,32,5,7,7,6,0,255,8,112,216,
248,216,112,128,7,8,8,6,255,0,32,16,0,236,108,108,
108,62,7,8,8,6,255,0,16,32,0,236,108,108,108,62,
7,8,8,6,255,0,16,40,0,236,108,108,108,62,7,7,
7,6,255,0,40,0,236,108,108,108,62,7,10,10,6,255,
254,8,16,0,238,108,108,40,56,48,240,6,9,9,6,255,
254,224,96,120,108,108,108,120,96,240,7,9,9,6,255,254,
40,0,238,108,108,40,56,48,240};
| 5,464 |
4,535 |
// Copyright 2019, Intel Corporation
#pragma once
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/StandardTypes.h"
#include "pmlc/dialect/eltwise/ir/types.h"
#include "pmlc/dialect/eltwise/ir/util.h"
#include "pmlc/util/interfaces.h"
namespace pmlc::dialect::eltwise {
using llvm::SmallVector;
using mlir::AbstractOperation;
using mlir::APInt;
using mlir::ArrayRef;
using mlir::Attribute;
using mlir::Builder;
using mlir::FloatAttr;
using mlir::FloatType;
using mlir::IndexType;
using mlir::IntegerAttr;
using mlir::IntegerType;
using mlir::Location;
using mlir::LogicalResult;
using mlir::MLIRContext;
using mlir::NamedAttribute;
using mlir::Op;
using mlir::OpBuilder;
using mlir::Operation;
using mlir::OperationState;
using mlir::OpFoldResult;
using mlir::OpInterface;
using mlir::OwningRewritePatternList;
using mlir::RankedTensorType;
using mlir::ShapedType;
using mlir::StringRef;
using mlir::TensorType;
using mlir::Type;
using mlir::TypeAttr;
using mlir::Value;
using mlir::ValueRange;
using mlir::VectorType;
using util::GenericBuilder;
namespace OpTrait = mlir::OpTrait;
#include "pmlc/dialect/eltwise/ir/interfaces.h.inc"
#define GET_OP_CLASSES
#include "pmlc/dialect/eltwise/ir/ops.h.inc"
} // namespace pmlc::dialect::eltwise
| 513 |
435 |
{
"copyright_text": "Standard YouTube License",
"description": "Recorrido pr\u00e1ctico a trav\u00e9s de las opciones que Python ofrece para la optimizaci\u00f3n de c\u00f3digo n\u00famerico de baja fricci\u00f3n (esto es, sin dejar de escribir Python), incluyendo: agotando CPython, NumPy, Numba, Parakeet, Cython, Theano, PyPy/NumPyPy, Pyston y Blaze.",
"duration": 2704,
"language": "spa",
"recorded": "2015-04-06",
"related_urls": [],
"speakers": [
"<NAME>"
],
"tags": [
"NumPy",
"Numba",
"Parakeet",
"Cython",
"Theano",
"PyPy",
"NumPyPy",
"Pyston",
"Blaze"
],
"thumbnail_url": "https://i.ytimg.com/vi/1AamKu-FwCU/maxresdefault.jpg",
"title": "Speed without drag",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=1AamKu-FwCU"
}
]
}
| 394 |
1,751 |
/*-
* #%L
* rapidoid-http-server
* %%
* Copyright (C) 2014 - 2020 <NAME> and contributors
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package org.rapidoid.http.handler;
import org.rapidoid.annotation.Authors;
import org.rapidoid.annotation.Since;
import org.rapidoid.http.HttpStatus;
import org.rapidoid.http.HttpUtils;
import org.rapidoid.http.Req;
import org.rapidoid.http.impl.RouteOptions;
import org.rapidoid.http.impl.lowlevel.HttpIO;
import org.rapidoid.io.Res;
import org.rapidoid.net.abstracts.Channel;
import org.rapidoid.u.U;
@Authors("<NAME>")
@Since("4.3.0")
public class ResourceHttpHandler extends AbstractHttpHandler {
private final Res resource;
public ResourceHttpHandler(RouteOptions options, Res resource) {
super(options);
this.resource = resource;
}
@Override
public HttpStatus handle(Channel ctx, boolean isKeepAlive, Req req) {
byte[] bytes = resource.getBytesOrNull();
if (bytes != null) {
HttpIO.INSTANCE.write200(HttpUtils.maybe(req), ctx, isKeepAlive, contentType, bytes);
return HttpStatus.DONE;
} else {
return HttpStatus.NOT_FOUND;
}
}
@Override
public String toString() {
return contentTypeInfo(U.frmt("() -> (resource %s)", resource.getName()));
}
}
| 656 |
760 |
package com.facebook.react.bridge;
import android.content.Context;
public class BridgeUtil {
public static void loadScriptFromAsset(Context context,
CatalystInstance instance,
String assetName,boolean loadSynchronously) {
String source = assetName;
if(!assetName.startsWith("assets://")) {
source = "assets://" + assetName;
}
((CatalystInstanceImpl)instance).loadScriptFromAssets(context.getAssets(), source,loadSynchronously);
}
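    // Illustrative usage (not part of the original file; the bundle name below
    // is hypothetical):
    //   BridgeUtil.loadScriptFromAsset(context, catalystInstance,
    //       "business.android.bundle", false);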
public static void loadScriptFromFile(String fileName,
CatalystInstance instance,
String sourceUrl,boolean loadSynchronously) {
((CatalystInstanceImpl)instance).loadScriptFromFile(fileName, sourceUrl,loadSynchronously);
}
}
| 401 |
335 |
{
"word": "Meh",
"definitions": [
"Uninspiring; unexceptional.",
"Unenthusiastic; apathetic."
],
"parts-of-speech": "Adjective"
}
| 96 |
1,658 |
package org.rocksdb;
import org.rocksdb.Cache;
/**
* Java wrapper over native write_buffer_manager class
*/
public class WriteBufferManager extends RocksObject {
static {
RocksDB.loadLibrary();
}
/**
* Construct a new instance of WriteBufferManager.
*
* Check <a href="https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager">
* https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager</a>
* for more details on when to use it
*
* @param bufferSizeBytes buffer size(in bytes) to use for native write_buffer_manager
* @param cache cache whose memory should be bounded by this write buffer manager
*/
public WriteBufferManager(final long bufferSizeBytes, final Cache cache){
super(newWriteBufferManager(bufferSizeBytes, cache.nativeHandle_));
}
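  // Illustrative usage (not part of the original file; assumes RocksJava's
  // LRUCache and an Options instance):
  //   try (Cache cache = new LRUCache(64 << 20);
  //        WriteBufferManager wbm = new WriteBufferManager(32 << 20, cache)) {
  //     options.setWriteBufferManager(wbm); // share a 32 MB write-buffer budget
  //   }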
private native static long newWriteBufferManager(final long bufferSizeBytes, final long cacheHandle);
@Override
protected native void disposeInternal(final long handle);
}
| 285 |
1,104 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.search.SolrIndexSearcher;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
// NOTE (assumption): the original snippet used `log` without declaring it;
// SLF4J is assumed here, matching the "solr 1.4-dev" note below.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Event listener that runs an external executable on postCommit / newSearcher events.
 */
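/*
 * Illustrative solrconfig.xml registration (not part of the original file);
 * the parameter names match those read in init() below:
 *
 *   <listener event="postCommit" class="solr.RunExecutableListener">
 *     <str name="exe">snapshooter</str>
 *     <str name="dir">solr/bin</str>
 *     <bool name="wait">true</bool>
 *     <arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
 *     <arr name="env"> <str>MYVAR=val1</str> </arr>
 *   </listener>
 */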
class RunExecutableListener extends AbstractSolrEventListener {
  private static final Logger log = LoggerFactory.getLogger(RunExecutableListener.class);
public RunExecutableListener(SolrCore core) {
super(core);
}
protected String[] cmd;
protected File dir;
protected String[] envp;
protected boolean wait=true;
@Override
public void init(NamedList args) {
super.init(args);
List cmdlist = new ArrayList();
cmdlist.add(args.get("exe"));
List lst = (List)args.get("args");
if (lst != null) cmdlist.addAll(lst);
cmd = (String[])cmdlist.toArray(new String[cmdlist.size()]);
lst = (List)args.get("env");
if (lst != null) {
envp = (String[])lst.toArray(new String[lst.size()]);
}
String str = (String)args.get("dir");
if (str==null || str.equals("") || str.equals(".") || str.equals("./")) {
dir = null;
} else {
dir = new File(str);
}
if ("false".equals(args.get("wait")) || Boolean.FALSE.equals(args.get("wait"))) wait=false;
}
  /**
   * Run the external executable.
   *
   * @param callback Unused (As of solr 1.4-dev)
   * @return Error code indicating whether the command executed successfully. <br />
   *         0 indicates normal termination. <br />
   *         non-zero otherwise.
   */
protected int exec(String callback) {
int ret = 0;
try {
boolean doLog = log.isDebugEnabled();
if (doLog) {
log.debug("About to exec " + cmd[0]);
}
Process proc = Runtime.getRuntime().exec(cmd, envp ,dir);
if (wait) {
try {
ret = proc.waitFor();
} catch (InterruptedException e) {
SolrException.log(log,e);
ret = INVALID_PROCESS_RETURN_CODE;
}
}
if (wait && doLog) {
log.debug("Executable " + cmd[0] + " returned " + ret);
}
} catch (IOException e) {
// don't throw exception, just log it...
SolrException.log(log,e);
ret = INVALID_PROCESS_RETURN_CODE;
}
return ret;
}
@Override
public void postCommit() {
// anything generic need to be passed to the external program?
// the directory of the index? the command that caused it to be
// invoked? the version of the index?
exec("postCommit");
}
@Override
public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
exec("newSearcher");
}
/** Non-zero value for an invalid return code **/
private static int INVALID_PROCESS_RETURN_CODE = -1;
}
| 1,254 |
8,629 |
#pragma once
#include <Core/Names.h>
#include <Core/NamesAndTypes.h>
#include <Core/SettingsEnums.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Interpreters/IJoin.h>
#include <Interpreters/join_common.h>
#include <Interpreters/asof.h>
#include <QueryPipeline/SizeLimits.h>
#include <DataTypes/getLeastSupertype.h>
#include <Storages/IStorage_fwd.h>
#include <Common/Exception.h>
#include <Parsers/IAST_fwd.h>
#include <cstddef>
#include <unordered_map>
#include <utility>
#include <memory>
#include <base/types.h>
#include <Common/logger_useful.h>
namespace DB
{
class Context;
class ASTSelectQuery;
struct DatabaseAndTableWithAlias;
class Block;
class DictionaryReader;
class StorageJoin;
class StorageDictionary;
struct ColumnWithTypeAndName;
using ColumnsWithTypeAndName = std::vector<ColumnWithTypeAndName>;
struct Settings;
class IVolume;
using VolumePtr = std::shared_ptr<IVolume>;
enum class JoinTableSide
{
Left,
Right
};
class TableJoin
{
public:
using NameToTypeMap = std::unordered_map<String, DataTypePtr>;
/// Corresponds to one disjunct
struct JoinOnClause
{
Names key_names_left;
Names key_names_right; /// Duplicate right key names are qualified.
ASTPtr on_filter_condition_left;
ASTPtr on_filter_condition_right;
JoinOnClause() = default;
std::pair<String, String> condColumnNames() const
{
std::pair<String, String> res;
if (on_filter_condition_left)
res.first = on_filter_condition_left->getColumnName();
if (on_filter_condition_right)
res.second = on_filter_condition_right->getColumnName();
return res;
}
size_t keysCount() const
{
assert(key_names_left.size() == key_names_right.size());
return key_names_right.size();
}
String formatDebug() const
{
return fmt::format("Left keys: [{}] Right keys [{}] Condition columns: '{}', '{}'",
fmt::join(key_names_left, ", "), fmt::join(key_names_right, ", "),
condColumnNames().first, condColumnNames().second);
}
};
using Clauses = std::vector<JoinOnClause>;
private:
/** Query of the form `SELECT expr(x) AS k FROM t1 ANY LEFT JOIN (SELECT expr(x) AS k FROM t2) USING k`
* The join is made by column k.
* During the JOIN,
* - in the "right" table, it will be available by alias `k`, since `Project` action for the subquery was executed.
* - in the "left" table, it will be accessible by the name `expr(x)`, since `Project` action has not been executed yet.
* You must remember both of these options.
*
* Query of the form `SELECT ... from t1 ANY LEFT JOIN (SELECT ... from t2) ON expr(t1 columns) = expr(t2 columns)`
* to the subquery will be added expression `expr(t2 columns)`.
* It's possible to use name `expr(t2 columns)`.
*/
friend class TreeRewriter;
SizeLimits size_limits;
const size_t default_max_bytes = 0;
const bool join_use_nulls = false;
const size_t max_joined_block_rows = 0;
JoinAlgorithm join_algorithm = JoinAlgorithm::AUTO;
const size_t partial_merge_join_rows_in_right_blocks = 0;
const size_t partial_merge_join_left_table_buffer_bytes = 0;
const size_t max_files_to_merge = 0;
const String temporary_files_codec = "LZ4";
/// the limit has no technical justification; it is only meant to improve safety
const size_t MAX_DISJUNCTS = 16; /// NOLINT
ASTs key_asts_left;
ASTs key_asts_right;
Clauses clauses;
ASTTableJoin table_join;
ASOF::Inequality asof_inequality = ASOF::Inequality::GreaterOrEquals;
/// All columns that can be read from the joined table. Duplicate names are qualified.
NamesAndTypesList columns_from_joined_table;
/// Columns will be added to block by JOIN.
/// It's a subset of columns_from_joined_table
/// Note: without corrected Nullability or type, see correctedColumnsAddedByJoin
NamesAndTypesList columns_added_by_join;
/// Target type to convert key columns before join
NameToTypeMap left_type_map;
NameToTypeMap right_type_map;
/// Name -> original name. Names are the same as in columns_from_joined_table list.
std::unordered_map<String, String> original_names;
/// Original name -> name. Only renamed columns.
std::unordered_map<String, String> renames;
VolumePtr tmp_volume;
std::shared_ptr<StorageJoin> right_storage_join;
std::shared_ptr<StorageDictionary> right_storage_dictionary;
std::shared_ptr<DictionaryReader> dictionary_reader;
Names requiredJoinedNames() const;
/// Create converting actions and change key column names if required
ActionsDAGPtr applyKeyConvertToTable(
const ColumnsWithTypeAndName & cols_src, const NameToTypeMap & type_mapping, NameToNameMap & key_column_rename,
bool make_nullable) const;
void addKey(const String & left_name, const String & right_name, const ASTPtr & left_ast, const ASTPtr & right_ast = nullptr);
void assertHasOneOnExpr() const;
/// Calculates common supertypes for corresponding join key columns.
template <typename LeftNamesAndTypes, typename RightNamesAndTypes>
void inferJoinKeyCommonType(const LeftNamesAndTypes & left, const RightNamesAndTypes & right, bool allow_right);
NamesAndTypesList correctedColumnsAddedByJoin() const;
public:
TableJoin() = default;
TableJoin(const Settings & settings, VolumePtr tmp_volume_);
/// for StorageJoin
TableJoin(SizeLimits limits, bool use_nulls, ASTTableJoin::Kind kind, ASTTableJoin::Strictness strictness,
const Names & key_names_right)
: size_limits(limits)
, default_max_bytes(0)
, join_use_nulls(use_nulls)
, join_algorithm(JoinAlgorithm::HASH)
{
clauses.emplace_back().key_names_right = key_names_right;
table_join.kind = kind;
table_join.strictness = strictness;
}
ASTTableJoin::Kind kind() const { return table_join.kind; }
ASTTableJoin::Strictness strictness() const { return table_join.strictness; }
bool sameStrictnessAndKind(ASTTableJoin::Strictness, ASTTableJoin::Kind) const;
const SizeLimits & sizeLimits() const { return size_limits; }
VolumePtr getTemporaryVolume() { return tmp_volume; }
bool allowMergeJoin() const;
bool preferMergeJoin() const { return join_algorithm == JoinAlgorithm::PREFER_PARTIAL_MERGE; }
bool forceMergeJoin() const { return join_algorithm == JoinAlgorithm::PARTIAL_MERGE; }
bool allowParallelHashJoin() const;
bool forceHashJoin() const
{
/// HashJoin always used for DictJoin
return dictionary_reader || join_algorithm == JoinAlgorithm::HASH || join_algorithm == JoinAlgorithm::PARALLEL_HASH;
}
bool forceNullableRight() const { return join_use_nulls && isLeftOrFull(table_join.kind); }
bool forceNullableLeft() const { return join_use_nulls && isRightOrFull(table_join.kind); }
size_t defaultMaxBytes() const { return default_max_bytes; }
size_t maxJoinedBlockRows() const { return max_joined_block_rows; }
size_t maxRowsInRightBlock() const { return partial_merge_join_rows_in_right_blocks; }
size_t maxBytesInLeftBuffer() const { return partial_merge_join_left_table_buffer_bytes; }
size_t maxFilesToMerge() const { return max_files_to_merge; }
const String & temporaryFilesCodec() const { return temporary_files_codec; }
bool needStreamWithNonJoinedRows() const;
bool oneDisjunct() const;
JoinOnClause & getOnlyClause() { assertHasOneOnExpr(); return clauses[0]; }
const JoinOnClause & getOnlyClause() const { assertHasOneOnExpr(); return clauses[0]; }
std::vector<JoinOnClause> & getClauses() { return clauses; }
const std::vector<JoinOnClause> & getClauses() const { return clauses; }
Names getAllNames(JoinTableSide side) const;
void resetCollected();
void addUsingKey(const ASTPtr & ast);
void addDisjunct();
void addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast);
/* Conditions for left/right table from JOIN ON section.
*
* Conditions for left and right tables stored separately and united with 'and' function into one column.
* For example for query:
* SELECT ... JOIN ... ON t1.id == t2.id AND expr11(t1) AND expr21(t2) AND expr12(t1) AND expr22(t2)
*
* We will build two new ASTs: `expr11(t1) AND expr12(t1)`, `expr21(t2) AND expr22(t2)`
* Such columns will be added and calculated for left and right tables respectively.
* Only rows where conditions are met (where new columns have non-zero value) will be joined.
*
     * NOTE: a non-equi condition containing columns from different tables (like `... ON t1.id = t2.id AND t1.val > t2.val`)
     * is not supported yet; it can be added later.
*/
void addJoinCondition(const ASTPtr & ast, bool is_left);
bool hasUsing() const { return table_join.using_expression_list != nullptr; }
bool hasOn() const { return table_join.on_expression != nullptr; }
NamesWithAliases getNamesWithAliases(const NameSet & required_columns) const;
NamesWithAliases getRequiredColumns(const Block & sample, const Names & action_required_columns) const;
void deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix);
size_t rightKeyInclusion(const String & name) const;
NameSet requiredRightKeys() const;
bool leftBecomeNullable(const DataTypePtr & column_type) const;
bool rightBecomeNullable(const DataTypePtr & column_type) const;
void addJoinedColumn(const NameAndTypePair & joined_column);
template <typename TColumns>
void addJoinedColumnsAndCorrectTypesImpl(TColumns & left_columns, bool correct_nullability);
void addJoinedColumnsAndCorrectTypes(NamesAndTypesList & left_columns, bool correct_nullability);
void addJoinedColumnsAndCorrectTypes(ColumnsWithTypeAndName & left_columns, bool correct_nullability);
/// Calculate converting actions, rename key columns in required
/// For `USING` join we will convert key columns inplace and affect into types in the result table
/// For `JOIN ON` we will create new columns with converted keys to join by.
std::pair<ActionsDAGPtr, ActionsDAGPtr>
createConvertingActions(const ColumnsWithTypeAndName & left_sample_columns, const ColumnsWithTypeAndName & right_sample_columns);
void setAsofInequality(ASOF::Inequality inequality) { asof_inequality = inequality; }
ASOF::Inequality getAsofInequality() { return asof_inequality; }
ASTPtr leftKeysList() const;
ASTPtr rightKeysList() const; /// For ON syntax only
const NamesAndTypesList & columnsFromJoinedTable() const { return columns_from_joined_table; }
Names columnsAddedByJoin() const
{
Names res;
for (const auto & col : columns_added_by_join)
res.push_back(col.name);
return res;
}
/// StorageJoin overrides key names (cause of different names qualification)
void setRightKeys(const Names & keys) { getOnlyClause().key_names_right = keys; }
Block getRequiredRightKeys(const Block & right_table_keys, std::vector<String> & keys_sources) const;
String renamedRightColumnName(const String & name) const;
void resetKeys();
void resetToCross();
std::unordered_map<String, String> leftToRightKeyRemap() const;
void setStorageJoin(std::shared_ptr<StorageJoin> storage);
void setStorageJoin(std::shared_ptr<StorageDictionary> storage);
std::shared_ptr<StorageJoin> getStorageJoin() { return right_storage_join; }
bool tryInitDictJoin(const Block & sample_block, ContextPtr context);
bool isSpecialStorage() const { return right_storage_dictionary || right_storage_join; }
const DictionaryReader * getDictionaryReader() const { return dictionary_reader.get(); }
};
}
| 4,260 |
12,278 |
/* Copyright 2016 <NAME>.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See http://www.boost.org/libs/poly_collection for library home page.
*/
#include "test_algorithm1.hpp"
#include "any_types.hpp"
#include "test_algorithm_impl.hpp"
void test_algorithm1()
{
test_algorithm<
any_types::collection,jammed_auto_increment,any_types::to_int,
any_types::t1,any_types::t2,any_types::t3,
any_types::t4,any_types::t5>();
}
| 215 |
3,262 |
package com.tencent.angel.graph.embedding.node2vec;
import com.tencent.angel.ml.matrix.psf.update.base.PartitionUpdateParam;
import com.tencent.angel.ml.matrix.psf.update.base.UpdateFunc;
import com.tencent.angel.ps.storage.matrix.ServerMatrix;
import com.tencent.angel.ps.storage.partition.RowBasedPartition;
import com.tencent.angel.ps.storage.vector.ServerLongAnyRow;
import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
import it.unimi.dsi.fastutil.objects.ObjectIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class InitAliasTable extends UpdateFunc {
private static final Logger LOG = LoggerFactory.getLogger(InitAliasTable.class);
  /**
   * Create a new InitAliasTable update function with the given parameter.
   */
public InitAliasTable(InitAliasTableParam param) {
super(param);
}
public InitAliasTable() {
this(null);
}
@Override
public void partitionUpdate(PartitionUpdateParam partParam) {
InitAliasTablePartParam param = (InitAliasTablePartParam) partParam;
ServerMatrix matrix = psContext.getMatrixStorageManager().getMatrix(partParam.getMatrixId());
RowBasedPartition part = (RowBasedPartition) matrix
.getPartition(partParam.getPartKey().getPartitionId());
ServerLongAnyRow row = (ServerLongAnyRow) part.getRow(0);
ObjectIterator<Long2ObjectMap.Entry<AliasElement>> iter = param.getNodeId2Neighbors().long2ObjectEntrySet().iterator();
row.startWrite();
try {
while (iter.hasNext()) {
Long2ObjectMap.Entry<AliasElement> entry = iter.next();
AliasElement element = entry.getValue();
if (element == null) {
row.set(entry.getLongKey(), null);
} else {
row.set(entry.getLongKey(), new AliasElement(element.getNeighborIds(), element.getAccept(), element.getAlias()));
}
}
} finally {
row.endWrite();
}
}
}
| 815 |