max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
1,108 | import pytest
import django
from raven.contrib.django.resolver import RouteResolver
try:
from django.conf.urls import url, include
except ImportError:
# for Django version less than 1.4
from django.conf.urls.defaults import url, include
@pytest.fixture
def route_resolver():
    """Yield a fresh ``RouteResolver`` for each test that requests it."""
    resolver = RouteResolver()
    return resolver
@pytest.fixture
def urlconf():
    """Build a representative URL configuration for the running Django version.

    Returns a tuple of URL patterns covering a parametrized API route, a
    plain prefix route, and an included sub-conf.  The shape of the included
    conf and the pattern constructors differ across Django versions.
    """
    # Django < 1.9 expects a 3-tuple (patterns, app_namespace, instance_namespace)
    # for include(); newer versions take a 2-tuple.
    if django.VERSION < (1, 9):
        included_url_conf = (
            url(r'^foo/bar/(?P<param>[\w]+)', lambda x: ''),
        ), '', ''
    else:
        included_url_conf = ((
            url(r'^foo/bar/(?P<param>[\w]+)', lambda x: ''),
        ), '')
    if django.VERSION >= (2, 0):
        from django.urls import path, re_path
        example_url_conf = (
            re_path(r'^api/(?P<project_id>[\w_-]+)/store/$', lambda x: ''),
            re_path(r'^report/', lambda x: ''),
            re_path(r'^example/', include(included_url_conf)),
            path('api/v2/<int:project_id>/store/', lambda x: '')
        )
    else:
        # Bug fix: previously example_url_conf was only assigned for
        # Django >= 2.0, so older versions raised NameError here.
        # Django < 2.0 has no path()/re_path(); fall back to url().
        example_url_conf = (
            url(r'^api/(?P<project_id>[\w_-]+)/store/$', lambda x: ''),
            url(r'^report/', lambda x: ''),
            url(r'^example/', include(included_url_conf)),
        )
    return example_url_conf
| 473 |
4,895 | import superimport
import daft

# Colors (kept for reference; not currently applied to any node).
p_color = {"ec": "#46a546"}
s_color = {"ec": "#f89406"}
r_color = {"ec": "#dc143c"}

# Translation table mapping ASCII digits to Unicode subscripts.
# NOTE(review): currently unused below — kept for backward compatibility.
SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")

pgm = daft.PGM(shape=(12, 5), origin=(-1, 0))


def _add_hmm_chain(pgm, var, offset):
    """Add one 4-step HMM chain for variable `var` ('x' or 'y').

    Creates the shared parameter node A<var> at (2.5 + offset, 3), four
    observed nodes <var>0..<var>3 on row 1, four hidden nodes h<var>0..h<var>3
    on row 2, and the parameter->hidden, hidden->observed, and
    hidden->hidden transition edges.
    """
    theta = "A" + var
    pgm.add_node(theta, r"$\theta^h$", 2.5 + offset, 3, observed=False)
    for i in range(4):
        obs = "{}{}".format(var, i)
        hid = "h{}{}".format(var, i)
        pgm.add_node(obs, r"${}_{}$".format(var, i), i + 1 + offset, 1, observed=True)
        pgm.add_node(hid, r"$h^{}_{}$".format(var, i), i + 1 + offset, 2, observed=False)
        pgm.add_edge(theta, hid)
        pgm.add_edge(hid, obs)
        if i > 0:
            pgm.add_edge("h{}{}".format(var, i - 1), hid)


# Two parallel HMM chains: x at horizontal offset 0, y shifted right by 5.
_add_hmm_chain(pgm, "x", 0)
_add_hmm_chain(pgm, "y", 5)

# Shared latent variable z feeds both chains' parameters.
pgm.add_node("z", r"$z$", 5, 4, observed=False)
pgm.add_edge("z", "Ax")
pgm.add_edge("z", "Ay")

# Emission parameters, drawn once per chain.
pgm.add_node("thetax", r"$\theta^x$", 0, 1, observed=False)
pgm.add_node("thetay", r"$\theta^y$", 10, 1, observed=False)
pgm.add_edge("thetax", "x0")
pgm.add_edge("thetay", "y3")

pgm.render()
pgm.savefig('../figures/visual_spelling_hmm_pgm.pdf')
pgm.show()
952 | <filename>bucket/apimtemplate.json
{
"version": "0.7",
"description": "Azure API Management DevOps Resource Kit",
"homepage": "https://github.com/Azure/azure-api-management-devops-resource-kit",
"license": "MIT",
"depends": "azure-cli",
"architecture": {
"64bit": {
"url": "https://github.com/Azure/azure-api-management-devops-resource-kit/releases/download/v0.7/reskit0.7.zip",
"hash": "0bdab42015beb6220c1c015484f6ae07be49a8ca4cc0de95108f747daae6a8e1",
"extract_dir": "win64"
}
},
"bin": "apimtemplate.exe",
"checkver": {
"url": "https://github.com/Azure/azure-api-management-devops-resource-kit/releases",
"regex": "tag/v([\\w.-]+)"
},
"autoupdate": {
"architecture": {
"64bit": {
"url": "https://github.com/Azure/azure-api-management-devops-resource-kit/releases/download/v$version/reskit$version.zip"
}
}
}
}
| 513 |
1,688 | //
// SMState.h
// CarpoolBusiness
//
// Created by DaiMing on 2017/11/21.
//
#import <Foundation/Foundation.h>
/// Represents a single state object.  Currently an empty placeholder that
/// exposes no API beyond NSObject; concrete behavior is presumably added
/// elsewhere (categories or subclasses) — TODO confirm against the .m file.
@interface SMState : NSObject
@end
| 60 |
5,279 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.metrics;
import java.io.Serializable;
import org.apache.beam.sdk.annotations.Internal;
/**
* Implementation of {@link Distribution} that delegates to the instance for the current context.
*/
@Internal
public class DelegatingDistribution implements Metric, Distribution, Serializable {
  private final MetricName name;
  // When true, updates go to the process-wide container instead of the
  // current (per-execution-context) container.
  private final boolean processWideContainer;

  /** Creates a distribution that delegates to the current context's container. */
  public DelegatingDistribution(MetricName name) {
    this(name, false);
  }

  /**
   * @param name the metric name this distribution reports under
   * @param processWideContainer whether to delegate to the process-wide metrics container rather
   *     than the current one
   */
  public DelegatingDistribution(MetricName name, boolean processWideContainer) {
    this.name = name;
    this.processWideContainer = processWideContainer;
  }

  /**
   * Resolves the container to delegate to. May return null when no container is active, in which
   * case updates are silently dropped.
   */
  private MetricsContainer resolveContainer() {
    return this.processWideContainer
        ? MetricsEnvironment.getProcessWideContainer()
        : MetricsEnvironment.getCurrentContainer();
  }

  @Override
  public void update(long value) {
    MetricsContainer container = resolveContainer();
    if (container != null) {
      container.getDistribution(name).update(value);
    }
  }

  @Override
  public void update(long sum, long count, long min, long max) {
    MetricsContainer container = resolveContainer();
    if (container != null) {
      container.getDistribution(name).update(sum, count, min, max);
    }
  }

  @Override
  public MetricName getName() {
    return name;
  }
}
| 645 |
575 | <reponame>Ron423c/chromium
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_UNIFIED_CONSENT_UNIFIED_CONSENT_SERVICE_H_
#define COMPONENTS_UNIFIED_CONSENT_UNIFIED_CONSENT_SERVICE_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
#include "base/observer_list.h"
#include "base/values.h"
#include "components/keyed_service/core/keyed_service.h"
#include "components/prefs/pref_change_registrar.h"
#include "components/signin/public/identity_manager/identity_manager.h"
#include "components/sync/base/model_type.h"
#include "components/sync/driver/sync_service_observer.h"
#include "components/sync_preferences/pref_service_syncable_observer.h"
#include "components/unified_consent/unified_consent_metrics.h"
namespace user_prefs {
class PrefRegistrySyncable;
}
namespace sync_preferences {
class PrefServiceSyncable;
}
namespace syncer {
class SyncService;
}
namespace unified_consent {
// Tracks progress of the one-time migration of an existing profile to the
// unified-consent model.  Backed by an int, so existing numeric values must
// stay stable once persisted.
enum class MigrationState : int {
  kNotInitialized = 0,
  kInProgressWaitForSyncInit = 1,
  // Reserve space for other kInProgress* entries to be added here.
  kCompleted = 10,
};
// A browser-context keyed service that is used to manage the user consent
// when UnifiedConsent feature is enabled.
//
// This service makes sure that UrlKeyedAnonymizedDataCollection is turned on
// during sync opt-in and turned off when the user opts out.
//
// During the advanced opt-in through settings, the changes the user makes to
// the service toggles(prefs) are applied after prefs start syncing. This is
// done to prevent changes the user makes during sync setup to be overridden by
// syncing down older changes.
class UnifiedConsentService
    : public KeyedService,
      public signin::IdentityManager::Observer,
      public syncer::SyncServiceObserver,
      public sync_preferences::PrefServiceSyncableObserver {
 public:
  // Initializes the service. The |service_pref_names| vector is used to track
  // pref changes during the first sync setup.
  UnifiedConsentService(sync_preferences::PrefServiceSyncable* pref_service,
                        signin::IdentityManager* identity_manager,
                        syncer::SyncService* sync_service,
                        const std::vector<std::string>& service_pref_names);
  ~UnifiedConsentService() override;

  // Register the prefs used by this UnifiedConsentService.
  static void RegisterPrefs(user_prefs::PrefRegistrySyncable* registry);

  // Enables or disables URL-keyed anonymized data collection.
  void SetUrlKeyedAnonymizedDataCollectionEnabled(bool enabled);

  // KeyedService:
  void Shutdown() override;

  // IdentityManager::Observer:
  void OnPrimaryAccountChanged(
      const signin::PrimaryAccountChangeEvent& event) override;

 private:
  friend class UnifiedConsentServiceTest;

  // syncer::SyncServiceObserver:
  void OnStateChanged(syncer::SyncService* sync) override;

  // sync_preferences::PrefServiceSyncableObserver:
  void OnIsSyncingChanged() override;

  // Helpers for observing changes in the service prefs.
  void StartObservingServicePrefChanges();
  void StopObservingServicePrefChanges();
  void ServicePrefChanged(const std::string& name);

  // Migration helpers.
  MigrationState GetMigrationState();
  void SetMigrationState(MigrationState migration_state);

  // Called when the unified consent service is created.
  void MigrateProfileToUnifiedConsent();

  // Updates the settings preferences for the migration when the sync engine is
  // initialized. When it is not, this function will be called again from
  // |OnStateChanged| when the sync engine is initialized.
  void UpdateSettingsForMigration();

  // Raw pointers to dependencies. NOTE(review): presumed non-owning and
  // outliving this service — confirm against the factory in the .cc file.
  sync_preferences::PrefServiceSyncable* pref_service_;
  signin::IdentityManager* identity_manager_;
  syncer::SyncService* sync_service_;

  // Used for tracking the service pref states during the advanced sync opt-in.
  const std::vector<std::string> service_pref_names_;
  // NOTE(review): presumably holds pref values recorded via
  // ServicePrefChanged() during sync setup, re-applied later — confirm in .cc.
  std::map<std::string, base::Value> service_pref_changes_;
  PrefChangeRegistrar service_pref_change_registrar_;

  DISALLOW_COPY_AND_ASSIGN(UnifiedConsentService);
};
} // namespace unified_consent
#endif // COMPONENTS_UNIFIED_CONSENT_UNIFIED_CONSENT_SERVICE_H_
| 1,346 |
330 | package chapter_10;
/**
* 只写一次的变量更好
*
* @author biezhi
* @date 2018/7/25
*/
public class Example6 {
  // Written exactly once at class-initialization time and never reassigned
  // (final), so it is safely visible to all threads without synchronization.
  public static final int NUM_THREADS = 10;
}
| 76 |
997 | <reponame>mkannwischer/PQClean<filename>crypto_sign/sphincs-haraka-192f-robust/aesni/sign.c<gh_stars>100-1000
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "address.h"
#include "api.h"
#include "fors.h"
#include "hash.h"
#include "hash_state.h"
#include "params.h"
#include "randombytes.h"
#include "thash.h"
#include "utils.h"
#include "wots.h"
/**
* Computes the leaf at a given address. First generates the WOTS key pair,
* then computes leaf by hashing horizontally.
*/
static void wots_gen_leaf(unsigned char *leaf, const unsigned char *sk_seed,
                          const unsigned char *pub_seed,
                          uint32_t addr_idx, const uint32_t tree_addr[8],
                          const hash_state *hash_state_seeded) {
    /* Scratch buffer for the full WOTS public key before compression. */
    unsigned char pk[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_WOTS_BYTES];
    uint32_t wots_addr[8] = {0};
    uint32_t wots_pk_addr[8] = {0};

    /* Tag the two working addresses with their hash-address types. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_type(
        wots_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_ADDR_TYPE_WOTS);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_type(
        wots_pk_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_ADDR_TYPE_WOTSPK);

    /* Position the WOTS address at this leaf within the given subtree. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_copy_subtree_addr(
        wots_addr, tree_addr);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_keypair_addr(
        wots_addr, addr_idx);
    /* Derive the WOTS public key for this address. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_wots_gen_pk(
        pk, sk_seed, pub_seed, wots_addr, hash_state_seeded);

    /* Compress the WOTS pk into the leaf with a tweakable hash, keyed by
       the matching WOTSPK address. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_copy_keypair_addr(
        wots_pk_addr, wots_addr);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_thash_WOTS_LEN(
        leaf, pk, pub_seed, wots_pk_addr, hash_state_seeded);
}
/*
* Returns the length of a secret key, in bytes
*/
size_t PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_secretkeybytes(void) {
    /* Compile-time constant from params.h, exposed as a function per API. */
    return PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SECRETKEYBYTES;
}
/*
* Returns the length of a public key, in bytes
*/
size_t PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_publickeybytes(void) {
    /* Compile-time constant from params.h, exposed as a function per API. */
    return PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_PUBLICKEYBYTES;
}
/*
* Returns the length of a signature, in bytes
*/
size_t PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_bytes(void) {
    /* Compile-time constant from params.h, exposed as a function per API. */
    return PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_BYTES;
}
/*
* Returns the length of the seed required to generate a key pair, in bytes
*/
size_t PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_seedbytes(void) {
    /* Compile-time constant from params.h, exposed as a function per API. */
    return PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SEEDBYTES;
}
/*
* Generates an SPX key pair given a seed of length
* Format sk: [SK_SEED || SK_PRF || PUB_SEED || root]
* Format pk: [PUB_SEED || root]
*/
int PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_seed_keypair(
    uint8_t *pk, uint8_t *sk, const uint8_t *seed) {
    /* We do not need the auth path in key generation, but it simplifies the
       code to have just one treehash routine that computes both root and path
       in one function. */
    unsigned char auth_path[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_TREE_HEIGHT * PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N];
    uint32_t top_tree_addr[8] = {0};
    hash_state hash_state_seeded;

    /* Address the root subtree: top layer (D - 1), hash-tree type. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_layer_addr(
        top_tree_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_D - 1);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_type(
        top_tree_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_ADDR_TYPE_HASHTREE);
    /* Initialize SK_SEED, SK_PRF and PUB_SEED from seed. */
    memcpy(sk, seed, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SEEDBYTES);
    /* PUB_SEED lives at offset 2*N in sk; mirror it into the pk buffer. */
    memcpy(pk, sk + 2 * PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N);
    /* This hook allows the hash function instantiation to do whatever
       preparation or computation it needs, based on the public seed. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_initialize_hash_function(&hash_state_seeded, pk, sk);
    /* Compute root node of the top-most subtree; the root is written into
       sk at offset 3*N. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_treehash_TREE_HEIGHT(
        sk + 3 * PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N, auth_path, sk, sk + 2 * PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N, 0, 0,
        wots_gen_leaf, top_tree_addr, &hash_state_seeded);
    /* The root forms the second half of pk: [PUB_SEED || root]. */
    memcpy(pk + PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N, sk + 3 * PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N);
    return 0;
}
/*
* Generates an SPX key pair.
* Format sk: [SK_SEED || SK_PRF || PUB_SEED || root]
* Format pk: [PUB_SEED || root]
*/
int PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_keypair(
    uint8_t *pk, uint8_t *sk) {
    // guarantee alignment of pk
    union {
        __m128 _x[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_PUBLICKEYBYTES / 16];
        uint8_t pk[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_PUBLICKEYBYTES];
    } aligned_pk;
    // guarantee alignment of sk
    union {
        __m128 _x[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SECRETKEYBYTES / 16];
        uint8_t sk[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SECRETKEYBYTES];
    } aligned_sk;
    // guarantee alignment of the random seed as well
    union {
        __m128 _x[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SEEDBYTES / 16];
        uint8_t seed[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SEEDBYTES];
    } aligned_seed;

    /* Draw a fresh random seed, then derive the key pair from it. */
    randombytes(aligned_seed.seed, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SEEDBYTES);

    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_seed_keypair(
        aligned_pk.pk, aligned_sk.sk, aligned_seed.seed);

    /* Copy from the aligned scratch buffers into the caller's buffers. */
    memcpy(pk, aligned_pk.pk, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_PUBLICKEYBYTES);
    memcpy(sk, aligned_sk.sk, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SECRETKEYBYTES);
    return 0;
}
/**
* Returns an array containing a detached signature.
*/
int PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_signature(
    uint8_t *sig, size_t *siglen,
    const uint8_t *m, size_t mlen, const uint8_t *sk) {
    // guarantee alignment of sk
    union {
        __m128 *_x;
        uint8_t sk[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SECRETKEYBYTES];
    } aligned_sk;
    memcpy(aligned_sk.sk, sk, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_SECRETKEYBYTES);
    sk = aligned_sk.sk;

    // guarantee alignment of sig; the signature is assembled in this scratch
    // buffer and copied out to orig_sig at the end.
    union {
        __m128 *_x;
        uint8_t sig[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES];
    } aligned_sig;
    uint8_t *orig_sig = sig;
    sig = (uint8_t *)aligned_sig.sig;

    /* sk layout: [SK_SEED || SK_PRF || PUB_SEED || root], N bytes each. */
    const unsigned char *sk_seed = sk;
    const unsigned char *sk_prf = sk + PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N;
    const unsigned char *pk = sk + 2 * PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N;
    const unsigned char *pub_seed = pk;

    unsigned char optrand[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N];
    unsigned char mhash[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_FORS_MSG_BYTES];
    unsigned char root[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N];
    uint32_t i;
    uint64_t tree;
    uint32_t idx_leaf;
    uint32_t wots_addr[8] = {0};
    uint32_t tree_addr[8] = {0};
    hash_state hash_state_seeded;

    /* This hook allows the hash function instantiation to do whatever
       preparation or computation it needs, based on the public seed. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_initialize_hash_function(
        &hash_state_seeded,
        pub_seed, sk_seed);

    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_type(
        wots_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_ADDR_TYPE_WOTS);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_type(
        tree_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_ADDR_TYPE_HASHTREE);

    /* Optionally, signing can be made non-deterministic using optrand.
       This can help counter side-channel attacks that would benefit from
       getting a large number of traces when the signer uses the same nodes. */
    randombytes(optrand, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N);
    /* Compute the digest randomization value R; it is written at the start
       of the signature. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_gen_message_random(
        sig, sk_prf, optrand, m, mlen, &hash_state_seeded);

    /* Derive the message digest and leaf index from R, PK and M. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_hash_message(
        mhash, &tree, &idx_leaf, sig, pk, m, mlen, &hash_state_seeded);
    sig += PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N;

    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_tree_addr(wots_addr, tree);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_keypair_addr(
        wots_addr, idx_leaf);

    /* Sign the message hash using FORS. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_fors_sign(
        sig, root, mhash, sk_seed, pub_seed, wots_addr, &hash_state_seeded);
    sig += PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_FORS_BYTES;

    /* For each of the D hypertree layers, append a WOTS signature over the
       current root plus the authentication path to the next root. */
    for (i = 0; i < PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_D; i++) {
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_layer_addr(tree_addr, i);
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_tree_addr(tree_addr, tree);

        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_copy_subtree_addr(
            wots_addr, tree_addr);
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_keypair_addr(
            wots_addr, idx_leaf);

        /* Compute a WOTS signature. */
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_wots_sign(
            sig, root, sk_seed, pub_seed, wots_addr, &hash_state_seeded);
        sig += PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_WOTS_BYTES;

        /* Compute the authentication path for the used WOTS leaf. */
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_treehash_TREE_HEIGHT(
            root, sig, sk_seed, pub_seed, idx_leaf, 0,
            wots_gen_leaf, tree_addr, &hash_state_seeded);
        sig += PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_TREE_HEIGHT * PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N;

        /* Update the indices for the next layer. */
        idx_leaf = (tree & ((1 << PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_TREE_HEIGHT) - 1));
        tree = tree >> PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_TREE_HEIGHT;
    }

    /* Copy the finished signature out of the aligned scratch buffer. */
    memcpy(orig_sig, aligned_sig.sig, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES);
    *siglen = PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES;

    return 0;
}
/**
* Verifies a detached signature and message under a given public key.
*/
int PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_verify(
    const uint8_t *sig, size_t siglen,
    const uint8_t *m, size_t mlen, const uint8_t *pk) {
    // guarantee alignment of pk
    union {
        __m128 *_x;
        uint8_t pk[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_PUBLICKEYBYTES];
    } aligned_pk;
    memcpy(aligned_pk.pk, pk, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_PUBLICKEYBYTES);
    pk = aligned_pk.pk;

    /* pk layout: [PUB_SEED || root], N bytes each. */
    const unsigned char *pub_seed = pk;
    const unsigned char *pub_root = pk + PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N;
    unsigned char mhash[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_FORS_MSG_BYTES];
    unsigned char wots_pk[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_WOTS_BYTES];
    unsigned char root[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N];
    unsigned char leaf[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N];
    unsigned int i;
    uint64_t tree;
    uint32_t idx_leaf;
    uint32_t wots_addr[8] = {0};
    uint32_t tree_addr[8] = {0};
    uint32_t wots_pk_addr[8] = {0};
    hash_state hash_state_seeded;

    /* Reject signatures of the wrong length outright. */
    if (siglen != PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES) {
        return -1;
    }

    /* This hook allows the hash function instantiation to do whatever
       preparation or computation it needs, based on the public seed. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_initialize_hash_function(
        &hash_state_seeded,
        pub_seed, NULL);

    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_type(
        wots_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_ADDR_TYPE_WOTS);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_type(
        tree_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_ADDR_TYPE_HASHTREE);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_type(
        wots_pk_addr, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_ADDR_TYPE_WOTSPK);

    /* Derive the message digest and leaf index from R || PK || M. */
    /* The additional PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N is a result of the hash domain separator. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_hash_message(
        mhash, &tree, &idx_leaf, sig, pk, m, mlen, &hash_state_seeded);
    sig += PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N;

    /* Layer correctly defaults to 0, so no need to set_layer_addr */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_tree_addr(wots_addr, tree);
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_keypair_addr(
        wots_addr, idx_leaf);

    /* Recover the candidate FORS public key (the initial root) from the
       FORS part of the signature. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_fors_pk_from_sig(
        root, sig, mhash, pub_seed, wots_addr, &hash_state_seeded);
    sig += PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_FORS_BYTES;

    /* For each subtree.. */
    for (i = 0; i < PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_D; i++) {
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_layer_addr(tree_addr, i);
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_tree_addr(tree_addr, tree);

        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_copy_subtree_addr(
            wots_addr, tree_addr);
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_set_keypair_addr(
            wots_addr, idx_leaf);

        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_copy_keypair_addr(
            wots_pk_addr, wots_addr);

        /* The WOTS public key is only correct if the signature was correct. */
        /* Initially, root is the FORS pk, but on subsequent iterations it is
           the root of the subtree below the currently processed subtree. */
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_wots_pk_from_sig(
            wots_pk, sig, root, pub_seed, wots_addr, &hash_state_seeded);
        sig += PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_WOTS_BYTES;

        /* Compute the leaf node using the WOTS public key. */
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_thash_WOTS_LEN(
            leaf, wots_pk, pub_seed, wots_pk_addr, &hash_state_seeded);

        /* Compute the root node of this subtree. */
        PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_compute_root(
            root, leaf, idx_leaf, 0, sig, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_TREE_HEIGHT,
            pub_seed, tree_addr, &hash_state_seeded);
        sig += PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_TREE_HEIGHT * PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N;

        /* Update the indices for the next layer. */
        idx_leaf = (tree & ((1 << PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_TREE_HEIGHT) - 1));
        tree = tree >> PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_TREE_HEIGHT;
    }

    /* Check if the root node equals the root node in the public key. */
    if (memcmp(root, pub_root, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_N) != 0) {
        return -1;
    }

    return 0;
}
/**
* Returns an array containing the signature followed by the message.
*/
int PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign(
    uint8_t *sm, size_t *smlen,
    const uint8_t *m, size_t mlen, const uint8_t *sk) {
    size_t siglen;

    /* Write the detached signature first, then append the message after it.
       NOTE(review): the return value of crypto_sign_signature is ignored;
       that implementation (above) always returns 0. */
    PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_signature(
        sm, &siglen, m, mlen, sk);

    /* memmove (not memcpy): m may overlap the tail of sm. */
    memmove(sm + PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES, m, mlen);
    *smlen = siglen + mlen;

    return 0;
}
/**
* Verifies a given signature-message pair under a given public key.
*/
int PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_open(
    uint8_t *m, size_t *mlen,
    const uint8_t *sm, size_t smlen, const uint8_t *pk) {
    // guarantee alignment of pk
    union {
        __m128 *_x;
        uint8_t pk[PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_PUBLICKEYBYTES];
    } aligned_pk;
    memcpy(aligned_pk.pk, pk, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_CRYPTO_PUBLICKEYBYTES);
    pk = aligned_pk.pk;

    /* The API caller does not necessarily know what size a signature should be
       but SPHINCS+ signatures are always exactly PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES. */
    if (smlen < PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES) {
        /* Too short to contain a signature: wipe the output and fail. */
        memset(m, 0, smlen);
        *mlen = 0;
        return -1;
    }

    *mlen = smlen - PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES;

    /* Verify the detached signature over the trailing message bytes; on
       failure, wipe the output buffer before returning. */
    if (PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_crypto_sign_verify(
                sm, PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES, sm + PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES, *mlen, pk)) {
        memset(m, 0, smlen);
        *mlen = 0;
        return -1;
    }

    /* If verification was successful, move the message to the right place. */
    memmove(m, sm + PQCLEAN_SPHINCSHARAKA192FROBUST_AESNI_BYTES, *mlen);

    return 0;
}
| 8,184 |
333 | <reponame>JustinKyleJames/irods
#include "rsModDataObjMeta.hpp"
#include "modDataObjMeta.h"
#include "icatHighLevelRoutines.hpp"
#include "objMetaOpr.hpp"
#include "dataObjOpr.hpp"
#include "miscServerFunct.hpp"
#include "irods_file_object.hpp"
#include "irods_stacktrace.hpp"
#include "irods_configuration_keywords.hpp"
#include "key_value_proxy.hpp"
#include "replica_state_table.hpp"
#include "boost/format.hpp"
#include <cstring>
namespace
{
namespace rst = irods::replica_state_table;
} // anonymous namespace
int _call_file_modified_for_modification(
rsComm_t* rsComm,
modDataObjMeta_t* modDataObjMetaInp );
int
rsModDataObjMeta( rsComm_t *rsComm, modDataObjMeta_t *modDataObjMetaInp ) {
    // Server-side entry point for modifying data-object metadata.  Forwards
    // the call to the host connected to the master catalog when that host is
    // remote; otherwise dispatches on this server's catalog service role.
    int status;
    rodsServerHost_t *rodsServerHost = NULL;
    dataObjInfo_t *dataObjInfo;

    dataObjInfo = modDataObjMetaInp->dataObjInfo;

    // Resolve (and connect to) the host holding the master catalog for this
    // object's path.
    status = getAndConnRcatHost(
                 rsComm,
                 MASTER_RCAT,
                 ( const char* )dataObjInfo->objPath,
                 &rodsServerHost );
    if ( status < 0 || NULL == rodsServerHost ) { // JMC cppcheck - nullptr
        return status;
    }
    if ( rodsServerHost->localFlag == LOCAL_HOST ) {
        std::string svc_role;
        irods::error ret = get_catalog_service_role(svc_role);
        if(!ret.ok()) {
            irods::log(PASS(ret));
            return ret.code();
        }

        if( irods::CFG_SERVICE_ROLE_PROVIDER == svc_role ) {
            status = _rsModDataObjMeta( rsComm, modDataObjMetaInp );
        } else if( irods::CFG_SERVICE_ROLE_CONSUMER == svc_role ) {
            status = SYS_NO_RCAT_SERVER_ERR;
        } else {
            rodsLog(
                LOG_ERROR,
                "role not supported [%s]",
                svc_role.c_str() );
            status = SYS_SERVICE_ROLE_NOT_SUPPORTED;
        }
    }
    else {
        // Add IN_REPL_KW to prevent replication on the redirected server (the provider)
        addKeyVal( modDataObjMetaInp->regParam, IN_REPL_KW, "" );
        status = rcModDataObjMeta( rodsServerHost->conn, modDataObjMetaInp );
        // Remove the keyword as we will want to replicate on this server (the consumer)
        rmKeyVal(modDataObjMetaInp->regParam, IN_REPL_KW);
    }

    if ( status >= 0 ) {
        // fileModified is only triggered for a write or create open that is
        // not itself part of a replication (IN_REPL_KW absent).
        const auto open_type = getValByKey(modDataObjMetaInp->regParam, OPEN_TYPE_KW);
        const auto in_repl = getValByKey(modDataObjMetaInp->regParam, IN_REPL_KW);
        if (!in_repl && open_type &&
            (OPEN_FOR_WRITE_TYPE == std::atoi(open_type) || CREATE_TYPE == std::atoi(open_type))) {
            // TODO check for IN_PDMO...
            status = _call_file_modified_for_modification( rsComm, modDataObjMetaInp );
        }
    }

    return status;
}
int
_rsModDataObjMeta( rsComm_t *rsComm, modDataObjMeta_t *modDataObjMetaInp ) {
    // Catalog-provider-side implementation: runs the pre-process rule,
    // applies the catalog update to one or all replicas, then runs the
    // post-process rule.
    std::string svc_role;
    irods::error ret = get_catalog_service_role(svc_role);
    if(!ret.ok()) {
        irods::log(PASS(ret));
        return ret.code();
    }

    if( irods::CFG_SERVICE_ROLE_PROVIDER == svc_role ) {
        int status = 0;
        dataObjInfo_t *dataObjInfo;
        keyValPair_t *regParam;
        int i;
        ruleExecInfo_t rei2;

        memset( ( char* )&rei2, 0, sizeof( ruleExecInfo_t ) );
        rei2.rsComm = rsComm;
        if ( rsComm != NULL ) {
            rei2.uoic = &rsComm->clientUser;
            rei2.uoip = &rsComm->proxyUser;
        }
        rei2.doi = modDataObjMetaInp->dataObjInfo;
        rei2.condInputData = modDataObjMetaInp->regParam;
        regParam = modDataObjMetaInp->regParam;
        dataObjInfo = modDataObjMetaInp->dataObjInfo;

        // No metadata changes were requested; nothing to do.
        if ( regParam->len == 0 ) {
            return 0;
        }

        /* In dataObjInfo, need just dataId. But it will accept objPath too,
         * but less efficient
         */

        /** June 1 2009 for pre-post processing rule hooks **/
        rei2.doi = dataObjInfo;
        i = applyRule( "acPreProcForModifyDataObjMeta", NULL, &rei2, NO_SAVE_REI );
        if ( i < 0 ) {
            if ( rei2.status < 0 ) {
                i = rei2.status;
            }
            rodsLog( LOG_ERROR, "_rsModDataObjMeta:acPreProcForModifyDataObjMeta error stat=%d", i );
            return i;
        }
        /** June 1 2009 for pre-post processing rule hooks **/

        if ( getValByKey( regParam, ALL_KW ) != NULL ) {
            /* all copies */
            dataObjInfo_t *dataObjInfoHead = NULL;
            dataObjInfo_t *tmpDataObjInfo;
            dataObjInp_t dataObjInp;

            std::memset(&dataObjInp, 0, sizeof(dataObjInp));
            rstrcpy( dataObjInp.objPath, dataObjInfo->objPath, MAX_NAME_LEN );
            status = getDataObjInfoIncSpecColl( rsComm, &dataObjInp, &dataObjInfoHead );

            if ( status < 0 ) {
                rodsLog( LOG_NOTICE, "%s - Failed to get data objects, status = %d", __FUNCTION__, status );
                return status;
            }

            // Walk the replica list, applying the update to each; a special
            // collection entry terminates the walk.
            tmpDataObjInfo = dataObjInfoHead;
            while ( tmpDataObjInfo != NULL ) {
                if ( tmpDataObjInfo->specColl != NULL ) {
                    break;
                }
                // Failures are logged but the walk continues; status holds
                // the result of the last attempted update.
                status = chlModDataObjMeta( rsComm, tmpDataObjInfo, regParam );
                if ( status < 0 ) {
                    rodsLog( LOG_ERROR,
                             "_rsModDataObjMeta:chlModDataObjMeta %s error stat=%d",
                             tmpDataObjInfo->objPath, status );
                }
                tmpDataObjInfo = tmpDataObjInfo->next;
            }
            freeAllDataObjInfo( dataObjInfoHead );
        }
        else {
            // Single replica: update the catalog row directly.
            status = chlModDataObjMeta( rsComm, dataObjInfo, regParam );
            if ( status < 0 ) {
                char* sys_error = NULL;
                const char* rods_error = rodsErrorName( status, &sys_error );
                std::stringstream msg;
                msg << __FUNCTION__;
                msg << " - Failed to modify the database for object \"";
                msg << dataObjInfo->objPath;
                msg << "\" - " << rods_error << " " << sys_error;
                irods::error ret = ERROR( status, msg.str() );
                irods::log( ret );
                free( sys_error );
            }
        }

        /** June 1 2009 for pre-post processing rule hooks **/
        if ( status >= 0 ) {
            i = applyRule( "acPostProcForModifyDataObjMeta", NULL, &rei2, NO_SAVE_REI );
            if ( i < 0 ) {
                if ( rei2.status < 0 ) {
                    i = rei2.status;
                }
                rodsLog( LOG_ERROR,
                         "_rsModDataObjMeta:acPostProcForModifyDataObjMeta error stat=%d", i );
                return i;
            }
        }
        else {
            rodsLog( LOG_NOTICE, "%s - Failed updating the database with object info.", __FUNCTION__ );
            return status;
        }
        /** June 1 2009 for pre-post processing rule hooks **/
        return status;
    } else if( irods::CFG_SERVICE_ROLE_CONSUMER == svc_role ) {
        return SYS_NO_RCAT_SERVER_ERR;
    } else {
        rodsLog(
            LOG_ERROR,
            "role not supported [%s]",
            svc_role.c_str() );
        return SYS_SERVICE_ROLE_NOT_SUPPORTED;
    }
}
// Drives the resource-plugin "fileModified" hook after a data object's
// catalog metadata has been modified, so the owning resource(s) can react
// to the change. Returns 0 on success or an iRODS error code from the
// failing step; with ALL_KW the last failure code wins but iteration over
// the remaining replicas continues.
int _call_file_modified_for_modification(
    rsComm_t* rsComm,
    modDataObjMeta_t* modDataObjMetaInp ) {
    int status = 0;
    dataObjInfo_t *dataObjInfo;
    keyValPair_t *regParam;
    // Minimal rule-execution context; only the fields consumed downstream
    // are populated.
    ruleExecInfo_t rei2;
    memset( ( char* )&rei2, 0, sizeof( ruleExecInfo_t ) );
    rei2.rsComm = rsComm;
    rei2.uoic = &rsComm->clientUser;
    rei2.uoip = &rsComm->proxyUser;
    rei2.doi = modDataObjMetaInp->dataObjInfo;
    rei2.condInputData = modDataObjMetaInp->regParam;
    regParam = modDataObjMetaInp->regParam;
    dataObjInfo = modDataObjMetaInp->dataObjInfo;
    // Nothing was actually modified -- nothing to notify.
    if ( regParam->len == 0 ) {
        return 0;
    }
    // The replica state table entry needs to be removed before triggering fileModified
    if (rst::contains(dataObjInfo->dataId)) {
        rst::erase(dataObjInfo->dataId);
    }
    if ( getValByKey( regParam, ALL_KW ) != NULL ) {
        /* all copies */
        // ALL_KW: signal fileModified for every replica of this object.
        dataObjInfo_t *dataObjInfoHead = NULL;
        dataObjInfo_t *tmpDataObjInfo;
        dataObjInp_t dataObjInp;
        std::memset(&dataObjInp, 0, sizeof(dataObjInp));
        rstrcpy( dataObjInp.objPath, dataObjInfo->objPath, MAX_NAME_LEN );
        status = getDataObjInfoIncSpecColl( rsComm, &dataObjInp, &dataObjInfoHead );
        if ( status < 0 ) {
            rodsLog( LOG_NOTICE, "%s - Failed to get data objects.", __FUNCTION__ );
            return status;
        }
        tmpDataObjInfo = dataObjInfoHead;
        while ( tmpDataObjInfo != NULL ) {
            // Special collections are not handled by this loop.
            if ( tmpDataObjInfo->specColl != NULL ) {
                break;
            }
            irods::file_object_ptr file_obj(
                new irods::file_object(
                    rsComm,
                    tmpDataObjInfo ) );
            // Forward selected request keywords to the file object so the
            // resource plugin can see them.
            char* admin_kw = getValByKey( regParam, ADMIN_KW );
            if ( admin_kw != NULL ) {
                addKeyVal( (keyValPair_t*)&file_obj->cond_input(), ADMIN_KW, "" );
            }
            char* pdmo_kw = getValByKey( regParam, IN_PDMO_KW );
            if ( pdmo_kw != NULL ) {
                file_obj->in_pdmo( pdmo_kw );
            }
            const auto open_type{getValByKey(regParam, OPEN_TYPE_KW)};
            if (open_type) {
                addKeyVal((keyValPair_t*)&file_obj->cond_input(), OPEN_TYPE_KW, open_type);
            }
            irods::error ret = fileModified( rsComm, file_obj );
            if ( !ret.ok() ) {
                // Log and remember the failure, but keep notifying the
                // remaining replicas.
                std::stringstream msg;
                msg << __FUNCTION__;
                msg << " - Failed to signal resource that the data object \"";
                msg << tmpDataObjInfo->objPath;
                msg << " was modified.";
                ret = PASSMSG( msg.str(), ret );
                irods::log( ret );
                status = ret.code();
            }
            tmpDataObjInfo = tmpDataObjInfo->next;
        }
        freeAllDataObjInfo( dataObjInfoHead );
    }
    else {
        // Single-replica case.
        // Construct file_obj twice because ctor gives some info that factory does not
        irods::file_object_ptr file_obj(new irods::file_object(rsComm, dataObjInfo));
        // Need to pass along admin keyword here to ensure replicas can be managed
        dataObjInp_t dataObjInp{};
        rstrcpy(dataObjInp.objPath, dataObjInfo->objPath, MAX_NAME_LEN);
        if (getValByKey(regParam, ADMIN_KW)) {
            addKeyVal(&dataObjInp.condInput, ADMIN_KW, "");
        }
        // Use temporary as file_object_factory overwrites dataObjInfo pointer
        auto ret{file_object_factory(rsComm, &dataObjInp, file_obj)};
        if (!ret.ok()) {
            irods::log(ret);
            return ret.code();
        }
        // Factory overwrites rescHier with the resource which holds replica 0 - put it back
        file_obj->resc_hier(dataObjInfo->rescHier);
        if (getValByKey(regParam, ADMIN_KW)) {
            addKeyVal((keyValPair_t*)&file_obj->cond_input(), ADMIN_KW, "");
        }
        if (const auto pdmo_kw = getValByKey(regParam, IN_PDMO_KW); pdmo_kw) {
            // TODO: log in_pdmo kw
            file_obj->in_pdmo(pdmo_kw);
        }
        // Propagate the remaining request keywords into the file object's
        // cond_input so the plugin call below can observe them.
        if (const auto open_type = getValByKey(regParam, OPEN_TYPE_KW); open_type) {
            addKeyVal((keyValPair_t*)&file_obj->cond_input(), OPEN_TYPE_KW, open_type);
        }
        if (const char* sync = getValByKey(regParam, SYNC_OBJ_KW); sync) {
            addKeyVal((keyValPair_t*)&file_obj->cond_input(), SYNC_OBJ_KW, sync);
        }
        if (const auto repl_status = getValByKey(regParam, REPL_STATUS_KW); repl_status) {
            addKeyVal((keyValPair_t*)&file_obj->cond_input(), REPL_STATUS_KW, repl_status);
        }
        if (const char* selected_hier = getValByKey(regParam, SELECTED_HIERARCHY_KW); selected_hier) {
            addKeyVal((keyValPair_t*)&file_obj->cond_input(), SELECTED_HIERARCHY_KW, selected_hier);
        }
        ret = fileModified(rsComm, file_obj);
        if (!ret.ok()) {
            irods::log(PASSMSG((boost::format(
                "[%s] - Failed to signal the resource that the data object \"%s\"") %
                __FUNCTION__ % dataObjInfo->objPath).str(), ret));
            status = ret.code();
        }
    }
    return status;
}
| 6,229 |
972 | package com.hannesdorfmann.fragmentargs.processor.test;
// Annotation-processor test fixture: a @FragmentWithArgs fragment with a
// protected @Arg field, plus a nested static fragment of the same shape.
// Exercises builder generation for protected field access and inner classes.
@com.hannesdorfmann.fragmentargs.annotation.FragmentWithArgs
public class InnerClassWithProtectedField extends android.app.Fragment {
    // Protected (not private) so a generated builder in the same package
    // hierarchy can assign it directly.
    @com.hannesdorfmann.fragmentargs.annotation.Arg
    protected String arg;
    @com.hannesdorfmann.fragmentargs.annotation.FragmentWithArgs
    public static class InnerClass extends android.app.Fragment {
        @com.hannesdorfmann.fragmentargs.annotation.Arg
        protected String arg;
    }
}
1,723 | <gh_stars>1000+
#
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
import sys
import time
import random
import logging
import json
from framework.case_base import *
from framework.test_api import *
from harness.harness_api import *
class CTestCase(CTestCaseBase):
    """Startup test case: installs a timer application and verifies that
    its timer callback has fired the expected number of times.
    """

    # Return codes expected from the harness helpers used below.
    # NOTE(review): 65 and 69 look like CoAP-style response codes
    # (2.01 Created / 2.05 Content) -- confirm against harness_api.
    _RET_CREATED = 65
    _RET_CONTENT = 69

    def __init__(self, suite):
        CTestCaseBase.__init__(self, suite)

    def get_case_name(self):
        """Return the name of the directory that contains this test case."""
        case_path = os.path.dirname(os.path.abspath(__file__))
        return os.path.split(case_path)[1]

    def on_get_case_description(self):
        return "startup the executables"

    def on_setup_case(self):
        """Enter the case directory and start the test environment."""
        os.chdir(self.get_case_name())
        start_env()
        api_log_error("on_setup_case OK")
        return True, ''

    def on_cleanup_case(self):
        """Stop the environment started in on_setup_case()."""
        stop_env()
        api_log_error("on_cleanup_case OK")
        return True, ''

    # called by the framework
    def on_run_case(self):
        """Run the scenario; return (passed, message)."""
        time.sleep(0.5)

        # Install App1 (the timer wasm application).
        ret = install_app("App1", "06_timer.wasm")
        if ret != self._RET_CREATED:
            return False, ''

        # Query installed apps and verify App1 is listed.
        ret = query_app()
        if ret != self._RET_CONTENT:
            return False, ''
        ret = check_query_apps(["App1"])
        if ret == False:
            return False, ''

        # Ask App1 to start its timer.
        ret = send_request("/res1", "GET", None)
        if ret != self._RET_CONTENT:
            return False, ''

        # Give the timer time to fire, then read the tick counter.
        time.sleep(3)
        ret = send_request("/check_timer", "GET", None)
        if ret != self._RET_CONTENT:
            return False, ''

        # The timer is expected to have fired exactly twice by now.
        expect_response_payload = {"num": 2}
        ret = check_response_payload(expect_response_payload)
        if ret == False:
            return False, ''
        return True, ''
351 | # Copyright: (c) 2018, <NAME> (@jborean93) <<EMAIL>>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
import struct
class GssChannelBindingsStruct(object):
    # Dictionary keys for the structure's fields (public constants).
    INITIATOR_ADDTYPE = 'initiator_addtype'
    INITIATOR_ADDRESS_LENGTH = 'initiator_address_length'
    ACCEPTOR_ADDRTYPE = 'acceptor_addrtype'
    ACCEPTOR_ADDRESS_LENGTH = 'acceptor_address_length'
    APPLICATION_DATA_LENGTH = 'application_data_length'
    INITIATOR_ADDRESS = 'initiator_address'
    ACCEPTOR_ADDRESS = 'acceptor_address'
    APPLICATION_DATA = 'application_data'

    def __init__(self):
        """
        Used to send the out of band channel info as part of the authentication
        process. This is used as a way of verifying the target is who it says
        it is as this information is provided by the higher layer. In most
        cases, the CBT is just the hash of the server's TLS certificate to the
        application_data field.
        This bytes string of the packed structure is then MD5 hashed and
        included in the NTv2 response.
        """
        self.fields = {
            self.INITIATOR_ADDTYPE: 0,
            self.INITIATOR_ADDRESS_LENGTH: 0,
            self.ACCEPTOR_ADDRTYPE: 0,
            self.ACCEPTOR_ADDRESS_LENGTH: 0,
            self.APPLICATION_DATA_LENGTH: 0,
            self.INITIATOR_ADDRESS: b"",
            self.ACCEPTOR_ADDRESS: b"",
            self.APPLICATION_DATA: b""
        }

    def __setitem__(self, key, value):
        self.fields[key] = value

    def __getitem__(self, key):
        return self.fields[key]

    def get_data(self):
        """Pack the structure into its little-endian wire representation."""
        # Refresh each length field from the current buffer contents before
        # serialising, in case a buffer was replaced since the last call.
        length_pairs = (
            (self.INITIATOR_ADDRESS, self.INITIATOR_ADDRESS_LENGTH),
            (self.ACCEPTOR_ADDRESS, self.ACCEPTOR_ADDRESS_LENGTH),
            (self.APPLICATION_DATA, self.APPLICATION_DATA_LENGTH),
        )
        for buffer_key, length_key in length_pairs:
            self[length_key] = len(self[buffer_key])

        def pack_u32(value):
            # All integer fields are 32-bit unsigned little-endian.
            return struct.pack("<L", value)

        parts = [
            pack_u32(self[self.INITIATOR_ADDTYPE]),
            pack_u32(self[self.INITIATOR_ADDRESS_LENGTH]),
            self[self.INITIATOR_ADDRESS],
            pack_u32(self[self.ACCEPTOR_ADDRTYPE]),
            pack_u32(self[self.ACCEPTOR_ADDRESS_LENGTH]),
            self[self.ACCEPTOR_ADDRESS],
            pack_u32(self[self.APPLICATION_DATA_LENGTH]),
            self[self.APPLICATION_DATA],
        ]
        return b"".join(parts)
435 | <filename>writethedocs-na-2018/videos/making-your-code-examples-shine-larry-ullman-write-the-docs-portland-2018.json
{
"abstract": "Thorough and optimal technical documentation often requires not just an\nAPI reference of endpoints and parameters, or long contextual\ninstructions, but also code examples that users can cut and paste into\ntheir applications. As a technical writer at Stripe, I\u2019m partially\nresponsible for maintaining 150 code examples, each of which needs to be\nin 8 programming languages. In this presentation, I\u2019ll explain the\nprocesses we\u2019re using, the hurdles we\u2019re facing, and the solutions we\u2019ve\ndevised thus far to make and maintain code examples that really shine.\n\nSpecific concepts covered include:\n\n- Defining a style guide for your code\n- Generating code examples programmatically\n- Testing examples to ensure they work\n- Using common snippets to save time\n- Enhancing examples for a better user experience\n\nAlthough we haven\u2019t solved all our problems in this area, we\u2019ve made\ngreat progress with great results, and I look forward to sharing our\nbest policies and practices.\n",
"copyright_text": null,
"description": "",
"duration": 1674,
"language": "eng",
"recorded": "2018-05-08",
"related_urls": [
{
"label": "Conference schedule",
"url": "http://www.writethedocs.org/conf/portland/2018/schedule/"
}
],
"speakers": [
"<NAME>"
],
"tags": [],
"thumbnail_url": "https://i.ytimg.com/vi/td15D2BLa4c/maxresdefault.jpg",
"title": "Making Your Code Examples Shine",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=td15D2BLa4c"
}
]
}
| 552 |
648 | <filename>spec/hl7.fhir.core/1.0.2/package/SearchParameter-Composition-context.json<gh_stars>100-1000
{"resourceType":"SearchParameter","id":"Composition-context","url":"http://hl7.org/fhir/SearchParameter/Composition-context","name":"context","publisher":"Health Level Seven International (Structured Documents)","contact":[{"telecom":[{"system":"other","value":"http://hl7.org/fhir"}]},{"telecom":[{"system":"other","value":"http://www.hl7.org/Special/committees/structure/index.cfm"}]}],"date":"2015-10-24T07:41:03+11:00","code":"context","base":"Composition","type":"token","description":"Code(s) that apply to the event being documented","xpath":"f:Composition/f:event/f:code","xpathUsage":"normal"} | 216 |
369 | <reponame>Shiva-D/rtos-course
/* ----------------------------------------------------------------------------
* SAM Software Package License
* ----------------------------------------------------------------------------
* Copyright (c) 2012, Atmel Corporation
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following condition is met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* Atmel's name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ----------------------------------------------------------------------------
*/
#ifndef _SAMA5_PMC_INSTANCE_
#define _SAMA5_PMC_INSTANCE_
/* ========== Register definition for PMC peripheral ========== */
/* Two views of the same register map: raw addresses for assembler builds,
 * and dereferenced volatile-register macros for C builds. RoReg/WoReg/RwReg
 * are typedefs defined elsewhere -- presumably read-only / write-only /
 * read-write register types (verify against the device header). */
#if (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__))
#define REG_PMC_SCER (0xFFFFFC00U) /**< \brief (PMC) System Clock Enable Register */
#define REG_PMC_SCDR (0xFFFFFC04U) /**< \brief (PMC) System Clock Disable Register */
#define REG_PMC_SCSR (0xFFFFFC08U) /**< \brief (PMC) System Clock Status Register */
#define REG_PMC_PCER0 (0xFFFFFC10U) /**< \brief (PMC) Peripheral Clock Enable Register 0 */
#define REG_PMC_PCDR0 (0xFFFFFC14U) /**< \brief (PMC) Peripheral Clock Disable Register 0 */
#define REG_PMC_PCSR0 (0xFFFFFC18U) /**< \brief (PMC) Peripheral Clock Status Register 0 */
#define REG_CKGR_UCKR (0xFFFFFC1CU) /**< \brief (PMC) UTMI Clock Register */
#define REG_CKGR_MOR (0xFFFFFC20U) /**< \brief (PMC) Main Oscillator Register */
#define REG_CKGR_MCFR (0xFFFFFC24U) /**< \brief (PMC) Main Clock Frequency Register */
#define REG_CKGR_PLLAR (0xFFFFFC28U) /**< \brief (PMC) PLLA Register */
#define REG_PMC_MCKR (0xFFFFFC30U) /**< \brief (PMC) Master Clock Register */
#define REG_PMC_USB (0xFFFFFC38U) /**< \brief (PMC) USB Clock Register */
#define REG_PMC_SMD (0xFFFFFC3CU) /**< \brief (PMC) Soft Modem Clock Register */
#define REG_PMC_PCK (0xFFFFFC40U) /**< \brief (PMC) Programmable Clock 0 Register */
#define REG_PMC_IER (0xFFFFFC60U) /**< \brief (PMC) Interrupt Enable Register */
#define REG_PMC_IDR (0xFFFFFC64U) /**< \brief (PMC) Interrupt Disable Register */
#define REG_PMC_SR (0xFFFFFC68U) /**< \brief (PMC) Status Register */
#define REG_PMC_IMR (0xFFFFFC6CU) /**< \brief (PMC) Interrupt Mask Register */
#define REG_PMC_PLLICPR (0xFFFFFC80U) /**< \brief (PMC) PLL Charge Pump Current Register */
#define REG_PMC_WPMR (0xFFFFFCE4U) /**< \brief (PMC) Write Protect Mode Register */
#define REG_PMC_WPSR (0xFFFFFCE8U) /**< \brief (PMC) Write Protect Status Register */
#define REG_PMC_PCER1 (0xFFFFFD00U) /**< \brief (PMC) Peripheral Clock Enable Register 1 */
#define REG_PMC_PCDR1 (0xFFFFFD04U) /**< \brief (PMC) Peripheral Clock Disable Register 1 */
#define REG_PMC_PCSR1 (0xFFFFFD08U) /**< \brief (PMC) Peripheral Clock Status Register 1 */
#define REG_PMC_PCR (0xFFFFFD0CU) /**< \brief (PMC) Peripheral Control Register */
#else
/* C view: each macro dereferences the address as a volatile register lvalue. */
#define REG_PMC_SCER (*(WoReg*)0xFFFFFC00U) /**< \brief (PMC) System Clock Enable Register */
#define REG_PMC_SCDR (*(WoReg*)0xFFFFFC04U) /**< \brief (PMC) System Clock Disable Register */
#define REG_PMC_SCSR (*(RoReg*)0xFFFFFC08U) /**< \brief (PMC) System Clock Status Register */
#define REG_PMC_PCER0 (*(WoReg*)0xFFFFFC10U) /**< \brief (PMC) Peripheral Clock Enable Register 0 */
#define REG_PMC_PCDR0 (*(WoReg*)0xFFFFFC14U) /**< \brief (PMC) Peripheral Clock Disable Register 0 */
#define REG_PMC_PCSR0 (*(RoReg*)0xFFFFFC18U) /**< \brief (PMC) Peripheral Clock Status Register 0 */
#define REG_CKGR_UCKR (*(RwReg*)0xFFFFFC1CU) /**< \brief (PMC) UTMI Clock Register */
#define REG_CKGR_MOR (*(RwReg*)0xFFFFFC20U) /**< \brief (PMC) Main Oscillator Register */
#define REG_CKGR_MCFR (*(RoReg*)0xFFFFFC24U) /**< \brief (PMC) Main Clock Frequency Register */
#define REG_CKGR_PLLAR (*(RwReg*)0xFFFFFC28U) /**< \brief (PMC) PLLA Register */
#define REG_PMC_MCKR (*(RwReg*)0xFFFFFC30U) /**< \brief (PMC) Master Clock Register */
#define REG_PMC_USB (*(RwReg*)0xFFFFFC38U) /**< \brief (PMC) USB Clock Register */
#define REG_PMC_SMD (*(RwReg*)0xFFFFFC3CU) /**< \brief (PMC) Soft Modem Clock Register */
#define REG_PMC_PCK (*(RwReg*)0xFFFFFC40U) /**< \brief (PMC) Programmable Clock 0 Register */
#define REG_PMC_IER (*(WoReg*)0xFFFFFC60U) /**< \brief (PMC) Interrupt Enable Register */
#define REG_PMC_IDR (*(WoReg*)0xFFFFFC64U) /**< \brief (PMC) Interrupt Disable Register */
#define REG_PMC_SR (*(RoReg*)0xFFFFFC68U) /**< \brief (PMC) Status Register */
#define REG_PMC_IMR (*(RoReg*)0xFFFFFC6CU) /**< \brief (PMC) Interrupt Mask Register */
#define REG_PMC_PLLICPR (*(WoReg*)0xFFFFFC80U) /**< \brief (PMC) PLL Charge Pump Current Register */
#define REG_PMC_WPMR (*(RwReg*)0xFFFFFCE4U) /**< \brief (PMC) Write Protect Mode Register */
#define REG_PMC_WPSR (*(RoReg*)0xFFFFFCE8U) /**< \brief (PMC) Write Protect Status Register */
#define REG_PMC_PCER1 (*(WoReg*)0xFFFFFD00U) /**< \brief (PMC) Peripheral Clock Enable Register 1 */
#define REG_PMC_PCDR1 (*(WoReg*)0xFFFFFD04U) /**< \brief (PMC) Peripheral Clock Disable Register 1 */
#define REG_PMC_PCSR1 (*(RoReg*)0xFFFFFD08U) /**< \brief (PMC) Peripheral Clock Status Register 1 */
#define REG_PMC_PCR (*(RwReg*)0xFFFFFD0CU) /**< \brief (PMC) Peripheral Control Register */
#endif /* (defined(__ASSEMBLY__) || defined(__IAR_SYSTEMS_ASM__)) */
#endif /* _SAMA5_PMC_INSTANCE_ */
| 2,665 |
775 | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import torch
import numpy as np
from mmcv.utils.registry import build_from_cfg
from mmcls.datasets.builder import DATASETS, PIPELINES
from mmcls.datasets.pipelines import Compose
from mmcls.datasets.base_dataset import BaseDataset
from mpa.utils.logger import get_logger
logger = get_logger()
@DATASETS.register_module()
class MPAClsDataset(BaseDataset):
    """mmcls-compatible dataset that wraps an OTE dataset entity.

    A ``LoadImageFromOTEDataset`` step is prepended to every configured
    pipeline so items are loaded lazily from the OTE entity.

    Args:
        old_new_indices (dict, optional): Mapping with 'old' and 'new'
            index lists, kept in ``self.img_indices``.
        ote_dataset: OTE dataset entity providing images and annotations.
        labels: Sequence of OTE label entities; their names become CLASSES.
        **kwargs: Must contain 'pipeline' -- either a list of transform
            configs, or a dict mapping pipeline names to such lists.
    """

    def __init__(self, old_new_indices=None, ote_dataset=None, labels=None, **kwargs):
        # NOTE: BaseDataset.__init__ is deliberately not called; this class
        # manages its own annotation/pipeline state.
        self.ote_dataset = ote_dataset
        self.labels = labels
        self.CLASSES = [label.name for label in labels]
        self.gt_labels = []
        # Fix: this flag is read unconditionally in evaluate(); previously it
        # was only assigned when 'class_accuracy' was requested, so any other
        # metric list raised AttributeError.
        self.class_acc = False
        pipeline = kwargs['pipeline']
        self.img_indices = dict(old=[], new=[])
        self.num_classes = len(self.CLASSES)
        if old_new_indices is not None:
            self.img_indices['old'] = old_new_indices['old']
            self.img_indices['new'] = old_new_indices['new']
        if isinstance(pipeline, dict):
            # Multiple named pipelines (e.g. different augmentation branches).
            self.pipeline = {}
            for k, v in pipeline.items():
                _pipeline = [dict(type='LoadImageFromOTEDataset'), *v]
                _pipeline = [build_from_cfg(p, PIPELINES) for p in _pipeline]
                self.pipeline[k] = Compose(_pipeline)
            self.num_pipes = len(pipeline)
        elif isinstance(pipeline, list):
            self.num_pipes = 1
            _pipeline = [dict(type='LoadImageFromOTEDataset'), *pipeline]
            self.pipeline = Compose([build_from_cfg(p, PIPELINES) for p in _pipeline])
        self.load_annotations()

    def load_annotations(self):
        """Collect the ground-truth label id (or None) of every item."""
        for dataset_item in self.ote_dataset:
            if dataset_item.get_annotations() == []:
                label = None
            else:
                # Only the first label of the first annotation is used.
                label = int(dataset_item.get_annotations()[0].get_labels()[0].id_)
            self.gt_labels.append(label)
        self.gt_labels = np.array(self.gt_labels)

    def __getitem__(self, index):
        """Load the item at ``index`` and run it through the pipeline."""
        dataset_item = self.ote_dataset[index]
        if self.pipeline is None:
            return dataset_item
        results = {}
        results['index'] = index
        results['dataset_item'] = dataset_item
        results['height'], results['width'], _ = dataset_item.numpy.shape
        results['gt_label'] = None if self.gt_labels[index] is None else torch.tensor(self.gt_labels[index])
        results = self.pipeline(results)
        return results

    def get_gt_labels(self):
        """Get all ground-truth labels (categories).

        Returns:
            list[int]: categories for all images.
        """
        return self.gt_labels

    def __len__(self):
        return len(self.ote_dataset)

    def evaluate(self,
                 results,
                 metric='accuracy',
                 metric_options=None,
                 logger=None):
        """Evaluate the dataset with new metric 'class_accuracy'

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
                Default value is `accuracy`.
                'accuracy', 'precision', 'recall', 'f1_score', 'support', 'class_accuracy'
            metric_options (dict, optional): Options for calculating metrics.
                Allowed keys are 'topk', 'thrs' and 'average_mode'.
                Defaults to None.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Defaults to None.
        Returns:
            dict: evaluation results
        """
        if metric_options is None:
            metric_options = {'topk': (1, 5) if self.num_classes >= 5 else (1, )}
        # Fix: copy the metric list so the caller's list is never mutated by
        # the remove() below.
        metrics = [metric] if isinstance(metric, str) else list(metric)
        if 'class_accuracy' in metrics:
            metrics.remove('class_accuracy')
            # NOTE(review): once enabled, the flag stays True for subsequent
            # evaluate() calls -- matches the original behaviour.
            self.class_acc = True
        eval_results = super().evaluate(results, metrics, metric_options, logger)
        # Add Evaluation Accuracy score per Class
        if self.class_acc:
            results = np.vstack(results)
            gt_labels = self.get_gt_labels()
            accuracies = self.class_accuracy(results, gt_labels)
            eval_results.update({f'{c} accuracy': a for c, a in zip(self.CLASSES, accuracies)})
            eval_results.update({'mean accuracy': np.mean(accuracies)})
        return eval_results

    def class_accuracy(self, results, gt_labels):
        """Compute top-1 accuracy separately for each class.

        Args:
            results (np.ndarray): (N, num_classes) prediction scores.
            gt_labels (np.ndarray): (N,) ground-truth class indices.

        Returns:
            list: per-class top-1 accuracy, indexed by class id.
                NOTE(review): a class absent from gt_labels divides by
                zero (yielding nan/warning), matching the original code.
        """
        accuracies = []
        # Index of the highest-scoring class per sample, shape (N, 1).
        pred_label = results.argsort(axis=1)[:, -1:][:, ::-1]
        for i in range(self.num_classes):
            cls_pred = pred_label == i
            cls_pred = cls_pred[gt_labels == i]
            cls_acc = np.sum(cls_pred) / len(cls_pred)
            accuracies.append(cls_acc)
        return accuracies
| 2,269 |
647 | <filename>include/trick/compat/sim_services/Integrator/include/IntegJobClassId.hh
#include "trick/IntegJobClassId.hh"
| 41 |
348 | {"nom":"Courrières","circ":"11ème circonscription","dpt":"Pas-de-Calais","inscrits":7881,"abs":4311,"votants":3570,"blancs":139,"nuls":104,"exp":3327,"res":[{"nuance":"FN","nom":"Mme <NAME>","voix":1774},{"nuance":"REM","nom":"Mme <NAME>","voix":1553}]} | 106 |
1,062 | /**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <map>
#include <string>
#include <sstream>
#include <iomanip>
#include <stdexcept>
#include "metadata/metadata_api.h"
#include "util/util_api.h"
namespace MR4C {
// Singleton implementation behind the public Primitive API: owns the
// bidirectional Type<->name maps and the templated string-conversion
// helpers shared by all Primitive methods.
class PrimitiveImpl {

    friend class Primitive;

    private:

        // Bidirectional lookup tables, populated once in the constructor.
        std::map<std::string,Primitive::Type> m_stringToEnum;
        std::map<Primitive::Type,std::string> m_enumToString;

        // Meyers singleton: constructed on first use.
        static PrimitiveImpl& instance() {
            static PrimitiveImpl s_instance;
            return s_instance;
        }

        PrimitiveImpl() {
            mapType(Primitive::BOOLEAN, "BOOLEAN");
            mapType(Primitive::BYTE, "BYTE");
            mapType(Primitive::INTEGER, "INTEGER");
            mapType(Primitive::FLOAT, "FLOAT");
            mapType(Primitive::DOUBLE, "DOUBLE");
            mapType(Primitive::STRING, "STRING");
            mapType(Primitive::SIZE_T, "SIZE_T");
            mapType(Primitive::LONG_DOUBLE, "LONG_DOUBLE");
        }

        // Registers one enum/name pair in both maps.
        void mapType(Primitive::Type type, const std::string& strType) {
            m_stringToEnum[strType] = type;
            m_enumToString[type] = strType;
        }

        // Throws std::invalid_argument when the name is not registered.
        Primitive::Type enumFromString(std::string strType) {
            if ( m_stringToEnum.count(strType)==0 ) {
                MR4C_THROW(std::invalid_argument, "No primitive type named [" << strType << "]");
            }
            return instance().m_stringToEnum[strType];
        }

        // Throws std::invalid_argument when the enum value is not registered.
        std::string enumToString(Primitive::Type type) {
            if ( m_enumToString.count(type)==0 ) {
                MR4C_THROW(std::invalid_argument, "No primitive type enum = " << type);
            }
            return instance().m_enumToString[type];
        }

        // Formats a value: boolalpha so bools render as "true"/"false",
        // precision 16 so doubles round-trip through text.
        template<typename T> std::string toString(const T& val) {
            std::ostringstream ss;
            ss << std::boolalpha << std::setprecision(16) << val;
            return ss.str();
        }

        // Parses a value with the same stream flags used for formatting.
        template<typename T> T fromString(const std::string& str) {
            std::istringstream ss(str);
            T val;
            ss >> std::boolalpha >> std::setprecision(16) >> val;
            return val;
        }

        // Array variant: element-wise conversion of 'size' items.
        template<typename T> void toString(const T* vals, std::string* strs, size_t size) {
            for ( size_t i=0; i<size; i++ ) {
                strs[i] = toString<T>(vals[i]);
            }
        }

        // Array variant: element-wise parsing of 'size' items.
        template<typename T> void fromString(const std::string* strs, T* vals, size_t size ) {
            for ( size_t i=0; i<size; i++ ) {
                vals[i] = fromString<T>(strs[i]);
            }
        }

};
// specializations for char representing a byte
// (bytes are converted through int so they serialize as numbers, not glyphs)
template<> std::string PrimitiveImpl::toString(const char& val) {
    return PrimitiveImpl::toString<int>((int)val);
}

template<> char PrimitiveImpl::fromString(const std::string& str) {
    return (char) PrimitiveImpl::fromString<int>(str);
}

// Public Primitive API: thin delegates to the PrimitiveImpl singleton.
Primitive::Type Primitive::enumFromString(std::string strType) {
    return PrimitiveImpl::instance().enumFromString(strType);
}

std::string Primitive::enumToString(Primitive::Type type) {
    return PrimitiveImpl::instance().enumToString(type);
}

template<typename T> std::string Primitive::toString(const T& val) {
    return PrimitiveImpl::instance().toString<T>(val);
}

template<typename T> T Primitive::fromString(const std::string& str) {
    return PrimitiveImpl::instance().fromString<T>(str);
}

template<typename T> void Primitive::toString(const T* vals, std::string* strs, size_t size) {
    PrimitiveImpl::instance().toString<T>(vals,strs,size);
}

template<typename T> void Primitive::fromString(const std::string* strs, T* vals, size_t size ) {
    PrimitiveImpl::instance().fromString<T>(strs,vals,size);
}

// These are all the known instantiations of the template
// They are included here to avoid having to put implementations into header files
// (one group of four per supported primitive type)
template std::string Primitive::toString<bool>(const bool& val);
template bool Primitive::fromString<bool>(const std::string& str);
template void Primitive::toString<bool>(const bool* vals, std::string* strs, size_t size);
template void Primitive::fromString<bool>(const std::string* strs, bool* vals, size_t size );

template std::string Primitive::toString<char>(const char& val);
template char Primitive::fromString<char>(const std::string& str);
template void Primitive::toString<char>(const char* vals, std::string* strs, size_t size);
template void Primitive::fromString<char>(const std::string* strs, char* vals, size_t size );

template std::string Primitive::toString<int>(const int& val);
template int Primitive::fromString<int>(const std::string& str);
template void Primitive::toString<int>(const int* vals, std::string* strs, size_t size);
template void Primitive::fromString<int>(const std::string* strs, int* vals, size_t size );

template std::string Primitive::toString<float>(const float& val);
template float Primitive::fromString<float>(const std::string& str);
template void Primitive::toString<float>(const float* vals, std::string* strs, size_t size);
template void Primitive::fromString<float>(const std::string* strs, float* vals, size_t size );

template std::string Primitive::toString<double>(const double& val);
template double Primitive::fromString<double>(const std::string& str);
template void Primitive::toString<double>(const double* vals, std::string* strs, size_t size);
template void Primitive::fromString<double>(const std::string* strs, double* vals, size_t size );

template std::string Primitive::toString<long double>(const long double& val);
template long double Primitive::fromString<long double>(const std::string& str);
template void Primitive::toString<long double>(const long double* vals, std::string* strs, size_t size);
template void Primitive::fromString<long double>(const std::string* strs, long double* vals, size_t size );

template std::string Primitive::toString<std::string>(const std::string& val);
template std::string Primitive::fromString<std::string>(const std::string& str);
template void Primitive::toString<std::string>(const std::string* vals, std::string* strs, size_t size);
template void Primitive::fromString<std::string>(const std::string* strs, std::string* vals, size_t size );

template std::string Primitive::toString<size_t>(const size_t& val);
template size_t Primitive::fromString<size_t>(const std::string& str);
template void Primitive::toString<size_t>(const size_t* vals, std::string* strs, size_t size);
template void Primitive::fromString<size_t>(const std::string* strs, size_t* vals, size_t size );
}
| 2,311 |
1,529 | <reponame>IgiArdiyanto/PINTO_model_zoo<gh_stars>1000+
# Demo: run a CenterNet-style pose/detection model with OpenVINO on a single
# image, then draw the detected person box and skeleton keypoints.
from openvino.inference_engine import IENetwork, IECore
import cv2
import numpy as np
import pprint
# Load the FP16 IR model and compile it for CPU execution.
ie = IECore()
vino_model_path = f'saved_model/openvino/FP16'
vino_model = 'saved_model'
net = ie.read_network(model=f'{vino_model_path}/{vino_model}.xml', weights=f'{vino_model_path}/{vino_model}.bin')
exec_net = ie.load_network(network=net, device_name='CPU', num_requests=2)
# Single input blob; collect all output blob names for later indexing.
input_blob = next(iter(net.input_info))
out_blob = [o for o in net.outputs]
pprint.pprint('input_blob:')
pprint.pprint(input_blob)
pprint.pprint('out_blob:')
pprint.pprint(out_blob)
# NOTE(review): a still image is read through VideoCapture; cv2.imread would
# be the more direct choice but behaves the same for one frame.
cap = cv2.VideoCapture('person.png')
ret, frame = cap.read()
frame_h, frame_w = frame.shape[:2]
# Preprocess: resize to the network input, BGR->RGB, HWC->CHW, add batch dim.
width = 320
height = 320
im = cv2.resize(frame.copy(), (width, height))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = im.transpose((2, 0, 1))
im = im[np.newaxis, :, :, :]
inputs = {input_blob: im}
# Run one async inference request and wait for completion.
exec_net.requests[0].wait(-1)
exec_net.start_async(request_id=0, inputs=inputs)
if exec_net.requests[0].wait(-1) == 0:
    res = [
        exec_net.requests[0].output_blobs[out_blob[0]].buffer,
        exec_net.requests[0].output_blobs[out_blob[1]].buffer,
        exec_net.requests[0].output_blobs[out_blob[2]].buffer,
        exec_net.requests[0].output_blobs[out_blob[3]].buffer,
        exec_net.requests[0].output_blobs[out_blob[4]].buffer,
        exec_net.requests[0].output_blobs[out_blob[5]].buffer,
    ]
# NOTE(review): everything below assumes the wait() above returned 0; if it
# did not, 'res' is unbound and the next line raises NameError.
pprint.pprint('res:')
pprint.pprint(out_blob[0])
pprint.pprint(res[0].shape)
pprint.pprint(res[0])
pprint.pprint(out_blob[1])
pprint.pprint(res[1].shape)
pprint.pprint(res[1])
pprint.pprint(out_blob[2])
pprint.pprint(res[2].shape)
pprint.pprint(res[2])
pprint.pprint(out_blob[3])
pprint.pprint(res[3].shape)
pprint.pprint(res[3])
pprint.pprint(out_blob[4])
pprint.pprint(res[4].shape)
pprint.pprint(res[4])
pprint.pprint(out_blob[5])
pprint.pprint(res[5].shape)
pprint.pprint(res[5])
# First detection: normalized [ymin, xmin, ymax, xmax] box, scaled back to
# the original frame size.
person = res[5][0][0]
print('person:', person)
ymin = int(person[0] * frame_h)
xmin = int(person[1] * frame_w)
ymax = int(person[2] * frame_h)
xmax = int(person[3] * frame_w)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 0, 0))
# Skeleton edges between the 17 COCO-style keypoint indices.
KEYPOINT_EDGES = [
    (0, 1),
    (0, 2),
    (1, 3),
    (2, 4),
    (0, 5),
    (0, 6),
    (5, 7),
    (7, 9),
    (6, 8),
    (8, 10),
    (5, 6),
    (5, 11),
    (6, 12),
    (11, 12),
    (11, 13),
    (13, 15),
    (12, 14),
    (14, 16),
]
print('res1:', res[1].shape)
# Keypoint coordinates are normalized; channel 0 holds y, channel 1 holds x.
bone_y = res[1][0][0][0]
bone_x = res[1][0][1][0]
print('bone_x.shape:', bone_x.shape)
print('bone_x:', bone_x)
print('bone_y.shape:', bone_y.shape)
print('bone_y:', bone_y)
# Draw each keypoint as a small circle.
for keypoint_x, keypoint_y in zip(bone_x, bone_y):
    cv2.circle(
        frame,
        (int(keypoint_x * frame_w), int(keypoint_y * frame_h)),
        2,
        (0, 255, 0))
# Connect the keypoints along the skeleton edges.
for keypoint_start, keypoint_end in KEYPOINT_EDGES:
    cv2.line(
        frame,
        (int(bone_x[keypoint_start] * frame_w), int(bone_y[keypoint_start] * frame_h)),
        (int(bone_x[keypoint_end] * frame_w), int(bone_y[keypoint_end] * frame_h)),
        (0, 255, 0),
        2)
# Show the annotated frame until a key is pressed.
cv2.namedWindow('centernet')
cv2.imshow('centernet', frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
14,668 | <gh_stars>1000+
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Constants used by MimeHandlerView.
#ifndef EXTENSIONS_BROWSER_GUEST_VIEW_MIME_HANDLER_VIEW_MIME_HANDLER_VIEW_CONSTANTS_H_
#define EXTENSIONS_BROWSER_GUEST_VIEW_MIME_HANDLER_VIEW_MIME_HANDLER_VIEW_CONSTANTS_H_
// Extern declarations only; the string values are defined in the
// corresponding .cc file.
namespace mime_handler_view {
// API namespace.
extern const char kAPINamespace[];
// Other.
// Key identifying a particular MimeHandlerView instance.
extern const char kViewId[];
}  // namespace mime_handler_view
#endif  // EXTENSIONS_BROWSER_GUEST_VIEW_MIME_HANDLER_VIEW_MIME_HANDLER_VIEW_CONSTANTS_H_
| 246 |
14,668 | <reponame>zealoussnow/chromium<filename>media/base/media_content_type.cc
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/base/media_content_type.h"
namespace media {

namespace {

// Content at or below this duration (in seconds) is considered transient.
const int kMinimumContentDurationSecs = 5;

}  // anonymous namespace

MediaContentType DurationToMediaContentType(base::TimeDelta duration) {
  // A zero duration indicates that the duration is unknown; unknown
  // content is treated as persistent.
  if (duration.is_zero())
    return MediaContentType::Persistent;
  // Long-form content is persistent; short clips are transient.
  if (duration > base::Seconds(kMinimumContentDurationSecs))
    return MediaContentType::Persistent;
  return MediaContentType::Transient;
}

}  // namespace media
| 245 |
6,036 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/core/optimizer/dropout_recompute.h"
#include "orttraining/core/graph/recompute_graph_utils.h"
namespace onnxruntime {
// Inserts a "DropoutGrad" node into the graph that recomputes the output of
// the given Dropout node (reusing its saved mask and ratio/training_mode
// inputs) instead of keeping the original activation alive. When
// use_original_input is false, the recompute reads the recomputed variant of
// the Dropout input (named via RecomputeName). Returns the new node.
Node& InsertDropoutRecompute(Graph& graph, Node& node, bool use_original_input) {
  NodeArg* input = node.MutableInputDefs()[0];
  if (!use_original_input) {
    // Wire the recompute to the recomputed version of the input arg.
    auto& recomputed_input = graph.GetOrCreateNodeArg(graph_utils::RecomputeName(input->Name()),
                                                      input->TypeAsProto());
    input = &recomputed_input;
  }

  // Output arg carrying the recomputed activation (same type as original).
  const auto& output = node.OutputDefs()[0];
  auto& recomputed_output = graph.GetOrCreateNodeArg(graph_utils::RecomputeName(output->Name()),
                                                     output->TypeAsProto());

  Node& recompute_node = graph.AddNode(node.Name() + "_recompute",
                                       "DropoutGrad",
                                       "Recompute of " + node.Name(),
                                       {
                                           input,                        // X
                                           node.MutableOutputDefs()[1],  // mask
                                           node.MutableInputDefs()[1],   // ratio
                                           node.MutableInputDefs()[2]    // training_mode
                                       },
                                       {&recomputed_output},
                                       {},
                                       kMSDomain);
  return recompute_node;
}
} // namespace onnxruntime
| 943 |
468 | <reponame>benparsons/hexbin<gh_stars>100-1000
{
"name": "netlify",
"author": "<NAME>",
"license": "CC0",
"vector": "http://hexb.in/vector/netlify.svg",
"description": "The all-in-one platform for automating modern web projects.",
"raster": "http://hexb.in/hexagons/netlify.png",
"filename": "meta/netlify.json"
} | 131 |
474 | package org.gearvrf.widgetlib.widget.custom;
import android.graphics.Color;
import static org.gearvrf.widgetlib.main.Utility.equal;
import org.gearvrf.widgetlib.log.Log;
import org.gearvrf.widgetlib.adapter.Adapter;
import org.gearvrf.widgetlib.adapter.BaseAdapter;
import org.gearvrf.widgetlib.main.WidgetLib;
import org.gearvrf.widgetlib.widget.GroupWidget;
import org.gearvrf.widgetlib.widget.ListWidget;
import org.gearvrf.widgetlib.widget.Widget;
import org.gearvrf.widgetlib.widget.layout.Layout;
import org.gearvrf.widgetlib.widget.layout.OrientedLayout;
import org.gearvrf.widgetlib.widget.layout.basic.LinearLayout;
import org.gearvrf.GVRContext;
import org.gearvrf.GVRTexture;
import static org.gearvrf.utility.Log.tag;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * MultiPageWidget extension applying {@link OrientedLayout.Orientation#STACK stack layout} to the
 * list of pages. Pages are stacked along the Z axis and additionally shifted along X/Y so that
 * each page in the stack stays partially visible.
 */
public class MultiPageStack extends MultiPageWidget {
    /**
     * Create the instance of MultiPageStack
     * @param context
     * @param pageWidth width of one page
     * @param pageHeight height of one page
     * @param pageCount number of pages
     * @param maxVisiblePageCount max number of pages visible on the screen at the same time
     * @param adapter data set adapter
     */
    public MultiPageStack(final GVRContext context, final float pageWidth, final float pageHeight,
                          final int pageCount, final int maxVisiblePageCount,
                          final Adapter adapter) {
        super(context,
                new PageAdapter(context, pageCount, pageWidth, pageHeight),
                0, 0, maxVisiblePageCount);
        if (adapter != null) {
            setAdapter(adapter);
        }

        // Stack layout: pages are piled up along Z, front-most first, clipped to uniform size.
        mStackLayout = new LinearLayout();
        mStackLayout.setOrientation(OrientedLayout.Orientation.STACK);
        mStackLayout.setDividerPadding(DEFAULT_PAGE_PADDING_Z, Layout.Axis.Z);
        mStackLayout.setGravity(LinearLayout.Gravity.FRONT);
        mStackLayout.enableUniformSize(true);
        mStackLayout.enableClipping(true);
        applyListLayout(mStackLayout);

        // Shift layout: pages are offset vertically (by default) so each one peeks out.
        mShiftLayout = new LinearLayout();
        mShiftLayout.setOrientation(OrientedLayout.Orientation.VERTICAL);
        mShiftLayout.setDividerPadding(DEFAULT_PAGE_PADDING_X, Layout.Axis.X);
        mShiftLayout.setDividerPadding(DEFAULT_PAGE_PADDING_Y, Layout.Axis.Y);
        mShiftLayout.setGravity(LinearLayout.Gravity.TOP);
        mShiftLayout.enableUniformSize(true);
        mShiftLayout.enableClipping(true);
        applyListLayout(mShiftLayout);
    }

    /**
     * Sets padding between the pages. X and Y padding apply to the shift layout,
     * Z padding applies to the stack layout.
     * @param padding new divider padding
     * @param axis axis the padding applies to
     */
    public void setPadding(float padding, Layout.Axis axis) {
        OrientedLayout layout = null;
        switch(axis) {
            case X:
                layout = mShiftLayout;
                break;
            case Y:
                layout = mShiftLayout;
                break;
            case Z:
                layout = mStackLayout;
                break;
        }
        if (layout != null) {
            if (!equal(layout.getDividerPadding(axis), padding)) {
                layout.setDividerPadding(padding, axis);
                // Only relayout if the changed axis is the one this layout flows along.
                if (layout.getOrientationAxis() == axis) {
                    requestLayout();
                }
            }
        }
    }

    /**
     * Sets page shift orientation. The pages might be shifted horizontally or vertically relative
     * to each other to make the content of each page on the screen at least partially visible
     * @param orientation HORIZONTAL or VERTICAL; STACK is reserved for the depth layout
     */
    public void setShiftOrientation(OrientedLayout.Orientation orientation) {
        if (mShiftLayout.getOrientation() != orientation &&
            orientation != OrientedLayout.Orientation.STACK) {
            mShiftLayout.setOrientation(orientation);
            requestLayout();
        }
    }

    @Override
    protected void setItemsPerPage(int itemNum) {
        super.setItemsPerPage(itemNum);
        if (mItemAdapter != null && mAdapter != null) {
            // Recompute how many pages are needed to host all items.
            int pageCount = (int) Math.ceil((float) mItemAdapter.getCount() /itemNum);
            Log.d(Log.SUBSYSTEM.WIDGET, TAG, "setPageCount = %d", pageCount);
            ((PageAdapter)mAdapter).setCount(pageCount);
            recalculateViewPort(mAdapter);
        }
    }

    private static final String TAG = tag(MultiPageStack.class);

    /**
     * Private adapter class for list of pages
     */
    private static class PageAdapter extends BaseAdapter {
        private static final String TAG = tag(PageAdapter.class);
        private int mPageCount;

        private final GVRContext mGvrContext;
        private final float mPageWidth, mPageHeight;
        // Lazily created page widgets keyed by position.
        private final Map<Integer, ListWidget> mPages;
        // Background textures cycled across pages.
        private final List<GVRTexture> mPageBgTextures;
        private final static int[] mPageRainbowColors = {
                Color.RED,
                0xFFFFA500, // ORANGE
                Color.YELLOW,
                Color.GREEN,
                Color.CYAN,
                Color.BLUE,
                Color.MAGENTA,
        };
        private final static int[] mPageGrayColors = {
                Color.LTGRAY,
                Color.GRAY,
        };

        PageAdapter(GVRContext gvrContext, int pageCount, float pageWidth,
                    float pageHeight) {
            mGvrContext = gvrContext;
            mPageCount = pageCount;
            mPageWidth = pageWidth;
            mPageHeight = pageHeight;
            mPages = new HashMap<>(pageCount);
            mPageBgTextures = new ArrayList<>(mPageGrayColors.length);
            for (int color: mPageGrayColors){
                mPageBgTextures.add(WidgetLib.getTextureHelper().getSolidColorTexture(color));
            }
        }

        private void setCount(int pageCount) {
            Log.d(Log.SUBSYSTEM.WIDGET, TAG, "setCount: pageCount = %d", pageCount);
            mPageCount = pageCount;
            // Drop cached pages; they will be rebuilt on demand for the new count.
            mPages.clear();
            notifyDataSetChanged();
        }

        @Override
        public int getCount() {
            return mPageCount;
        }

        @Override
        public Object getItem(int position) {
            return null;
        }

        @Override
        public long getItemId(int position) {
            return position;
        }

        @Override
        public boolean hasUniformViewSize() {
            return true;
        }

        @Override
        public float getUniformWidth() {
            return mPageWidth;
        }

        @Override
        public float getUniformHeight() {
            return mPageHeight;
        }

        @Override
        public float getUniformDepth() {
            return 0.1f;
        }

        @Override
        public Widget getView(final int position, Widget convertView, GroupWidget parent) {
            // BUGFIX: reject out-of-range positions. The original check used '&&'
            // ("position < 0 && position >= mPageCount"), which can never be true,
            // so invalid positions fell through to the cache/creation path.
            if (position < 0 || position >= mPageCount) {
                return null;
            }
            ListWidget page = mPages.get(position);
            if (page == null) {
                // instanceof is null-safe, so the former explicit null check was redundant.
                if (convertView instanceof ListWidget) {
                    page = ((ListWidget)convertView);
                    page.clear();
                } else {
                    ListWidget widget = new ListWidget(mGvrContext,
                            null, mPageWidth, mPageHeight);
                    widget.setName("Page:" + position);
                    page = widget;
                }

                int bgId = position % mPageBgTextures.size();
                page.setTexture(mPageBgTextures.get(bgId));
                mPages.put(position, page);
            }
            Log.d(Log.SUBSYSTEM.WIDGET, TAG, "getView[%d] %s", position, page);

            return page;
        }
    }

    private final LinearLayout mStackLayout, mShiftLayout;

    private static final float DEFAULT_PAGE_PADDING_Z = 5;
    private static final float DEFAULT_PAGE_PADDING_Y = -6;
    private static final float DEFAULT_PAGE_PADDING_X = -5;
}
| 3,685 |
831 | <filename>profilers/testSrc/com/android/tools/profilers/memory/MemoryUsageTest.java
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.profilers.memory;
import com.android.tools.adtui.model.FakeTimer;
import com.android.tools.adtui.model.Range;
import com.android.tools.adtui.model.RangedContinuousSeries;
import com.android.tools.adtui.model.SeriesData;
import com.android.tools.idea.transport.faketransport.FakeGrpcChannel;
import com.android.tools.idea.transport.faketransport.FakeTransportService;
import com.android.tools.profiler.proto.Memory;
import com.android.tools.profilers.FakeIdeProfilerServices;
import com.android.tools.profilers.ProfilerClient;
import com.android.tools.profilers.ProfilersTestData;
import com.android.tools.profilers.StudioProfilers;
import com.google.common.truth.Truth;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
public class MemoryUsageTest {
// Use an arbitrary stream id because we don't care in the data series.
private static final int STREAM_ID = 1;
private final FakeTimer myTimer = new FakeTimer();
private final FakeTransportService myService = new FakeTransportService(myTimer);
@Rule public FakeGrpcChannel myGrpcChannel = new FakeGrpcChannel("MemoryUsageTEst", myService);
private FakeIdeProfilerServices myIdeProfilerServices;
private StudioProfilers myProfilers;
@Before
public void setup() {
myIdeProfilerServices = new FakeIdeProfilerServices();
myIdeProfilerServices.enableEventsPipeline(true);
myProfilers = new StudioProfilers(new ProfilerClient(myGrpcChannel.getChannel()), myIdeProfilerServices, myTimer);
// insert memory data for new pipeline.
for (int i = 0; i < 10; i++) {
myService.addEventToStream(STREAM_ID,
// Space out the data by 10 seconds to work around the 1 second buffer in UnifiedEventDataSeries.
ProfilersTestData.generateMemoryUsageData(
TimeUnit.SECONDS.toMicros(i * 10),
Memory.MemoryUsageData.newBuilder().setTotalMem(i * 10).build()).build());
}
myProfilers.getTimeline().getDataRange().set(0, TimeUnit.SECONDS.toMicros(100));
}
@Test
public void testNewPipelineGetData() {
MemoryUsage usage = new MemoryUsage(myProfilers);
RangedContinuousSeries rangedSeries = usage.getTotalMemorySeries();
Range range = rangedSeries.getXRange();
// Request full range
range.set(0, TimeUnit.SECONDS.toMicros(100));
List<SeriesData<Long>> series = rangedSeries.getSeries();
Truth.assertThat(series.size()).isEqualTo(10);
for (int i = 0; i < series.size(); i++) {
Truth.assertThat(series.get(i).value).isEqualTo(i * 10);
}
// Request negative to mid range
range.set(TimeUnit.SECONDS.toMicros(-50), TimeUnit.SECONDS.toMicros(45));
series = rangedSeries.getSeries();
Truth.assertThat(series.size()).isEqualTo(6);
for (int i = 0; i < series.size(); i++) {
Truth.assertThat(series.get(i).value).isEqualTo(i * 10);
}
// Request mid to high range
range.set(TimeUnit.SECONDS.toMicros(45), TimeUnit.SECONDS.toMicros(200));
series = rangedSeries.getSeries();
Truth.assertThat(series.size()).isEqualTo(6);
for (int i = 0; i < series.size(); i++) {
Truth.assertThat(series.get(i).value).isEqualTo((i + 4) * 10);
}
}
} | 1,432 |
2,151 | <reponame>zipated/src
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/vr/model/controller_model.h"
namespace vr {
// Default-constructs a controller model with the member defaults declared in
// the header.
ControllerModel::ControllerModel() = default;

// Memberwise copy of the controller pose, laser geometry, button states and
// rendering attributes.
// NOTE(review): keep this initializer list in sync with the members declared
// in controller_model.h when new fields are added — a missing entry here would
// silently default-initialize the copy.
ControllerModel::ControllerModel(const ControllerModel& other)
    : transform(other.transform),
      laser_direction(other.laser_direction),
      laser_origin(other.laser_origin),
      touchpad_button_state(other.touchpad_button_state),
      app_button_state(other.app_button_state),
      home_button_state(other.home_button_state),
      opacity(other.opacity),
      quiescent(other.quiescent),
      resting_in_viewport(other.resting_in_viewport),
      handedness(other.handedness) {}

ControllerModel::~ControllerModel() = default;
} // namespace vr
| 296 |
434 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iterative Refinement for linear systems."""
# TODO(lbethune): in the future Chebychev acceleration of Iterative Refinement
# could be implemented:
# <NAME>. and <NAME>., 2014. Chebyshev acceleration of iterative refinement.
# Numerical Algorithms, 66(3), pp.591-608.
from typing import Any
from typing import Callable
from typing import NamedTuple
from typing import Optional
from typing import Union
from dataclasses import dataclass
from functools import partial
import jax
import jax.numpy as jnp
from jaxopt._src import loop
from jaxopt._src import base
from jaxopt._src import implicit_diff as idf
from jaxopt._src.tree_util import tree_zeros_like, tree_add, tree_sub
from jaxopt._src.tree_util import tree_add_scalar_mul, tree_scalar_mul
from jaxopt._src.tree_util import tree_vdot, tree_negative, tree_l2_norm
from jaxopt._src.linear_operator import _make_linear_operator
import jaxopt._src.linear_solve as linear_solve
class IterativeRefinementState(NamedTuple):
  """Named tuple containing the iterative-refinement loop state.

  Attributes:
    iter_num: iteration number.
    error: error used as stop criterion, deduced from the residuals ``b - Ax``.
    target_residuals: residuals of the current target (the right-hand side of
      the next correction solve).
    init: init params used to warm-start the inner solver.
  """
  iter_num: int
  error: float
  target_residuals: Any
  init: Any
# TODO(lbethune): in the future return the state of the internal
# solver (iter_num, error) as part of the current state.
@dataclass(eq=False)
class IterativeRefinement(base.IterativeSolver):
  r"""`Iterative refinement
  <https://en.wikipedia.org/wiki/Iterative_refinement>`_ algorithm.

  This is a meta-algorithm for solving the linear system ``Ax = b`` based on
  a provided linear system solver. Our implementation is a slight generalization
  of the standard algorithm. It starts with :math:`(r_0, x_0) = (b, 0)` and
  iterates

  .. math::

    \begin{aligned}
    x &= \text{solution of } \bar{A} x = r_{t-1}\\
    x_t &= x_{t-1} + x\\
    r_t &= b - A x_t
    \end{aligned}

  where :math:`\bar{A}` is some approximation of A, with preferably
  better preconditioning than A. By default, we use
  :math:`\bar{A} = A`, which is the standard iterative refinement algorithm.

  This method has the advantage of converging even if the solve step is
  inaccurate. This is particularly useful for ill-posed problems.

  Attributes:
    matvec_A: (optional) a Callable matvec_A(A, x).
      By default, matvec_A(A, x) = tree_dot(A, x), where pytree
      A matches x structure.
    matvec_A_bar: (optional) a Callable.
      If None, then :math:`\bar{A}=A`. Otherwise, a Callable matvec_A_bar(x).
    solve: a Callable that accepts A as first argument, b as second,
      and a warm start ``init`` as third argument.
      This solver can be inaccurate and run with low precision.
    maxiter: maximum number of iterations (default: 10).
    tol: absolute tolerance for stopping criterion (default: 1e-7).
    verbose: If verbose=1, print error at each iteration.
    implicit_diff: whether to enable implicit diff or autodiff of unrolled
      iterations.
    implicit_diff_solve: the linear system solver to use.
    jit: whether to JIT-compile the optimization loop (default: "auto").
    unroll: whether to unroll the optimization loop (default: "auto")

  References:
    [1] <NAME>. Rounding Errors in Algebraic Processes. Prentice Hall, Englewood Cliffs, NJ, 1963.
    [2] <NAME>., 1967. Iterative refinement in floating point. Journal of the ACM (JACM), 14(2), pp.316-321.
    [3] https://en.wikipedia.org/wiki/Iterative_refinement.
  """
  matvec_A: Optional[Callable] = None
  matvec_A_bar: Optional[Callable] = None
  solve: Callable = partial(linear_solve.solve_gmres, ridge=1e-6)
  maxiter: int = 10
  tol: float = 1e-7
  verbose: int = 0
  implicit_diff_solve: Optional[Callable] = None
  jit: base.AutoOrBoolean = "auto"
  unroll: base.AutoOrBoolean = "auto"

  def init_state(self,
                 init_params,
                 A: Any,
                 b: Any,
                 A_bar: Any = None):
    """Initializes the loop state; the initial residuals are ``b`` itself."""
    return IterativeRefinementState(
        iter_num=jnp.asarray(0),
        error=jnp.asarray(jnp.inf),
        target_residuals=b,
        init=init_params)

  def init_params(self,
                  A: Any,
                  b: Any,
                  A_bar: Any = None):
    """Default initial params: zeros with the same pytree structure as ``b``."""
    return tree_zeros_like(b)

  def update(self,
             params: Any,
             state: IterativeRefinementState,
             A: Any,
             b: Any,
             A_bar: Optional[Any] = None):
    """Runs one refinement step: solve A_bar x = r, add x to params, refresh r."""
    if self._copy_A:
      # matvec_A_bar was not provided, so use A itself as the approximation.
      A_bar = A

    matvec_A = self.matvec_A(A)
    matvec_A_bar = self.matvec_A_bar(A_bar)

    # TODO(lbethune): support preconditioners ?
    # Could it be done by user with partial(solver, M=precond) ?
    residual_sol = self.solve(matvec_A_bar, state.target_residuals, init=state.init)

    params = tree_add(params, residual_sol)
    target_residuals = tree_sub(b, matvec_A(params))
    error = tree_l2_norm(target_residuals)

    # The next correction solve is warm-started from init_params (zeros).
    state = IterativeRefinementState(
        iter_num=state.iter_num+1,
        error=error,
        target_residuals=target_residuals,
        init=self.init_params(A, b, A_bar))

    return base.OptStep(params, state)

  def run(self,
          init_params,
          A: Any,
          b: Any,
          A_bar: Optional[Any] = None):
    """Runs the iterative refinement.

    Args:
      init_params: init_params for warm start.
      A: params for ``self.matvec_A``.
      b: vector ``b`` in ``Ax=b``.
      A_bar: optional parameters for ``matvec_A_bar``.
    Returns:
      (params, state), ``params = (primal_var, dual_var_eq, dual_var_ineq)``
    """
    if init_params is None:
      init_params = self.init_params(A, b, A_bar)
    return super().run(init_params, A, b, A_bar)

  def optimality_fun(self,
                     params: Any,
                     A: Any,
                     b: Any,
                     A_bar: Optional[Any] = None):
    """Residuals ``b - A params``; zero at an exact solution."""
    del A_bar  # unused
    A = self.matvec_A(A)
    return tree_sub(b, A(params))

  def l2_optimality_error(self,
                          params: Any,
                          A: Any,
                          b: Any,
                          A_bar: Optional[Any] = None):
    """L2 norm of the residuals ``b - A params``."""
    del A_bar  # unused
    return tree_l2_norm(self.optimality_fun(params, A, b))

  def __post_init__(self):
    # When no custom matvec_A_bar is given, record that A_bar must be replaced
    # by A at every update() call (_copy_A), and alias the operators.
    self._copy_A = False
    if self.matvec_A_bar is None:
      self.matvec_A_bar = self.matvec_A
      self._copy_A = True

    # Wrap both matvecs into linear operators (handles the None/tree_dot default).
    self.matvec_A = _make_linear_operator(self.matvec_A)
    self.matvec_A_bar = _make_linear_operator(self.matvec_A_bar)
def solve_iterative_refinement(matvec: Callable,
                               b: Any,
                               init: Optional[Any] = None,
                               maxiter: int = 10,
                               tol: float = 1e-7,
                               solve: Callable = linear_solve.solve_gmres,
                               **kwargs) -> Any:
  """Solves ``A x = b`` using iterative refinement.

  Args:
    matvec: product between ``A`` and a vector.
    b: pytree.
    init: optional warm-start pytree.
    maxiter: maximum number of refinement steps (default: 10).
    tol: absolute tolerance on the residuals (default: 1e-7).
    solve: optional solve function (default: linear_solve.solve_gmres).
    kwargs: additional parameters forwarded to IterativeRefinement.

  Returns:
    Pytree with the same structure as ``b``.
  """
  # The matvec closes over A, so the solver's A argument is unused (None).
  solver = IterativeRefinement(matvec_A=lambda _, x: matvec(x),
                               solve=solve,
                               maxiter=maxiter,
                               tol=tol,
                               **kwargs)
  params, _ = solver.run(init, A=None, b=b)
  return params
| 3,508 |
6,304 | /*
* Copyright 2018 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrDriverBugWorkarounds_DEFINED
#define GrDriverBugWorkarounds_DEFINED
// External embedders of Skia can override this to use their own list
// of workaround names.
#ifdef SK_GPU_WORKAROUNDS_HEADER
#include SK_GPU_WORKAROUNDS_HEADER
#else
// To regenerate this file, set gn arg "skia_generate_workarounds = true".
// This is not rebuilt by default to avoid embedders having to have extra
// build steps.
#include "include/gpu/GrDriverBugWorkaroundsAutogen.h"
#endif
#include "include/core/SkTypes.h"
#include <stdint.h>
#include <vector>
// One enumerator per workaround in the GPU_DRIVER_BUG_WORKAROUNDS X-macro
// list, followed by a sentinel holding the total count.
enum GrDriverBugWorkaroundType {
#define GPU_OP(type, name) type,
  GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
#undef GPU_OP
  NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES
};
// Holds one bool flag per known driver-bug workaround (all default-false).
// The flag members are generated from the same X-macro list as the enum above,
// so enum values and member names stay in one-to-one correspondence.
class SK_API GrDriverBugWorkarounds {
 public:
  GrDriverBugWorkarounds();
  GrDriverBugWorkarounds(const GrDriverBugWorkarounds&) = default;
  // Enables the workarounds whose enum values appear in |workarounds|.
  explicit GrDriverBugWorkarounds(const std::vector<int32_t>& workarounds);
  GrDriverBugWorkarounds& operator=(const GrDriverBugWorkarounds&) = default;

  // Turn on any workarounds listed in |workarounds| (but don't turn any off).
  void applyOverrides(const GrDriverBugWorkarounds& workarounds);

  ~GrDriverBugWorkarounds();

#define GPU_OP(type, name) bool name = false;
  GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
#undef GPU_OP
};
#endif
| 490 |
2,035 | <reponame>chakpongchung/tinyflow
"""Wrapping of certain ops for positional arguments.
Mainly because NNVM accepts kwargs for some additional arguments,
while TF sometimes support positional ops.
"""
from __future__ import absolute_import as _abs
from nnvm import symbol
from nnvm import _symbol_internal
def argmax(x, axis):
    """TF-style positional wrapper: argmax of symbol ``x`` along ``axis``."""
    return _symbol_internal._argmax(x, reduction_indices=[axis])
def zeros(shape):
    """TF-style positional wrapper: symbol of zeros with the given ``shape``."""
    return symbol.zeros(shape=shape)
def normal(shape, stdev=1.0):
    """TF-style positional wrapper: normal random symbol with std dev ``stdev``."""
    return symbol.normal(shape=shape, stdev=stdev)
| 171 |
348 | {"nom":"Guillac","circ":"4ème circonscription","dpt":"Morbihan","inscrits":997,"abs":450,"votants":547,"blancs":5,"nuls":8,"exp":534,"res":[{"nuance":"REM","nom":"<NAME>","voix":321},{"nuance":"FI","nom":"Mme <NAME>","voix":64},{"nuance":"FN","nom":"Mme <NAME>","voix":57},{"nuance":"LR","nom":"Mme <NAME>","voix":43},{"nuance":"ECO","nom":"Mme <NAME>","voix":19},{"nuance":"DLF","nom":"M. <NAME>","voix":10},{"nuance":"DIV","nom":"Mme <NAME>","voix":9},{"nuance":"EXD","nom":"M. <NAME>","voix":5},{"nuance":"EXG","nom":"M. <NAME>","voix":4},{"nuance":"DVD","nom":"M. <NAME>","voix":2},{"nuance":"ECO","nom":"Mme <NAME>","voix":0}]} | 260 |
348 | {"nom":"Belle-et-Houllefort","circ":"6ème circonscription","dpt":"Pas-de-Calais","inscrits":413,"abs":166,"votants":247,"blancs":7,"nuls":4,"exp":236,"res":[{"nuance":"REM","nom":"M<NAME>","voix":106},{"nuance":"LR","nom":"<NAME>","voix":48},{"nuance":"FN","nom":"M<NAME>","voix":34},{"nuance":"FI","nom":"Mme <NAME>","voix":22},{"nuance":"ECO","nom":"Mme <NAME>","voix":12},{"nuance":"DIV","nom":"Mme <NAME>","voix":6},{"nuance":"DLF","nom":"Mme <NAME>","voix":4},{"nuance":"COM","nom":"Mme <NAME>","voix":3},{"nuance":"EXG","nom":"Mme <NAME>","voix":1}]} | 229 |
575 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/javascript_dialogs/android/app_modal_dialog_view_android.h"
#include "base/android/jni_android.h"
#include "base/android/jni_string.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "components/javascript_dialogs/android/jni_headers/JavascriptAppModalDialog_jni.h"
#include "components/javascript_dialogs/app_modal_dialog_controller.h"
#include "components/javascript_dialogs/app_modal_dialog_manager.h"
#include "components/javascript_dialogs/app_modal_dialog_queue.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/web_contents.h"
#include "content/public/browser/web_contents_delegate.h"
#include "content/public/common/javascript_dialog_type.h"
#include "ui/android/window_android.h"
using base::android::AttachCurrentThread;
using base::android::ConvertUTF16ToJavaString;
using base::android::JavaParamRef;
using base::android::ScopedJavaGlobalRef;
using base::android::ScopedJavaLocalRef;
namespace javascript_dialogs {
// Holds the controller and a weak ref to the parent Android window; the weak
// ref lets us detect the window going away before the dialog is shown.
AppModalDialogViewAndroid::AppModalDialogViewAndroid(
    JNIEnv* env,
    AppModalDialogController* controller,
    gfx::NativeWindow parent)
    : controller_(controller),
      parent_jobject_weak_ref_(env, parent->GetJavaObject().obj()) {
  // Bring the tab that triggered the dialog to the foreground so the dialog
  // appears over the relevant contents.
  controller->web_contents()->GetDelegate()->ActivateContents(
      controller->web_contents());
}
// Builds the matching Java dialog (alert / confirm / beforeunload / prompt)
// and shows it parented to the Android window. If the parent window has
// already gone away, the dialog is cancelled instead.
void AppModalDialogViewAndroid::ShowAppModalDialog() {
  JNIEnv* env = AttachCurrentThread();
  // Keep a strong ref to the parent window while we make the call to java to
  // display the dialog.
  ScopedJavaLocalRef<jobject> parent_jobj = parent_jobject_weak_ref_.get(env);
  if (parent_jobj.is_null()) {
    CancelAppModalDialog();
    return;
  }

  ScopedJavaLocalRef<jobject> dialog_object;
  ScopedJavaLocalRef<jstring> title =
      ConvertUTF16ToJavaString(env, controller_->title());
  ScopedJavaLocalRef<jstring> message =
      ConvertUTF16ToJavaString(env, controller_->message_text());

  switch (controller_->javascript_dialog_type()) {
    case content::JAVASCRIPT_DIALOG_TYPE_ALERT: {
      dialog_object = Java_JavascriptAppModalDialog_createAlertDialog(
          env, title, message, controller_->display_suppress_checkbox());
      break;
    }
    case content::JAVASCRIPT_DIALOG_TYPE_CONFIRM: {
      // beforeunload dialogs are a special confirm flavor with reload wording.
      if (controller_->is_before_unload_dialog()) {
        dialog_object = Java_JavascriptAppModalDialog_createBeforeUnloadDialog(
            env, title, message, controller_->is_reload(),
            controller_->display_suppress_checkbox());
      } else {
        dialog_object = Java_JavascriptAppModalDialog_createConfirmDialog(
            env, title, message, controller_->display_suppress_checkbox());
      }
      break;
    }
    case content::JAVASCRIPT_DIALOG_TYPE_PROMPT: {
      ScopedJavaLocalRef<jstring> default_prompt_text =
          ConvertUTF16ToJavaString(env, controller_->default_prompt_text());
      dialog_object = Java_JavascriptAppModalDialog_createPromptDialog(
          env, title, message, controller_->display_suppress_checkbox(),
          default_prompt_text);
      break;
    }
    default:
      NOTREACHED();
  }

  // Keep a ref to the java side object until we get a confirm or cancel.
  dialog_jobject_.Reset(dialog_object);

  Java_JavascriptAppModalDialog_showJavascriptAppModalDialog(
      env, dialog_object, parent_jobj, reinterpret_cast<intptr_t>(this));
}
void AppModalDialogViewAndroid::ActivateAppModalDialog() {
  // This is called on desktop (Views) when interacting with a browser window
  // that does not host the currently active app modal dialog, as a way to
  // redirect activation to the app modal dialog host. It's not relevant on
  // Android.
  NOTREACHED();
}
// An externally-requested close is treated as a cancel; note that
// CancelAppModalDialog() deletes |this|.
void AppModalDialogViewAndroid::CloseAppModalDialog() {
  CancelAppModalDialog();
}
// Accepts the dialog with an empty prompt value and without suppressing
// further dialogs, then self-destructs.
void AppModalDialogViewAndroid::AcceptAppModalDialog() {
  std::u16string prompt_text;
  controller_->OnAccept(prompt_text, false);
  delete this;
}
// JNI callback from the Java dialog: user tapped OK. Forwards the (possibly
// empty) prompt text and the "suppress further dialogs" checkbox state, then
// self-destructs.
void AppModalDialogViewAndroid::DidAcceptAppModalDialog(
    JNIEnv* env,
    const JavaParamRef<jobject>&,
    const JavaParamRef<jstring>& prompt,
    bool should_suppress_js_dialogs) {
  std::u16string prompt_text =
      base::android::ConvertJavaStringToUTF16(env, prompt);
  controller_->OnAccept(prompt_text, should_suppress_js_dialogs);
  delete this;
}
// Cancels without suppressing further dialogs, then self-destructs.
void AppModalDialogViewAndroid::CancelAppModalDialog() {
  controller_->OnCancel(false);
  delete this;
}
// Unconditionally true — presumably this view object only exists while the
// Java dialog is up (it is deleted on accept/cancel). TODO(review): confirm
// against callers.
bool AppModalDialogViewAndroid::IsShowing() const {
  return true;
}
// JNI callback from the Java dialog: user cancelled/dismissed. Forwards the
// "suppress further dialogs" checkbox state, then self-destructs.
void AppModalDialogViewAndroid::DidCancelAppModalDialog(
    JNIEnv* env,
    const JavaParamRef<jobject>&,
    bool should_suppress_js_dialogs) {
  controller_->OnCancel(should_suppress_js_dialogs);
  delete this;
}
// Returns the global ref to the Java-side dialog (null before ShowAppModalDialog).
const ScopedJavaGlobalRef<jobject>& AppModalDialogViewAndroid::GetDialogObject()
    const {
  return dialog_jobject_;
}
AppModalDialogViewAndroid::~AppModalDialogViewAndroid() {
  // In case the dialog is still displaying, tell it to close itself.
  // This can happen if you trigger a dialog but close the Tab before it's
  // shown, and then accept the dialog.
  if (!dialog_jobject_.is_null()) {
    JNIEnv* env = AttachCurrentThread();
    Java_JavascriptAppModalDialog_dismiss(env, dialog_jobject_);
  }
}
// static
// JNI entry point: returns the Java object of the currently active app-modal
// dialog, or a null ref when no dialog (or no view for it) exists.
ScopedJavaLocalRef<jobject> JNI_JavascriptAppModalDialog_GetCurrentModalDialog(
    JNIEnv* env) {
  AppModalDialogController* controller =
      AppModalDialogQueue::GetInstance()->active_dialog();
  if (!controller || !controller->view())
    return ScopedJavaLocalRef<jobject>();

  AppModalDialogViewAndroid* js_dialog =
      static_cast<AppModalDialogViewAndroid*>(controller->view());
  return ScopedJavaLocalRef<jobject>(js_dialog->GetDialogObject());
}
} // namespace javascript_dialogs
| 2,069 |
659 | <reponame>xiaosimao/ip_pool
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by shimeng on 17-9-19
"""
代理网址及解析字典
status 代理状态, 若不想爬取此网站,可以将status设置为非active的任意值
request_method , 请求方法, 必写, 当为post的时候, 必须定义提交的post_data, 否则会报错.因项目的特殊性, 提交的数据中会带有页码数据, 所以在这里将
post_data 定义为列表, 里面的数据为字典格式
url 代理网址
parse_type 解析类型,默认提供: xpath, re
(1) xpath
ip_port_together ip地址和ip的端口是否在一个字段中
若为地址与端口在一起,则建议key为ip_address_and_port
若为地址与端口不在一起,则建议key为ip_address, ip_port
(2) re
若解析的类型为re, 则ip_port_together可以为任意的值
parse_method中只有一个键: _pattern
parse_func 解析函数, 默认值为system, 当需要使用自定义的解析函数的时候, 需要显式的定义该字段为自定义的解析函数
解析函数要有四个参数, 分别为value, html_content, parse_type, website_name
header 因网址较多, 所以在这里可以自定义头
"""
from custom_get_ip.get_ip_from_peauland import peauland_parser, peauland_format_post_data, peauland_header
# 定义检测的目标网站
target_urls = ['https://www.baidu.com', 'https://httpbin.org/get']
# 数据库集合名
collection_name = 'proxy'
# 数据库中IP存活时间阀值, 超过及对其重新检测
over_time = 1800
url_parse_dict = {
# data5u
'data5u': {
'status':'active',
'request_method':'get',
'url': ['http://www.data5u.com/free/{tag}/index.shtml'.format(tag=tag) for tag in ['gngn', 'gnpt', 'gwgn', 'gwpt']],
'parse_type': 'xpath',
'ip_port_together': False,
'parse_method':{
'ip_address': '//ul[@class="l2"]/span[1]/li/text()',
'ip_port': '//ul[@class="l2"]/span[2]/li/text()',
},
'parse_func': 'system'
},
# xicidaili
'xicidaili': {
'status': 'active',
'request_method': 'get',
'url': ['http://www.xicidaili.com/nn/{page}'.format(page=page) for page in range(1, 10)],
'parse_type': 'xpath',
'ip_port_together': False,
'parse_method': {
'ip_address': '//tr[@class="odd"]/td[2]/text()',
'ip_port': '//tr[@class="odd"]/td[3]/text()',
},
'parse_func': 'system'
},
# 66ip
'66ip': {
'status': 'active',
'request_method': 'get',
'url': ['http://m.66ip.cn/{page}.html'.format(page=page) for page in range(1, 10)],
'parse_type': 're',
'ip_port_together': False,
'parse_method': {
'_pattern': '<tr><td>([\d\.]*?)</td><td>(.*?)</td>',
},
'parse_func': 'system'
},
# 这个是国外的一个网站,如果你的网络无法访问,可以将status改为inactive, 很尴尬, 测了一下,好像都不行,哈哈
'proxylistplus': {
'status': 'active',
'request_method': 'get',
'url': ['https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-{page}'.format(page=page) for page in range(1, 5)],
'parse_type': 'xpath',
'ip_port_together': False,
'parse_method': {
'ip_address': '//table[@class="bg"]/tr[@class="cells"]/td[2]/text()',
'ip_port': '//table[@class="bg"]/tr[@class="cells"]/td[3]/text()',
},
'parse_func': 'system'
},
# proxydb
# 这个也是国外的一个网站,如果你的网络无法访问,可以将status改为inactive
# 这个网站采用的post方法, 需要将submit_data定义好, 采用自定义解析函数, 自定义的请求头
# 如果你也遇到变态的网站, 按照这个进行配置即可
'proxydb': {
'status': 'active',
'request_method': 'post',
'submit_data':peauland_format_post_data(),
'url': ['https://proxy.peuland.com/proxy/search_proxy.php'],
'parse_func': peauland_parser,
'header': peauland_header()
},
}
| 2,326 |
1,738 | <gh_stars>1000+
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.
#ifndef CRYINCLUDE_CRYCOMMON_ITIMER_H
#define CRYINCLUDE_CRYCOMMON_ITIMER_H
#pragma once
#include "TimeValue.h" // CTimeValue
#include "SerializeFwd.h"
struct tm;
// Summary:
// Interface to the Timer System.
struct ITimer
{
enum ETimer
{
ETIMER_GAME = 0, // Pausable, serialized, frametime is smoothed/scaled/clamped.
ETIMER_UI, // Non-pausable, non-serialized, frametime unprocessed.
ETIMER_LAST
};
enum ETimeScaleChannels
{
eTSC_Trackview = 0,
eTSC_GameStart
};
// <interfuscator:shuffle>
virtual ~ITimer() {};
// Summary:
// Resets the timer
// Notes:
// Only needed because float precision wasn't last that long - can be removed if 64bit is used everywhere.
virtual void ResetTimer() = 0;
// Summary:
// Updates the timer every frame, needs to be called by the system.
virtual void UpdateOnFrameStart() = 0;
// Summary:
// Returns the absolute time at the last UpdateOnFrameStart() call.
// Todo:
// Remove, use GetFrameStartTime() instead.
// See also:
// UpdateOnFrameStart(),GetFrameStartTime()
virtual float GetCurrTime(ETimer which = ETIMER_GAME) const = 0;
// Summary:
// Returns the absolute time at the last UpdateOnFrameStart() call.
// See also:
// UpdateOnFrameStart()
//virtual const CTimeValue& GetFrameStartTime(ETimer which = ETIMER_GAME) const = 0;
virtual const CTimeValue& GetFrameStartTime(ETimer which = ETIMER_GAME) const = 0;
// Summary:
// Returns the absolute current time.
// Notes:
// The value continuously changes, slower than GetFrameStartTime().
// See also:
// GetFrameStartTime()
virtual CTimeValue GetAsyncTime() const = 0;
// Summary:
// Returns the absolute current time at the moment of the call.
virtual float GetAsyncCurTime() = 0;
// Summary:
// Returns the relative time passed from the last UpdateOnFrameStart() in seconds.
// See also:
// UpdateOnFrameStart()
virtual float GetFrameTime(ETimer which = ETIMER_GAME) const = 0;
// Description:
// Returns the relative time passed from the last UpdateOnFrameStart() in seconds without any dilation, smoothing, clamping, etc...
// See also:
// UpdateOnFrameStart()
virtual float GetRealFrameTime() const = 0;
// Summary:
// Returns the time scale applied to time values.
virtual float GetTimeScale() const = 0;
// Summary:
// Returns the time scale factor for the given channel
virtual float GetTimeScale(uint32 channel) const = 0;
// Summary:
// Clears all current time scale requests
virtual void ClearTimeScales() = 0;
// Summary:
// Sets the time scale applied to time values.
virtual void SetTimeScale(float s, uint32 channel = 0) = 0;
// Summary:
// Enables/disables timer.
virtual void EnableTimer(bool bEnable) = 0;
// Return Value:
// True if timer is enabled
virtual bool IsTimerEnabled() const = 0;
// Summary:
// Returns the current framerate in frames/second.
virtual float GetFrameRate() = 0;
// Summary:
// Returns the fraction to blend current frame in profiling stats.
virtual float GetProfileFrameBlending(float* pfBlendTime = 0, int* piBlendMode = 0) = 0;
// Summary:
// Serialization.
virtual void Serialize(TSerialize ser) = 0;
// Summary:
// Tries to pause/unpause a timer.
// Return Value:
// True if successfully paused/unpaused, false otherwise.
virtual bool PauseTimer(ETimer which, bool bPause) = 0;
// Summary:
// Determines if a timer is paused.
// Returns:
// True if paused, false otherwise.
virtual bool IsTimerPaused(ETimer which) = 0;
// Summary:
// Tries to set a timer.
// Returns:
// True if successful, false otherwise.
virtual bool SetTimer(ETimer which, float timeInSeconds) = 0;
// Summary:
// Makes a tm struct from a time_t in UTC
// Example:
// Like gmtime.
virtual void SecondsToDateUTC(time_t time, struct tm& outDateUTC) = 0;
// Summary:
// Makes a UTC time from a tm.
// Example:
// Like timegm, but not available on all platforms.
virtual time_t DateToSecondsUTC(struct tm& timePtr) = 0;
// Summary
// Convert from ticks (CryGetTicks()) to seconds
//
virtual float TicksToSeconds(int64 ticks) = 0;
// Summary
// Get number of ticks per second
//
virtual int64 GetTicksPerSecond() = 0;
// Summary
// Create a new timer of the same type
//
virtual ITimer* CreateNewTimer() = 0;
/*!
This is similar to the cvar t_FixedStep. However it is stronger, and will cause even GetRealFrameTime to follow the fixed time stamp.
GetRealFrameTime will always return the same value as GetFrameTime. This mode is mostly intended for Feature tests that have strict requirements
for determinism. It will cause even fps counters to return a fixed value that does not match the actual fps. I could see this also being useful
if rendering a video.
*/
virtual void EnableFixedTimeMode(bool enable, float timeStep) = 0;
// </interfuscator:shuffle>
};
// Description:
//    Automatically profiles a section of code. Create an instance of this class at the
//    start of the section; when the instance is destroyed upon exiting the section, the
//    elapsed time is added to the supplied accumulator.
template <typename time>
class CITimerAutoProfiler
{
public:
CITimerAutoProfiler (ITimer* pTimer, time& rTime)
: m_pTimer (pTimer)
, m_rTime (rTime)
{
rTime -= pTimer->GetAsyncCurTime();
}
~CITimerAutoProfiler ()
{
m_rTime += m_pTimer->GetAsyncCurTime();
}
protected:
ITimer* m_pTimer;
time& m_rTime;
};
// Description:
//    Place the statement AUTO_PROFILE_SECTION(pITimer, g_fTimer) at the top of the scope
//    whose execution time must be measured.
//    The profiler timer is just some global or static float or double value that accumulates
//    the time (in seconds) spent in the given block of code.
//    pITimer is a pointer to the ITimer interface, g_fTimer is the global accumulator.
#define AUTO_PROFILE_SECTION(pITimer, g_fTimer) CITimerAutoProfiler<double> __section_auto_profiler(pITimer, g_fTimer)
#endif // CRYINCLUDE_CRYCOMMON_ITIMER_H
| 2,442 |
2,189 | import numpy as np
from typing import List
import plotly.graph_objects as go
def visualize_term_rank(topic_model,
                        topics: List[int] = None,
                        log_scale: bool = False,
                        width: int = 800,
                        height: int = 500) -> go.Figure:
    """ Visualize the ranks of all terms across all topics

    Each topic is represented by a set of words. These words, however,
    do not all equally represent the topic. This visualization shows
    how many words are needed to represent a topic and at which point
    the beneficial effect of adding words starts to decline.

    Arguments:
        topic_model: A fitted BERTopic instance.
        topics: A selection of topics to visualize. These will be colored
                red where all others will be colored black.
        log_scale: Whether to represent the ranking on a log scale
        width: The width of the figure.
        height: The height of the figure.

    Returns:
        fig: A plotly figure

    Usage:

    To visualize the ranks of all words across
    all topics simply run:

    ```python
    topic_model.visualize_term_rank()
    ```

    Or if you want to save the resulting figure:

    ```python
    fig = topic_model.visualize_term_rank()
    fig.write_html("path/to/file.html")
    ```

    <iframe src="../../tutorial/visualization/term_rank.html"
    style="width:1000px; height: 530px; border: 0px;""></iframe>

    <iframe src="../../tutorial/visualization/term_rank_log.html"
    style="width:1000px; height: 530px; border: 0px;""></iframe>

    Reference:

    This visualization was heavily inspired by the
    "Term Probability Decline" visualization found in an
    analysis by the amazing [tmtoolkit](https://tmtoolkit.readthedocs.io/).
    Reference to that specific analysis can be found
    [here](https://wzbsocialsciencecenter.github.io/tm_corona/tm_analysis.html).
    """
    topics = [] if topics is None else topics

    topic_ids = topic_model.get_topic_info().Topic.unique().tolist()
    topic_words = [topic_model.get_topic(topic) for topic in topic_ids]

    # Per-topic c-TF-IDF scores and their 1-based ranks.
    values = np.array([[value[1] for value in values] for values in topic_words])
    indices = np.array([[value + 1 for value in range(len(values))] for values in topic_words])

    # Create figure
    lines = []
    for topic, x, y in zip(topic_ids, indices, values):
        if not any(y > 1.5):

            # labels
            label = f"<b>Topic {topic}</b>:" + "_".join([word[0] for word in topic_model.get_topic(topic)])
            label = label[:50]

            # line parameters
            color = "red" if topic in topics else "black"
            opacity = 1 if topic in topics else .1
            # Replace zero scores with the smallest positive score so that a
            # log transform does not produce -inf.
            if any(y == 0):
                y[y == 0] = min(values[values > 0])
            y = np.log10(y, out=y, where=y > 0) if log_scale else y

            # BUGFIX: mode was "lines+lines", a duplicated flag; the canonical
            # plotly Scatter mode for a line trace is "lines".
            line = go.Scatter(x=x, y=y,
                              name="",
                              hovertext=label,
                              mode="lines",
                              opacity=opacity,
                              line=dict(color=color, width=1.5))
            lines.append(line)

    fig = go.Figure(data=lines)

    # Stylize layout
    fig.update_xaxes(range=[0, len(indices[0])], tick0=1, dtick=2)
    fig.update_layout(
        showlegend=False,
        template="plotly_white",
        title={
            'text': "<b>Term score decline per Topic</b>",
            'y': .9,
            'x': 0.5,
            'xanchor': 'center',
            'yanchor': 'top',
            'font': dict(
                size=22,
                color="Black")
        },
        width=width,
        height=height,
        hoverlabel=dict(
            bgcolor="white",
            font_size=16,
            font_family="Rockwell"
        ),
    )

    fig.update_xaxes(title_text='Term Rank')
    if log_scale:
        fig.update_yaxes(title_text='c-TF-IDF score (log scale)')
    else:
        fig.update_yaxes(title_text='c-TF-IDF score')

    return fig
| 1,829 |
4,054 | <filename>clustercontroller-utils/src/main/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/errors/InvalidContentException.java
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.clustercontroller.utils.staterestapi.errors;
/**
 * Exception signalling that the content of a State REST API request was invalid.
 * Translates to an HTTP 400 (Bad Request) response.
 */
public class InvalidContentException extends StateRestApiException {

    public InvalidContentException(String description) {
        super(description);
        // 400: the client supplied a request body the server cannot process.
        setHtmlCode(400);
        setHtmlStatus("Content of HTTP request had invalid data");
    }

}
| 175 |
2,151 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chromeos/components/proximity_auth/mock_proximity_auth_client.h"
#include "base/memory/ptr_util.h"
namespace proximity_auth {
MockProximityAuthClient::MockProximityAuthClient() {}

MockProximityAuthClient::~MockProximityAuthClient() {}

// Wraps the raw pointer returned by the mocked hook
// CreateCryptAuthClientFactoryPtr() into a std::unique_ptr, satisfying the
// unique_ptr-returning interface (presumably because the mock framework works
// with raw pointers — confirm against the mock declaration).
std::unique_ptr<cryptauth::CryptAuthClientFactory>
MockProximityAuthClient::CreateCryptAuthClientFactory() {
  return base::WrapUnique(CreateCryptAuthClientFactoryPtr());
}
} // namespace proximity_auth
| 192 |
15,179 | <filename>tests/distributed/test_workspaces/test_exceptions.py
import os
import pytest
from daemon.clients import JinaDClient
from jina import __default_host__
# Absolute directory of this test module; used to resolve fixture file paths
# such as 'wrong_flow.yml'.
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_error_in_partial_daemon():
    """Creating a broken Flow via JinaD must surface the underlying executor error."""
    jinad = JinaDClient(host=__default_host__, port=8000)
    ws_id = jinad.workspaces.create(
        paths=[os.path.join(cur_dir, 'wrong_flow.yml')]
    )
    err = jinad.flows.create(workspace_id=ws_id, filename='wrong_flow.yml')
    # The returned message must carry the full failure chain down to the
    # missing executor config file.
    for fragment in (
        'jina.excepts.RuntimeFailToStart',
        'jina.excepts.ExecutorFailToLoad',
        'FileNotFoundError: can not find executor_ex.yml',
    ):
        assert fragment in err
    # Clean up the workspace created for this test.
    assert jinad.workspaces.delete(id=ws_id)
def test_pea_error_in_partial_daemon():
    """Creating a pea with a missing py_module must fail and report the cause."""
    jinad = JinaDClient(host=__default_host__, port=8000)
    ws_id = jinad.workspaces.create()
    ok, err = jinad.peas.create(
        workspace_id=ws_id,
        payload={'name': 'blah-pea', 'py_modules': ['abc.py']},
    )
    assert not ok
    # The error message must carry the failure chain down to the missing module.
    for fragment in (
        'jina.excepts.RuntimeFailToStart',
        'FileNotFoundError: can not find abc.py',
    ):
        assert fragment in err
    # Clean up the workspace created for this test.
    assert jinad.workspaces.delete(id=ws_id)
| 491 |
984 | <filename>phoenix-pherf/src/test/java/org/apache/phoenix/pherf/workload/mt/WeightedRandomLoadEventGeneratorTest.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.pherf.workload.mt;
import org.apache.phoenix.pherf.PherfConstants;
import org.apache.phoenix.pherf.XMLConfigParserTest;
import org.apache.phoenix.pherf.configuration.DataModel;
import org.apache.phoenix.pherf.configuration.LoadProfile;
import org.apache.phoenix.pherf.configuration.Scenario;
import org.apache.phoenix.pherf.configuration.XMLConfigParser;
import org.apache.phoenix.pherf.util.PhoenixUtil;
import org.apache.phoenix.pherf.workload.mt.generators.TenantOperationInfo;
import org.apache.phoenix.pherf.workload.mt.generators.WeightedRandomLoadEventGenerator;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
 * Tests the various event generation outcomes based on scenario, model and load profile.
 *
 * Each test builds an expected (operation group x tenant group) distribution matrix
 * from the configured weights, draws a large number of events from the generator,
 * tallies the actual matrix, and asserts the two agree within a small variance.
 */
public class WeightedRandomLoadEventGeneratorTest {
    private static final Logger LOGGER = LoggerFactory.getLogger(
            WeightedRandomLoadEventGeneratorTest.class);

    // Operation groups of /scenario/test_evt_gen1.xml; ordinal() is used as the
    // row index of the distribution matrices below.
    private enum TestOperationGroup {
        upsertOp, queryOp1, queryOp2, idleOp, udfOp
    }

    // Operation groups of /scenario/test_evt_gen2.xml (some have zero weight in
    // the profile and get an auto-assigned share).
    private enum TestOperationGroup2 {
        upsertOp, queryOp1, queryOp2, queryOp3, queryOp4, queryOp5, queryOp6, queryOp7, queryOp8, idleOp, udfOp
    }

    // Tenant groups shared by both scenarios; ordinal() is the column index.
    private enum TestTenantGroup {
        tg1, tg2, tg3
    }

    /**
     * Reads and parses the given test data model XML resource from the classpath.
     */
    public DataModel readTestDataModel(String resourceName) throws Exception {
        URL scenarioUrl = XMLConfigParserTest.class.getResource(resourceName);
        assertNotNull(scenarioUrl);
        Path p = Paths.get(scenarioUrl.toURI());
        return XMLConfigParser.readDataModel(p);
    }

    /**
     * Case : where no operations and tenant groups have zero weight
     * @throws Exception
     */
    @Test
    public void testVariousEventGeneration() throws Exception {
        int numRuns = 10;
        int numOperations = 100000;
        // Weights are percentages, so (tenantWeight * opWeight) is out of 10000.
        double normalizedOperations = (double) (numOperations * numRuns) / 10000.0f;
        int numTenantGroups = 3;
        int numOpGroups = 5;
        PhoenixUtil pUtil = PhoenixUtil.create();
        Properties properties = PherfConstants
                .create().getProperties(PherfConstants.PHERF_PROPERTIES, false);

        DataModel model = readTestDataModel("/scenario/test_evt_gen1.xml");
        for (Scenario scenario : model.getScenarios()) {
            LOGGER.debug(String.format("Testing %s", scenario.getName()));
            LoadProfile loadProfile = scenario.getLoadProfile();
            assertEquals("tenant group size is not as expected: ",
                    numTenantGroups, loadProfile.getTenantDistribution().size());
            assertEquals("operation group size is not as expected: ",
                    numOpGroups, loadProfile.getOpDistribution().size());

            // Calculate the expected distribution.
            double[][] expectedDistribution = new double[numOpGroups][numTenantGroups];
            for (int r = 0; r < numOpGroups; r++) {
                for (int c = 0; c < numTenantGroups; c++) {
                    int tenantWeight = loadProfile.getTenantDistribution().get(c).getWeight();
                    int opWeight = loadProfile.getOpDistribution().get(r).getWeight();
                    expectedDistribution[r][c] = normalizedOperations * (tenantWeight * opWeight);
                    LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c]));
                }
            }

            WeightedRandomLoadEventGenerator evtGen = new WeightedRandomLoadEventGenerator(
                    pUtil, model, scenario, properties);

            // Calculate the actual distribution.
            double[][] distribution = new double[numOpGroups][numTenantGroups];
            for (int i = 0; i < numRuns; i++) {
                int ops = numOperations;
                loadProfile.setNumOperations(ops);
                while (ops-- > 0) {
                    TenantOperationInfo info = evtGen.next();
                    int row = TestOperationGroup.valueOf(info.getOperationGroupId()).ordinal();
                    int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal();
                    distribution[row][col]++;
                }
            }
            validateResults(numOpGroups, numTenantGroups, expectedDistribution, distribution);
        }
    }

    /**
     * Case : where some operations have zero weight
     */
    @Test
    public void testAutoAssignedPMFs() throws Exception {
        int numRuns = 50;
        int numOperations = 100000;
        // Weights are percentages, so (tenantWeight * opWeight) is out of 10000.
        double normalizedOperations = (double) (numOperations * numRuns) / 10000.0f;
        int numTenantGroups = 3;
        int numOpGroups = 11;
        PhoenixUtil pUtil = PhoenixUtil.create();
        Properties properties = PherfConstants
                .create().getProperties(PherfConstants.PHERF_PROPERTIES, false);

        DataModel model = readTestDataModel("/scenario/test_evt_gen2.xml");
        for (Scenario scenario : model.getScenarios()) {
            LOGGER.debug(String.format("Testing %s", scenario.getName()));
            LoadProfile loadProfile = scenario.getLoadProfile();
            assertEquals("tenant group size is not as expected: ",
                    numTenantGroups, loadProfile.getTenantDistribution().size());
            assertEquals("operation group size is not as expected: ",
                    numOpGroups, loadProfile.getOpDistribution().size());

            float totalOperationWeight = 0.0f;
            float autoAssignedOperationWeight = 0.0f;
            float remainingOperationWeight = 0.0f;
            int numAutoWeightedOperations = 0;
            // Sum the explicit weights; operations with non-positive weight get
            // an equal share of whatever remains of the 100% budget.
            for (int r = 0; r < numOpGroups; r++) {
                int opWeight = loadProfile.getOpDistribution().get(r).getWeight();
                if (opWeight > 0.0f) {
                    totalOperationWeight += opWeight;
                } else {
                    numAutoWeightedOperations++;
                }
            }

            remainingOperationWeight = 100.0f - totalOperationWeight;
            if (numAutoWeightedOperations > 0) {
                autoAssignedOperationWeight = remainingOperationWeight/((float) numAutoWeightedOperations);
            }
            LOGGER.debug(String.format("Auto [%d,%f] = %f", numAutoWeightedOperations,
                    remainingOperationWeight, autoAssignedOperationWeight ));

            // Calculate the expected distribution.
            double[][] expectedDistribution = new double[numOpGroups][numTenantGroups];
            for (int r = 0; r < numOpGroups; r++) {
                for (int c = 0; c < numTenantGroups; c++) {
                    float tenantWeight = loadProfile.getTenantDistribution().get(c).getWeight();
                    float opWeight = loadProfile.getOpDistribution().get(r).getWeight();
                    if (opWeight <= 0.0f) {
                        opWeight = autoAssignedOperationWeight;
                    }
                    expectedDistribution[r][c] = Math.round(normalizedOperations * (tenantWeight * opWeight));
                    LOGGER.debug(String.format("Expected [%d,%d] = %f", r, c, expectedDistribution[r][c]));
                }
            }

            WeightedRandomLoadEventGenerator evtGen = new WeightedRandomLoadEventGenerator(
                    pUtil, model, scenario, properties);

            // Calculate the actual distribution.
            double[][] distribution = new double[numOpGroups][numTenantGroups];
            for (int i = 0; i < numRuns; i++) {
                int ops = numOperations;
                loadProfile.setNumOperations(ops);
                while (ops-- > 0) {
                    TenantOperationInfo info = evtGen.next();
                    int row = TestOperationGroup2.valueOf(info.getOperationGroupId()).ordinal();
                    int col = TestTenantGroup.valueOf(info.getTenantGroupId()).ordinal();
                    distribution[row][col]++;
                }
            }
            validateResults(numOpGroups, numTenantGroups, expectedDistribution, distribution);
        }
    }

    /**
     * Asserts that every cell of the actual distribution lies within 5% of the
     * corresponding expected cell.
     */
    private void validateResults(int numOpGroups, int numTenantGroups,
            double[][] expectedDistribution,
            double[][] actualDistribution) throws Exception {

        double variancePercent = 0.05f; // 5 percent
        // Validate that the expected and actual distribution
        // is within the margin of allowed variance.
        for (int r = 0; r < numOpGroups; r++) {
            for (int c = 0; c < numTenantGroups; c++) {
                double allowedVariance = expectedDistribution[r][c] * variancePercent;
                double diff = Math.abs(expectedDistribution[r][c] - actualDistribution[r][c]);
                boolean isAllowed = diff < allowedVariance;
                LOGGER.debug(String.format("Actual[%d,%d] = %f, %f, %f",
                        r, c, actualDistribution[r][c], diff, allowedVariance));
                assertTrue(String.format("Difference is outside the allowed variance "
                        + "[expected = %f, actual = %f]", allowedVariance, diff), isAllowed);
            }
        }
    }
}
| 4,311 |
14,668 | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/wm/core/visibility_controller.h"
#include "ui/aura/test/aura_test_base.h"
#include "ui/aura/test/test_window_delegate.h"
#include "ui/aura/test/test_windows.h"
#include "ui/aura/window.h"
#include "ui/aura/window_event_dispatcher.h"
#include "ui/compositor/layer.h"
#include "ui/compositor/layer_animator.h"
#include "ui/compositor/scoped_animation_duration_scale_mode.h"
#include "ui/compositor/scoped_layer_animation_settings.h"
#include "ui/wm/core/window_animations.h"
namespace wm {
// Reuse the plain Aura test harness; no extra fixture state is needed.
typedef aura::test::AuraTestBase VisibilityControllerTest;
// Check that a transparency change to 0 will not cause a hide call to be
// ignored.
TEST_F(VisibilityControllerTest, AnimateTransparencyToZeroAndHideHides) {
  // We cannot disable animations for this test.
  ui::ScopedAnimationDurationScaleMode test_duration_mode(
      ui::ScopedAnimationDurationScaleMode::NON_ZERO_DURATION);

  VisibilityController controller;
  aura::client::SetVisibilityClient(root_window(), &controller);

  SetChildWindowVisibilityChangesAnimated(root_window());

  aura::test::TestWindowDelegate d;
  std::unique_ptr<aura::Window> window(aura::test::CreateTestWindowWithDelegate(
      &d, -2, gfx::Rect(0, 0, 50, 50), root_window()));
  ui::ScopedLayerAnimationSettings settings(window->layer()->GetAnimator());
  settings.SetTransitionDuration(base::Milliseconds(5));

  EXPECT_TRUE(window->layer()->visible());
  EXPECT_TRUE(window->IsVisible());

  // The opacity change starts an animation; while it runs the layer and the
  // window remain visible, with the target opacity already set to zero.
  window->layer()->SetOpacity(0.0);
  EXPECT_TRUE(window->layer()->visible());
  EXPECT_TRUE(window->IsVisible());
  EXPECT_TRUE(window->layer()->GetAnimator()->
                  IsAnimatingProperty(ui::LayerAnimationElement::OPACITY));
  EXPECT_EQ(0.0f, window->layer()->GetTargetOpacity());

  // Check that the visibility is correct after the hide animation has finished.
  window->Hide();
  window->layer()->GetAnimator()->StopAnimating();
  EXPECT_FALSE(window->layer()->visible());
  EXPECT_FALSE(window->IsVisible());
}
// Check that a hiding animation would not change a window's bounds in screen.
TEST_F(VisibilityControllerTest, HideAnimationWindowBoundsTest) {
  // We cannot disable animations for this test.
  ui::ScopedAnimationDurationScaleMode test_duration_mode(
      ui::ScopedAnimationDurationScaleMode::NON_ZERO_DURATION);

  VisibilityController controller;
  aura::client::SetVisibilityClient(root_window(), &controller);

  // Set bound expectation.
  gfx::Rect expected_bounds(4, 5, 123, 245);

  aura::test::TestWindowDelegate d;
  std::unique_ptr<aura::Window> window(aura::test::CreateTestWindowWithDelegate(
      &d, -2, expected_bounds, root_window()));
  window->Show();

  // Use the "drop" hide animation, which transforms the layer while running.
  SetWindowVisibilityChangesAnimated(window.get());
  SetWindowVisibilityAnimationDuration(window.get(), base::Milliseconds(5));
  SetWindowVisibilityAnimationType(window.get(),
                                   WINDOW_VISIBILITY_ANIMATION_TYPE_DROP);

  // Check that the bound is correct after the hide animation has finished.
  window->Hide();
  window->layer()->GetAnimator()->StopAnimating();
  EXPECT_EQ(expected_bounds, window->GetBoundsInScreen());
}
// Test if SetWindowVisibilityChangesAnimated will animate the specified
// window.
// BUGFIX: test name and comment previously misspelled "Changes" as
// "Chagnes"/"Chagnges"; renamed for consistency with the API under test.
TEST_F(VisibilityControllerTest, SetWindowVisibilityChangesAnimated) {
  // We cannot disable animations for this test.
  ui::ScopedAnimationDurationScaleMode test_duration_mode(
      ui::ScopedAnimationDurationScaleMode::NON_ZERO_DURATION);

  VisibilityController controller;
  aura::client::SetVisibilityClient(root_window(), &controller);

  aura::test::TestWindowDelegate d;
  std::unique_ptr<aura::Window> window(aura::test::CreateTestWindowWithDelegate(
      &d, -2, gfx::Rect(0, 0, 50, 50), root_window()));
  // Test using Show animation because Hide animation detaches the window's
  // layer.
  window->Hide();
  ASSERT_FALSE(window->IsVisible());

  SetWindowVisibilityChangesAnimated(window.get());
  SetWindowVisibilityAnimationDuration(window.get(), base::Milliseconds(5));
  SetWindowVisibilityAnimationType(window.get(),
                                   WINDOW_VISIBILITY_ANIMATION_TYPE_FADE);

  // Show() starts a fade-in: target opacity is 1 while the current opacity is
  // still 0 until the animation completes.
  window->Show();
  EXPECT_TRUE(window->layer()->GetAnimator()->is_animating());
  EXPECT_EQ(1.0f, window->layer()->GetTargetOpacity());
  EXPECT_EQ(0.0f, window->layer()->opacity());

  window->layer()->GetAnimator()->StopAnimating();
  EXPECT_EQ(1.0f, window->layer()->GetTargetOpacity());
  EXPECT_EQ(1.0f, window->layer()->opacity());
}
} // namespace wm
| 1,555 |
421 | <filename>samples/snippets/cpp/VS_Snippets_CLR/StringCompareTo/CPP/stringcompareto.cpp
//<snippet1>
using namespace System;
// Describe the relative position of two strings in the sort order, as
// reported by String::CompareTo on the first string.
String^ CompareStrings(String^ str1, String^ str2)
{
    // CompareTo returns a negative, zero, or positive value depending on
    // whether str1 sorts before, equal to, or after str2.
    int order = str1->CompareTo(str2);
    if (order < 0)
        return "The first string precedes the second in the sort order.";
    if (order > 0)
        return "The first string follows the second in the sort order.";
    // the values are the same
    return "The strings occur in the same position in the sort order.";
}
int main()
{
String^ strFirst = "Goodbye";
String^ strSecond = "Hello";
String^ strThird = "a small String*";
String^ strFourth = "goodbye";
// Compare a string to itself.
Console::WriteLine(CompareStrings(strFirst, strFirst));
Console::WriteLine(CompareStrings(strFirst, strSecond));
Console::WriteLine(CompareStrings(strFirst, strThird));
// Compare a string to another string that varies only by case.
Console::WriteLine(CompareStrings(strFirst, strFourth));
Console::WriteLine(CompareStrings(strFourth, strFirst));
}
// The example displays the following output:
// The strings occur in the same position in the sort order.
// The first string precedes the second in the sort order.
// The first string follows the second in the sort order.
// The first string follows the second in the sort order.
// The first string precedes the second in the sort order.
//</snippet1>
| 541 |
1,702 | <reponame>mhalbritter/spring-native
/*
* Copyright 2020-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aop.framework;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.aop.AopInvocationException;
import org.springframework.aop.RawTargetAccess;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
/**
* Represent an AOP proxy that is expected to be loadable from disk. If it cannot be loaded
* a message is constructed indicating the hint the user should provide in the application
* to generate it at build time.
*
* @author <NAME>
* @author <NAME>
*/
@SuppressWarnings("serial")
public class BuildTimeAopProxy implements AopProxy, Serializable {
protected static final Log logger = LogFactory.getLog(BuildTimeAopProxy.class);
/**
* A cache that is used for avoiding repeated proxy creation.
*/
private static final Map<Object, Class<?>> cache = new ConcurrentHashMap<>();
/**
* Keeps track of the Classes that we have validated for final methods.
*/
private static final Map<Class<?>, Boolean> validatedClasses = new WeakHashMap<>();
/**
* The object used to configure this proxy.
*/
protected final AdvisedSupport advised;
@Nullable
protected Object[] constructorArgs;
@Nullable
protected Class<?>[] constructorArgTypes;
/**
* Create a new BuildTimeAopProxy for the given AOP configuration.
*
* @param config the AOP configuration as AdvisedSupport object
* @throws AopConfigException if the config is invalid. We try to throw an informative
* exception in this case, rather than let a mysterious failure happen later.
*/
public BuildTimeAopProxy(AdvisedSupport config) throws AopConfigException {
Assert.notNull(config, "AdvisedSupport must not be null");
if (config.getAdvisors().length == 0 && config.getTargetSource() == AdvisedSupport.EMPTY_TARGET_SOURCE) {
throw new AopConfigException("No advisors and no TargetSource specified");
}
this.advised = config;
}
/**
* Set constructor arguments to use for creating the proxy.
*
* @param constructorArgs the constructor argument values
* @param constructorArgTypes the constructor argument types
*/
public void setConstructorArguments(@Nullable Object[] constructorArgs, @Nullable Class<?>[] constructorArgTypes) {
if (constructorArgs == null || constructorArgTypes == null) {
throw new IllegalArgumentException("Both 'constructorArgs' and 'constructorArgTypes' need to be specified");
}
if (constructorArgs.length != constructorArgTypes.length) {
throw new IllegalArgumentException("Number of 'constructorArgs' (" + constructorArgs.length +
") must match number of 'constructorArgTypes' (" + constructorArgTypes.length + ")");
}
this.constructorArgs = constructorArgs;
this.constructorArgTypes = constructorArgTypes;
}
@Override
public Object getProxy() {
return getProxy(null);
}
@Override
public Object getProxy(@Nullable ClassLoader classLoader) {
if (logger.isDebugEnabled()) {
logger.debug("Creating Build Time Proxy: target source is " + this.advised.getTargetSource());
}
try {
Class<?> rootClass = this.advised.getTargetClass();
Assert.state(rootClass != null, "Target class must be available for creating a Build Time Proxy");
Class<?> proxySuperClass = rootClass;
if (rootClass.getName().contains(ClassUtils.CGLIB_CLASS_SEPARATOR)) {
proxySuperClass = rootClass.getSuperclass();
Class<?>[] additionalInterfaces = rootClass.getInterfaces();
for (Class<?> additionalInterface : additionalInterfaces) {
if (additionalInterface != _AdvisedSupportAware.class) {
this.advised.addInterface(additionalInterface);
}
}
}
validateClassIfNecessary(proxySuperClass, classLoader);
ClassLoader targetClassLoader;
if (classLoader == null) {
targetClassLoader = proxySuperClass.getClassLoader();
if (targetClassLoader == null) {
targetClassLoader = getClass().getClassLoader();
}
} else {
targetClassLoader = classLoader;
}
ProxyConfiguration configuration = ProxyConfiguration.get(advised, targetClassLoader);
Class<?> proxyType = cache.get(configuration);
if (proxyType == null) {
synchronized (cache) {
proxyType = cache.get(configuration);
if (proxyType == null) {
proxyType = attemptToLoadProxyClass(configuration, targetClassLoader);
if (proxyType == null) {
throw new IllegalStateException("Class proxy missing at runtime, hint required at build time: "+
configuration.asHint());
}
cache.put(configuration, proxyType);
}
}
}
Object proxy = createProxyInstance(proxyType);
((_AdvisedSupportAware) proxy)._setAdvised(this.advised);
return proxy;
} catch (IllegalStateException ex) {
throw new AopConfigException("Unexpected problem loading and instantiating proxy for target class "+
advised.getTargetClass() , ex);
}
catch (Exception ex) {
throw new AopConfigException("Unexpected AOP exception", ex);
}
}
private static Class<?> attemptToLoadProxyClass(ProxyConfiguration configuration, ClassLoader classLoader) {
logger.info("Attempting discovery (load) of build time generated proxy for class: "+configuration.getTargetClass());
String proxyClassName = configuration.getProxyClassName();
try {
Class<?> proxyClass = ClassUtils.resolveClassName(proxyClassName, classLoader);
logger.info("Suitable proxy found with name "+proxyClassName);
return proxyClass;
} catch (Throwable t) {
logger.info("No suitable proxy found with name "+proxyClassName);
}
return null;
}
protected Object createProxyInstance(Class<?> proxyClass) throws Exception {
return this.constructorArgs != null ?
proxyClass.getDeclaredConstructor(this.constructorArgTypes).newInstance(this.constructorArgs) :
proxyClass.getDeclaredConstructor().newInstance();
}
/**
* Checks to see whether the supplied {@code Class} has already been validated and
* validates it if not.
*/
private void validateClassIfNecessary(Class<?> proxySuperClass, @Nullable ClassLoader proxyClassLoader) {
if (logger.isInfoEnabled()) {
synchronized (validatedClasses) {
if (!validatedClasses.containsKey(proxySuperClass)) {
doValidateClass(proxySuperClass, proxyClassLoader);
validatedClasses.put(proxySuperClass, Boolean.TRUE);
}
}
}
}
/**
* Checks for final methods on the given {@code Class}, as well as package-visible
* methods across ClassLoaders, and writes warnings to the log for each one found.
*/
private void doValidateClass(Class<?> proxySuperClass, @Nullable ClassLoader proxyClassLoader) {
if (Object.class != proxySuperClass) {
Method[] methods = proxySuperClass.getDeclaredMethods();
for (Method method : methods) {
int mod = method.getModifiers();
if (!Modifier.isStatic(mod)) {
if (Modifier.isFinal(mod)) {
logger.info("Unable to proxy method [" + method + "] because it is final: " +
"All calls to this method via a proxy will NOT be routed to the target instance.");
}
else if (!Modifier.isPublic(mod) && !Modifier.isProtected(mod) && !Modifier.isPrivate(mod) &&
proxyClassLoader != null && proxySuperClass.getClassLoader() != proxyClassLoader) {
logger.info("Unable to proxy method [" + method + "] because it is package-visible " +
"across different ClassLoaders: All calls to this method via a proxy will " +
"NOT be routed to the target instance.");
}
}
}
doValidateClass(proxySuperClass.getSuperclass(), proxyClassLoader);
}
}
/**
* Process a return value. Wraps a return of {@code this} if necessary to be the
* {@code proxy} and also verifies that {@code null} is not returned as a primitive.
*/
@Nullable
private static Object processReturnType(
Object proxy, @Nullable Object target, Method method, @Nullable Object returnValue) {
// Massage return value if necessary
if (returnValue != null && returnValue == target &&
!RawTargetAccess.class.isAssignableFrom(method.getDeclaringClass())) {
// Special case: it returned "this". Note that we can't help
// if the target sets a reference to itself in another returned object.
returnValue = proxy;
}
Class<?> returnType = method.getReturnType();
if (returnValue == null && returnType != Void.TYPE && returnType.isPrimitive()) {
throw new AopInvocationException(
"Null return value from advice does not match primitive return type for: " + method);
}
return returnValue;
}
@Override
public boolean equals(Object other) {
return (this == other || (other instanceof BuildTimeAopProxy &&
AopProxyUtils.equalsInProxy(this.advised, ((BuildTimeAopProxy) other).advised)));
}
/**
 * Hash code derived from this proxy class and the configured
 * {@code TargetSource} of the {@code advised} configuration.
 */
@Override
public int hashCode() {
    return BuildTimeAopProxy.class.hashCode() * 13 + this.advised.getTargetSource().hashCode();
}
}
| 3,074 |
979 | <gh_stars>100-1000
#include "bec/bitfield-enum-class.hpp"
#include <iostream>
// Collectible keys; each enumerator occupies a distinct bit so values can be
// combined into a bitmask.
enum class Key
{
    None = 0,
    Red = 1 << 0,
    Green = 1 << 1,
    Blue = 1 << 2,
    Purple = 1 << 3,
    Yellow = 1 << 4,
    Orange = 1 << 5,
};

// Opt Key into the bec bitwise operators (|, &, |=, ...).
template<>
struct bec::EnableBitMaskOperators<Key>
{
    static const bool Enable = true;
};
// Participating teams; each enumerator occupies a distinct bit so values can
// be combined into a bitmask.
enum class Team
{
    None = 0,
    Green = 1 << 0,
    Purple = 1 << 1,
    Orange = 1 << 2,
    Yellow = 1 << 3,
    Red = 1 << 4,
    Blue = 1 << 5,
};

// Opt Team into the bec bitwise operators (|, &, |=, ...).
template<>
struct bec::EnableBitMaskOperators<Team>
{
    static const bool Enable = true;
};
// simple example program using bitfield-enum-class library
int main(int argc, char** argv) {
using bec::operator&;
using bec::operator|;
using bec::operator|=;
Key collected_keys = Key::None;
collected_keys |= Key::Red | Key::Green;
if ((collected_keys & Key::Red) == Key::Red) {
std::cout << "Collected the Red key\n";
}
Team participating_teams = Team::None;
participating_teams |= Team::Blue | Team::Purple;
if ((participating_teams & Team::Blue) == Team::Blue) {
std::cout << "Blue team is participating\n";
}
return 0;
}
| 509 |
438 | <filename>src/languages/en-upside/music/join.json<gh_stars>100-1000
{
"DESCRIPTION": "˙lǝuuɐɥɔ ǝɔᴉoʌ ɹnoʎ uᴉoɾ ʇoq ǝɥʇ sǝʞɐW",
"USAGE": "join",
"NO_VC": "˙oʇ ʇɔǝuuoɔ uɐɔ I ʇɐɥʇ lǝuuɐɥɔ ǝɔᴉoʌ ɐ uᴉ ʇou ǝɹ,no⅄",
  "JOIN": "˙lǝuuɐɥɔ ǝɥʇ pǝuᴉoɾ ʎllnɟssǝɔɔns ǝʌɐɥ I",
"MOVED": "˙lǝuuɐɥɔ pǝʌoɯ ʎllnɟssǝɔɔns ǝʌɐɥ I"
}
| 254 |
892 | <reponame>westonsteimel/advisory-database-github<gh_stars>100-1000
{
"schema_version": "1.2.0",
"id": "GHSA-h445-hrmv-gh6r",
"modified": "2022-04-29T01:27:19Z",
"published": "2022-04-29T01:27:19Z",
"aliases": [
"CVE-2003-0995"
],
"details": "Buffer overflow in the Microsoft Message Queue Manager (MSQM) allows remote attackers to cause a denial of service (RPC service crash) via a queue registration request.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2003-0995"
},
{
"type": "WEB",
"url": "https://docs.microsoft.com/en-us/security-updates/securitybulletins/2003/ms03-039"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/13131"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 419 |
312 | // Copyright <NAME> 2020
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#if !defined(MQTT_STRAND_HPP)
#define MQTT_STRAND_HPP
#include <boost/asio/io_context.hpp>
#include <boost/asio/strand.hpp>
#include <mqtt/namespace.hpp>
namespace MQTT_NS {

namespace as = boost::asio;

// Determines which strand to use
#if defined(MQTT_NO_TS_EXECUTORS)

// Use standard executor style strand
using strand = as::strand<as::io_context::executor_type>;

#else  // defined(MQTT_NO_TS_EXECUTORS)

// Use networking TS style strand
using strand = as::io_context::strand;

#endif // defined(MQTT_NO_TS_EXECUTORS)

} // namespace MQTT_NS
#endif // MQTT_STRAND_HPP
| 286 |
13,111 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.configuration.api;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.skywalking.oap.server.library.module.ModuleConfig;
import org.apache.skywalking.oap.server.library.module.ModuleDefine;
import org.apache.skywalking.oap.server.library.module.ModuleProvider;
import org.apache.skywalking.oap.server.library.module.ModuleStartException;
import org.apache.skywalking.oap.server.library.module.ServiceNotProvidedException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.powermock.reflect.Whitebox;
/**
 * Tests for {@link ConfigWatcherRegister}: registered single-value and group
 * watchers must be notified with the values served by the (mocked) config
 * source on {@code configSync()}, and the internal register tables must render
 * the expected summary text.
 */
public class ConfigWatcherRegisterTest {

    private ConfigWatcherRegister register;

    @Before
    public void setup() {
        register = new MockConfigWatcherRegister();
    }

    @After
    public void tearDown() {
        register = null;
    }

    /** A watcher bound to "prop2" receives the value served by the mock source. */
    @Test
    public void testInit() {
        // One-element array so the anonymous inner watcher can publish its result.
        final String[] newValue = new String[1];
        register.registerConfigChangeWatcher(new ConfigChangeWatcher("MockModule", new MockProvider(), "prop2") {
            @Override
            public void notify(ConfigChangeEvent value) {
                newValue[0] = value.getNewValue();
            }

            @Override
            public String value() {
                return null;
            }
        });

        register.configSync();

        Assert.assertEquals("abc2", newValue[0]);
    }

    /** A group watcher receives every item of its group in one notification. */
    @Test
    public void testGroupConfInit() {
        final Map<String, String> config = new ConcurrentHashMap<>();
        register.registerConfigChangeWatcher(new GroupConfigChangeWatcher("MockModule", new MockProvider(), "groupItems1") {
            @Override
            public void notifyGroup(Map<String, ConfigChangeEvent> groupItems) {
                groupItems.forEach((groupItemName, event) -> {
                    config.put(groupItemName, event.getNewValue());
                });
            }

            @Override
            public Map<String, String> groupItems() {
                return config;
            }
        });

        register.configSync();

        Assert.assertEquals("abc", config.get("item1"));
        Assert.assertEquals("abc2", config.get("item2"));
    }

    /** The register tables print one summary line per registered watcher. */
    @Test
    public void testRegisterTableLog() {
        register.registerConfigChangeWatcher(new ConfigChangeWatcher("MockModule", new MockProvider(), "prop2") {
            @Override
            public void notify(ConfigChangeEvent value) {
            }

            @Override
            public String value() {
                return null;
            }
        });
        register.registerConfigChangeWatcher(new GroupConfigChangeWatcher("MockModule", new MockProvider(), "groupItems1") {
            @Override
            public Map<String, String> groupItems() {
                return null;
            }

            @Override
            public void notifyGroup(final Map<String, ConfigChangeEvent> groupItems) {
            }
        });

        register.configSync();

        // Reach into the private register tables to verify their rendering.
        ConfigWatcherRegister.Register registerTable = Whitebox.getInternalState(this.register, "singleConfigChangeWatcherRegister");
        ConfigWatcherRegister.Register groupRegisterTable = Whitebox.getInternalState(this.register, "groupConfigChangeWatcherRegister");

        String expected = "Following dynamic config items are available." + ConfigWatcherRegister.LINE_SEPARATOR + "---------------------------------------------" + ConfigWatcherRegister.LINE_SEPARATOR + "key:MockModule.provider.prop2 module:MockModule provider:provider value(current):null" + ConfigWatcherRegister.LINE_SEPARATOR;
        String groupConfigExpected = "Following dynamic config items are available." + ConfigWatcherRegister.LINE_SEPARATOR + "---------------------------------------------" + ConfigWatcherRegister.LINE_SEPARATOR + "key:MockModule.provider.groupItems1 module:MockModule provider:provider groupItems(current):null" + ConfigWatcherRegister.LINE_SEPARATOR;

        Assert.assertEquals(expected, registerTable.toString());
        Assert.assertEquals(groupConfigExpected, groupRegisterTable.toString());
    }

    /** Mock config source serving fixed single-value and group config data. */
    public static class MockConfigWatcherRegister extends ConfigWatcherRegister {
        @Override
        public Optional<ConfigTable> readConfig(Set<String> keys) {
            ConfigTable.ConfigItem item1 = new ConfigTable.ConfigItem("MockModule.provider.prop1", "abc");
            ConfigTable.ConfigItem item2 = new ConfigTable.ConfigItem("MockModule.provider.prop2", "abc2");
            ConfigTable table = new ConfigTable();
            table.add(item1);
            table.add(item2);
            return Optional.of(table);
        }

        @Override
        public Optional<GroupConfigTable> readGroupConfig(Set<String> keys) {
            ConfigTable.ConfigItem item1 = new ConfigTable.ConfigItem("item1", "abc");
            ConfigTable.ConfigItem item2 = new ConfigTable.ConfigItem("item2", "abc2");
            ConfigTable.ConfigItem item3 = new ConfigTable.ConfigItem("item3", "abc3");
            GroupConfigTable.GroupConfigItems groupConfigItems1 = new GroupConfigTable.GroupConfigItems("MockModule.provider.groupItems1");
            GroupConfigTable.GroupConfigItems groupConfigItems2 = new GroupConfigTable.GroupConfigItems("MockModule.provider.groupItems2");
            groupConfigItems1.add(item1);
            groupConfigItems1.add(item2);
            groupConfigItems2.add(item3);
            GroupConfigTable table = new GroupConfigTable();
            table.addGroupConfigItems(groupConfigItems1);
            table.addGroupConfigItems(groupConfigItems2);
            return Optional.of(table);
        }
    }

    /** Minimal module definition used only to name the module under test. */
    public static class MockModule extends ModuleDefine {
        public MockModule() {
            super("MockModule");
        }

        @Override
        public Class[] services() {
            return new Class[0];
        }
    }

    /** Provider stub with no services and a no-op lifecycle. */
    public static class MockProvider extends ModuleProvider {
        @Override
        public String name() {
            return "provider";
        }

        @Override
        public Class<? extends ModuleDefine> module() {
            return MockModule.class;
        }

        @Override
        public ModuleConfig createConfigBeanIfAbsent() {
            return null;
        }

        @Override
        public void prepare() throws ServiceNotProvidedException, ModuleStartException {
        }

        @Override
        public void start() throws ServiceNotProvidedException, ModuleStartException {
        }

        @Override
        public void notifyAfterCompleted() throws ServiceNotProvidedException, ModuleStartException {
        }

        @Override
        public String[] requiredModules() {
            return new String[0];
        }
    }
}
1,025 | # encoding: utf-8
import datetime
import json
import logging
import urllib.parse
import urllib.request
import redis
import time
# --- date range start ---
start_date = time.strftime("%Y%m%d", time.localtime())
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
# NOTE(review): the name suggests 15 days ago but the value is 11 -- confirm intent.
fifteenago = today - datetime.timedelta(days=11)
print(str(yesterday).replace("-", ""), str(fifteenago).replace("-", ""))
# Report window used by all API calls below: [11 days ago, yesterday], as YYYYMMDD.
end, start = str(yesterday).replace("-", ""), str(fifteenago).replace("-", "")
# --- date range end ---

base_url = "https://api.baidu.com/json/tongji/v1/ReportService/getData"

# TODO redis address; the password is a redaction placeholder and must be restored.
pool = redis.ConnectionPool(host='172.16.58.3', port=31857, password='<PASSWORD>')
r = redis.Redis(connection_pool=pool)

# logging: full debug log to file, INFO and above mirrored to the console
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='myapp.log',
                    filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
class Baidu(object):
    """Client for the Baidu Tongji (Analytics) ReportService getData API.

    Each ``get*`` method pulls one report slice for the configured site over
    the module-level ``start``/``end`` window and caches the result in Redis.
    """

    def __init__(self, siteId, username, password, token):
        # Credentials for the Tongji API "header" section of each request.
        self.siteId = siteId
        self.username = username
        self.password = password
        self.token = token

    def getresult(self, start_date, end_date, method, metrics, **kw):
        """POST one ReportService/getData request; return the raw JSON text.

        Extra keyword args are merged into the request "body" section
        (e.g. ``viewType='visitor'``).
        """
        base_url = "https://api.baidu.com/json/tongji/v1/ReportService/getData"
        # NOTE(review): "<PASSWORD>" is a redaction placeholder and is not valid
        # Python -- the real value (self.password) must be restored here.
        body = {"header": {"account_type": 1, "password": <PASSWORD>, "token": self.token,
                           "username": self.username},
                "body": {"siteId": self.siteId, "method": method, "start_date": start_date,
                         "end_date": end_date, "metrics": metrics}}
        for key in kw:
            body['body'][key] = kw[key]
        data = bytes(json.dumps(body), 'utf8')
        req = urllib.request.Request(base_url, data)
        response = urllib.request.urlopen(req)
        the_page = response.read()
        logging.info("从百度返回结果")
        return the_page.decode("utf-8")

    def getPvUvAvgTime(self):  # fetch PV, UV and average visit time trend
        result = self.getresult(start, end, "overview/getTimeTrendRpt",
                                "pv_count,visitor_count,ip_count,bounce_ratio,avg_visit_time")
        # items[0] holds the row labels (dates), items[1] the metric rows.
        result = json.loads(result)["body"]["data"][0]["result"]["items"]
        data = result[0]
        # NOTE(review): hard-coded "2017/" prefix strip only works for 2017 data.
        daterange = [str(x[0]).replace("2017/", "") for x in data]
        # The API reports missing values as '--'; normalize them to 0.
        pv_count = [x[0] if x[0] != '--' else 0 for x in result[1]]
        visitor_count = [x[1] if x[1] != '--' else 0 for x in result[1]]
        ip_count = [x[2] if x[2] != '--' else 0 for x in result[1]]
        bounce_ratio = [x[3] if x[3] != '--' else 0 for x in result[1]]
        avg_visit_time = [round(x[4] / 60, 2) if x[4] != '--' else 0 for x in result[1]]  # seconds -> minutes
        pv_sum = sum(pv_count)
        # NOTE(review): "uv_sum" is computed from ip_count, not visitor_count --
        # confirm whether IP count is really the intended UV figure.
        uv_sum = sum(ip_count)
        r.set("pv_sum", pv_sum)
        r.set("uv_sum", uv_sum)
        r.set("daterange", str(daterange))
        r.set("pv_count", str(pv_count))
        r.set("visitor_count", str(visitor_count))
        r.set("ip_count", str(ip_count))
        r.set("bounce_ratio", str(bounce_ratio))
        r.set("avg_visit_time", str(avg_visit_time))
        logging.info("PV,UV,AvgTime")

    def getRukouYeMian(self):  # top entry pages ("rukou yemian")
        result = self.getresult(start, end, "source/all/a",
                                "pv_count,visitor_count,avg_visit_time", viewType='visitor')
        data = json.loads(result)["body"]["data"][0]["result"]["items"]
        name = [item[0]['name'] for item in data[0]]
        count = 0
        tojson = []
        for item in data[1]:
            temp = {}
            temp["name"] = name[count]
            temp["pv_count"] = item[0]
            temp["visitor_count"] = item[1]
            # NOTE(review): requested metric is avg_visit_time but the key says
            # average_stay_time -- downstream readers rely on this key name.
            temp["average_stay_time"] = item[2]
            tojson.append(temp)
            count = count + 1
        # Only the top 5 entries are cached despite the "top ten" wording.
        r.set("rukouyemian", json.dumps(tojson[:5]))
        logging.info("前十入口页面")

    def getAllSource(self):  # fetch all traffic sources
        result = self.getresult(start, end, "source/all/a",
                                "pv_count,pv_ratio,visitor_count")
        base = json.loads(result)["body"]["data"][0]["result"]["items"]
        source = [item[0]['name'] for item in base[0]]
        count = 0
        detail = []
        for item in base[1]:
            tojson = {}
            tojson['name'] = source[count]
            tojson['pv_count'] = item[0]
            tojson['pv_ratio'] = item[1]
            tojson['visitor_count'] = item[2]
            count = count + 1
            detail.append(tojson)
        r.set("source", json.dumps(detail))
        logging.info("所有来源")

    def getDiYu(self):  # visits by region ("diyu")
        result = self.getresult(start, end, "visit/district/a",
                                "pv_count,visitor_count,avg_visit_time")
        base = json.loads(result)["body"]["data"][0]["result"]["items"]
        source = [item[0]['name'] for item in base[0]]
        count = 0
        detail = []
        for item in base[1]:
            tojson = {}
            tojson['name'] = source[count]
            tojson['pv_count'] = item[0]
            # NOTE(review): the requested metrics are pv_count, visitor_count,
            # avg_visit_time, so item[1] is visitor_count and item[2] is
            # avg_visit_time -- these keys appear mislabeled (copy-paste from
            # getAllSource). Not changed here because Redis consumers read
            # these key names; confirm before fixing.
            tojson['pv_ratio'] = item[1]
            tojson['visitor_count'] = item[2]
            count = count + 1
            detail.append(tojson)
        r.set("diyu", json.dumps(detail))
        logging.info("地域")

    def getTopTen(self):  # top visited pages
        result = self.getresult(start, end, "visit/toppage/a",
                                "pv_count,visitor_count,average_stay_time")
        base = json.loads(result)["body"]["data"][0]["result"]["items"]
        name = [item[0]['name'] for item in base[0]]
        count = 0
        tojson = []
        for item in base[1]:
            temp = {}
            temp["name"] = name[count]
            temp["pv_count"] = item[0]
            temp["visitor_count"] = item[1]
            temp["average_stay_time"] = item[2]
            tojson.append(temp)
            count = count + 1
        # Only the top 5 pages are cached under the "top_ten" key.
        r.set("top_ten", json.dumps(tojson[:5]))
        logging.info("前十访问页面")
if __name__ == '__main__':
    # NOTE(review): the password/token arguments are redaction placeholders;
    # restore real credentials before running.
    bd = Baidu(10879516, "ZepheryWen", "wenzhihuai2017", "bad4fda9a0634<PASSWORD>4416ec<PASSWORD>")
    # Refresh every cached report slice in Redis.
    bd.getPvUvAvgTime()
    bd.getRukouYeMian()
    bd.getAllSource()
    bd.getDiYu()
    bd.getTopTen()
    print("finish")
2,813 | /**
* The files in this directory are copied from JabRef's main source.
 * We need to have a copy, because
 * a) symlinks do not work on git for windows out of the box (<a href="https://stackoverflow.com/a/59761201/873282">https://stackoverflow.com/a/59761201/873282</a>)
* b) using a source directory of another project causes issues in i) gradle 7, ii) IntelliJ 2020 and later
*/
package org.jabref.logic.journals;
| 132 |
9,724 | <filename>system/lib/compiler-rt/lib/builtins/atomic_flag_clear.c
//===-- atomic_flag_clear.c -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements atomic_flag_clear from C11's stdatomic.h.
//
//===----------------------------------------------------------------------===//
// Fallback for compilers that do not provide __has_include: treat the header
// as unavailable so the whole file compiles to nothing.
#ifndef __has_include
#define __has_include(inc) 0
#endif

#if __has_include(<stdatomic.h>)

#include <stdatomic.h>
#undef atomic_flag_clear

// Set *object to the clear (false) state with sequentially consistent
// ordering, as C11 specifies for atomic_flag_clear.
void atomic_flag_clear(volatile atomic_flag *object) {
  __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST);
}

#endif
2,806 | package com.idormy.sms.forwarder.sender;
import android.os.Handler;
import android.util.Log;
import androidx.annotation.NonNull;
import com.idormy.sms.forwarder.utils.LogUtil;
import com.idormy.sms.forwarder.utils.SettingUtil;
import java.io.IOException;
import java.net.URLEncoder;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import io.reactivex.rxjava3.core.Observable;
import io.reactivex.rxjava3.core.ObservableEmitter;
import okhttp3.Call;
import okhttp3.Callback;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
@SuppressWarnings("ResultOfMethodCallIgnored")
public class SenderBarkMsg extends SenderBaseMsg {
static final String TAG = "SenderBarkMsg";
public static void sendMsg(final long logId, final Handler handError, String barkServer, String barkIcon, String from, String content, String groupName) throws Exception {
Log.i(TAG, "sendMsg barkServer:" + barkServer + " from:" + from + " content:" + content);
if (barkServer == null || barkServer.isEmpty()) {
return;
}
//特殊处理避免标题重复
content = content.replaceFirst("^" + from + "(.*)", "").trim();
barkServer += URLEncoder.encode(from, "UTF-8");
barkServer += "/" + URLEncoder.encode(content, "UTF-8");
barkServer += "?isArchive=1"; //自动保存
barkServer += "&group=" + URLEncoder.encode(groupName, "UTF-8"); //增加支持分组
if (barkIcon != null && !barkIcon.isEmpty()) {
barkServer += "&icon=" + URLEncoder.encode(barkIcon, "UTF-8"); //指定推送消息图标
}
int isCode = content.indexOf("验证码");
int isPassword = content.indexOf("动态密码");
if (isCode != -1 || isPassword != -1) {
Pattern p = Pattern.compile("(\\d{4,6})");
Matcher m = p.matcher(content);
if (m.find()) {
System.out.println(m.group());
barkServer += "&automaticallyCopy=1©=" + m.group();
}
}
final String requestUrl = barkServer;
Log.i(TAG, "requestUrl:" + requestUrl);
Observable
.create((ObservableEmitter<Object> emitter) -> {
Toast(handError, TAG, "开始请求接口...");
OkHttpClient client = new OkHttpClient();
final Request request = new Request.Builder().url(requestUrl).get().build();
Call call = client.newCall(request);
call.enqueue(new Callback() {
@Override
public void onFailure(@NonNull Call call, @NonNull final IOException e) {
LogUtil.updateLog(logId, 0, e.getMessage());
Toast(handError, TAG, "发送失败:" + e.getMessage());
emitter.onError(new RuntimeException("请求接口异常..."));
}
@Override
public void onResponse(@NonNull Call call, @NonNull Response response) throws IOException {
final String responseStr = Objects.requireNonNull(response.body()).string();
Log.d(TAG, "Response:" + response.code() + "," + responseStr);
Toast(handError, TAG, "发送状态:" + responseStr);
//TODO:粗略解析是否发送成功
if (responseStr.contains("\"message\":\"success\"")) {
LogUtil.updateLog(logId, 2, responseStr);
} else {
LogUtil.updateLog(logId, 0, responseStr);
}
}
});
}).retryWhen((Observable<Throwable> errorObservable) -> errorObservable
.zipWith(Observable.just(
SettingUtil.getRetryDelayTime(1),
SettingUtil.getRetryDelayTime(2),
SettingUtil.getRetryDelayTime(3),
SettingUtil.getRetryDelayTime(4),
SettingUtil.getRetryDelayTime(5)
), (Throwable e, Integer time) -> time)
.flatMap((Integer delay) -> {
Toast(handError, TAG, "请求接口异常," + delay + "秒后重试");
return Observable.timer(delay, TimeUnit.SECONDS);
}))
.subscribe(System.out::println);
}
}
| 2,511 |
1,590 | {
"parameters": {
"api-version": "2017-04-01",
"subscriptionId": "29cfa613-cbbc-4512-b1d6-1b3a92c7fa40",
"parameters": {
"name": "sdk-Namespace-2924"
}
},
"responses": {
"200": {
"body": {
"isAvailiable": false,
"id": "/subscriptions/29cfa613-cbbc-4512-b1d6-1b3a92c7fa40/providers/Microsoft.NotificationHubs/CheckNamespaceAvailability",
"name": "mytestnamespace",
"type": "Microsoft.NotificationHubs/namespaces/checkNamespaceAvailability",
"location": "West Europe",
"tags": null
}
}
}
}
| 285 |
313 | <gh_stars>100-1000
package com.imperva.apispecparser.parsers.swagger.propertynode;
import com.imperva.apiattacktool.model.tests.ParameterLocation;
import io.swagger.models.Model;
import io.swagger.models.ModelImpl;
import io.swagger.models.RefModel;
import java.util.Map;
public class SwaggerModelToPropertyNodeFactory {
public static SwaggerPropertyNodeConverter get(Model model, boolean isRequired, String parentName, ParameterLocation parameterLocation,
Map<String, Model> definitions) {
if (model == null) {
return null;
}
if (model instanceof ModelImpl) {
return new SwaggerModelImplToPropertyNode((ModelImpl) model, isRequired, parentName, parameterLocation, definitions);
} else if (model instanceof RefModel) {
return new SwaggerRefModelToPropertyNode((RefModel) model, isRequired, parentName, parameterLocation, definitions);
} else {
return null;
}
}
}
| 394 |
823 | /*
* Copyright 2019 The Project Oak Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "examples/private_set_intersection/proto/private_set_intersection.grpc.pb.h"
#include "examples/private_set_intersection/proto/private_set_intersection.pb.h"
#include "glog/logging.h"
#include "include/grpcpp/grpcpp.h"
#include "oak/client/application_client.h"
#include "oak/common/nonce_generator.h"
ABSL_FLAG(std::string, address, "localhost:8080", "Address of the Oak application to connect to");
ABSL_FLAG(std::string, set_id, "", "ID of the set intersection");
ABSL_FLAG(std::string, ca_cert_path, "", "Path to the PEM-encoded CA root certificate");
ABSL_FLAG(std::string, public_key, "", "Path to the PEM-encoded public key used as a data label");
using ::oak::examples::private_set_intersection::GetIntersectionRequest;
using ::oak::examples::private_set_intersection::GetIntersectionResponse;
using ::oak::examples::private_set_intersection::PrivateSetIntersection;
using ::oak::examples::private_set_intersection::SubmitSetRequest;
// Submits `set` under `set_id` via the SubmitSet RPC and returns the call
// status; the RPC's response payload is empty.
grpc::Status SubmitSet(PrivateSetIntersection::Stub* stub, std::string set_id,
                       std::vector<std::string> set) {
  grpc::ClientContext context;
  SubmitSetRequest request;
  request.set_set_id(set_id);
  for (auto item : set) {
    request.add_values(item);
  }
  google::protobuf::Empty response;
  return stub->SubmitSet(&context, request, &response);
}
// Fetches the current intersection stored under `set_id`. Aborts the process
// (LOG(FATAL)) if the RPC fails.
std::vector<std::string> RetrieveIntersection(PrivateSetIntersection::Stub* stub,
                                              std::string set_id) {
  std::vector<std::string> values;
  grpc::ClientContext context;
  GetIntersectionRequest request;
  request.set_set_id(set_id);
  GetIntersectionResponse response;
  grpc::Status status = stub->GetIntersection(&context, request, &response);
  if (!status.ok()) {
    LOG(FATAL) << "Could not retrieve intersection: "
               << oak::status_code_to_string(status.error_code()) << ": " << status.error_message();
  }
  for (auto item : response.values()) {
    values.push_back(item);
  }
  return values;
}
// End-to-end client flow: two clients submit overlapping sets, a client with
// an invalid signature label is rejected, and both valid clients then verify
// the computed intersection.
int main(int argc, char** argv) {
  absl::ParseCommandLine(argc, argv);
  std::string address = absl::GetFlag(FLAGS_address);
  std::string set_id = absl::GetFlag(FLAGS_set_id);
  std::string ca_cert_path =
      oak::ApplicationClient::LoadRootCert(absl::GetFlag(FLAGS_ca_cert_path));
  LOG(INFO) << "Connecting to Oak Application: " << address;

  // TODO(#1066): Use a more restrictive Label, based on a bearer token shared between the two
  // clients.
  std::string public_key = oak::ApplicationClient::LoadPublicKey(absl::GetFlag(FLAGS_public_key));
  oak::label::Label label = oak::WebAssemblyModuleSignatureLabel(public_key);
  auto stub_0 = PrivateSetIntersection::NewStub(oak::ApplicationClient::CreateChannel(
      address, oak::ApplicationClient::GetTlsChannelCredentials(ca_cert_path), label));
  auto stub_1 = PrivateSetIntersection::NewStub(oak::ApplicationClient::CreateChannel(
      address, oak::ApplicationClient::GetTlsChannelCredentials(ca_cert_path), label));

  // Submit sets from different clients.
  std::vector<std::string> set_0{"a", "b", "c"};
  auto submit_status_0 = SubmitSet(stub_0.get(), set_id, set_0);
  if (!submit_status_0.ok()) {
    LOG(FATAL) << "Could not submit set: " << submit_status_0.error_code() << ": "
               << submit_status_0.error_message();
  }
  std::vector<std::string> set_1{"b", "c", "d"};
  auto submit_status_1 = SubmitSet(stub_1.get(), set_id, set_1);
  if (!submit_status_1.ok()) {
    LOG(FATAL) << "Could not submit set: " << submit_status_1.error_code() << ": "
               << submit_status_1.error_message();
  }

  // Use an invalid public key.
  // FIX: this string literal was missing its closing quote (the key content
  // itself has been redacted to a placeholder).
  std::string invalid_public_key_base64 = "<KEY>";
  std::string invalid_public_key;
  if (!absl::Base64Unescape(invalid_public_key_base64, &invalid_public_key)) {
    LOG(FATAL) << "Could not decode public key: " << invalid_public_key_base64;
  }
  oak::label::Label invalid_label = oak::WebAssemblyModuleSignatureLabel(invalid_public_key);
  auto invalid_stub = PrivateSetIntersection::NewStub(oak::ApplicationClient::CreateChannel(
      address, oak::ApplicationClient::GetTlsChannelCredentials(ca_cert_path), invalid_label));
  std::vector<std::string> set_2{"c", "d", "e"};
  auto submit_status_2 = SubmitSet(invalid_stub.get(), set_id, set_2);
  // Error code `3` means `could not process gRPC request`.
  if (submit_status_2.error_code() != 3) {
    LOG(FATAL) << "Invalid public key was accepted";
  }

  // Retrieve intersection: both valid clients must observe {"b", "c"}.
  std::set<std::string> expected_set{"b", "c"};

  std::vector<std::string> intersection_0 = RetrieveIntersection(stub_0.get(), set_id);
  LOG(INFO) << "client 0 intersection:";
  for (auto item : intersection_0) {
    LOG(INFO) << "- " << item;
  }
  if (std::set<std::string>(intersection_0.begin(), intersection_0.end()) != expected_set) {
    LOG(FATAL) << "Unexpected set";
  }

  std::vector<std::string> intersection_1 = RetrieveIntersection(stub_1.get(), set_id);
  LOG(INFO) << "client 1 intersection:";
  for (auto item : intersection_1) {
    LOG(INFO) << "- " << item;
  }
  if (std::set<std::string>(intersection_1.begin(), intersection_1.end()) != expected_set) {
    LOG(FATAL) << "Unexpected set";
  }

  return EXIT_SUCCESS;
}
348 | {"nom":"Saint-Julien-de-Concelles","circ":"10ème circonscription","dpt":"Loire-Atlantique","inscrits":5347,"abs":3129,"votants":2218,"blancs":163,"nuls":47,"exp":2008,"res":[{"nuance":"REM","nom":"<NAME>","voix":1234},{"nuance":"LR","nom":"<NAME>","voix":774}]} | 103 |
380 | {"options":{},"indexes":[{"v":2,"key":{"_id":1},"name":"_id_","ns":"openrasp.plugin"},{"v":2,"key":{"app_id":1},"name":"app_id","ns":"openrasp.plugin","background":true},{"v":2,"key":{"upload_time":1},"name":"upload_time","ns":"openrasp.plugin","background":true}],"uuid":"4518b8aeec984278809ec45ebe1cbab4"} | 116 |
711 | package com.java110.intf.community;
import com.java110.config.feign.FeignConfiguration;
import com.java110.dto.repair.RepairDto;
import com.java110.po.owner.RepairPoolPo;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import java.util.List;
/**
 * IRepairInnerServiceSMO
 * <p>
 * Inner-service Feign client exposing repair-order queries from the
 * community service. Paired count methods support paging.
 *
 * @author wuxw
 * @version 1.0, 2019/4/24
 */
@FeignClient(name = "community-service", configuration = {FeignConfiguration.class})
@RequestMapping("/repairApi")
public interface IRepairInnerServiceSMO {

    /**
     * Query repair orders matching the given criteria.
     *
     * @param repairDto query criteria
     * @return matching repair orders
     */
    @RequestMapping(value = "/queryRepairs", method = RequestMethod.POST)
    List<RepairDto> queryRepairs(@RequestBody RepairDto repairDto);

    /**
     * Count repair orders matching the given criteria (for paging).
     *
     * @param repairDto query criteria
     * @return number of matching repair orders
     */
    @RequestMapping(value = "/queryRepairsCount", method = RequestMethod.POST)
    int queryRepairsCount(@RequestBody RepairDto repairDto);

    /**
     * Query repair orders assigned to a staff member.
     *
     * @param repairDto query criteria (including the staff filter)
     * @return matching repair orders
     */
    @RequestMapping(value = "/queryStaffRepairs", method = RequestMethod.POST)
    List<RepairDto> queryStaffRepairs(@RequestBody RepairDto repairDto);

    /**
     * Count repair orders assigned to a staff member (for paging).
     *
     * @param repairDto query criteria (including the staff filter)
     * @return number of matching repair orders
     */
    @RequestMapping(value = "/queryStaffRepairsCount", method = RequestMethod.POST)
    int queryStaffRepairsCount(@RequestBody RepairDto repairDto);

    /**
     * Query finished repair orders handled by a staff member.
     *
     * @param repairDto query criteria (including the staff filter)
     * @return matching finished repair orders
     */
    @RequestMapping(value = "/queryStaffFinishRepairs", method = RequestMethod.POST)
    List<RepairDto> queryStaffFinishRepairs(@RequestBody RepairDto repairDto);

    /**
     * Count finished repair orders handled by a staff member (for paging).
     *
     * @param repairDto query criteria (including the staff filter)
     * @return number of matching finished repair orders
     */
    @RequestMapping(value = "/queryStaffFinishRepairsCount", method = RequestMethod.POST)
    int queryStaffFinishRepairsCount(@RequestBody RepairDto repairDto);

    /**
     * Update a repair order in the shared pool.
     *
     * @param repairPoolPo fields to persist
     * @return number of rows affected
     */
    @RequestMapping(value = "/updateRepair", method = RequestMethod.POST)
    int updateRepair(@RequestBody RepairPoolPo repairPoolPo);
}
| 1,216 |
14,668 | // Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "jingle/glue/jingle_glue_mock_objects.h"
namespace jingle_glue {

// Trivial out-of-line constructor/destructor for the mock, defined here so
// this translation unit carries their emitted code.
MockStream::MockStream() {}

MockStream::~MockStream() {}

}  // namespace jingle_glue
| 112 |
2,039 | /*-
*
* * Copyright 2017 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*
*/
package org.nd4j.linalg.factory;
import org.nd4j.base.Preconditions;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.impl.accum.AMax;
import org.nd4j.linalg.api.ops.impl.accum.AMin;
import org.nd4j.linalg.api.ops.impl.broadcast.*;
import org.nd4j.linalg.api.ops.impl.transforms.arithmetic.*;
import org.nd4j.linalg.api.ops.impl.transforms.comparison.*;
import java.util.Arrays;
/**
* Convenience methods for broadcasts
*
* @author <NAME>
*/
public class Broadcast {
private Broadcast(){ }
/**
 * Broadcast add op. See: {@link BroadcastAddOp}
 *
 * @param x          input array
 * @param y          array to add, broadcast along {@code dimensions}
 * @param z          result array
 * @param dimensions broadcast dimensions; when explicitly {@code null} (an
 *                   empty varargs call yields an empty array, not null) a
 *                   plain element-wise add is executed and all three shapes
 *                   must match exactly
 * @return {@code z}, populated with the result
 */
public static INDArray add(INDArray x, INDArray y, INDArray z, int... dimensions) {
    if(dimensions == null) {
        Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
        Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
        return Nd4j.getExecutioner().execAndReturn(new OldAddOp(x,y,z));
    }
    return Nd4j.getExecutioner().execAndReturn(new BroadcastAddOp(x,y,z,dimensions));
}
/**
 * Broadcast copy op. See: {@link BroadcastCopyOp}
 *
 * @param x          input array
 * @param y          array to copy from, broadcast along {@code dimensions}
 * @param z          result array
 * @param dimensions broadcast dimensions; when explicitly {@code null} a plain
 *                   element-wise copy is executed and all shapes must match
 * @return {@code z}, populated with the result
 */
public static INDArray copy(INDArray x, INDArray y, INDArray z, int... dimensions) {
    if(dimensions == null) {
        Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
        Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
        return Nd4j.getExecutioner().execAndReturn(new CopyOp(x,y,z));
    }
    return Nd4j.getExecutioner().execAndReturn(new BroadcastCopyOp(x,y,z,dimensions));
}
/**
 * Broadcast divide op. See: {@link BroadcastDivOp}
 *
 * @param x          numerator array
 * @param y          divisor array, broadcast along {@code dimensions}
 * @param z          result array
 * @param dimensions broadcast dimensions; when explicitly {@code null} a plain
 *                   element-wise divide is executed and all shapes must match
 * @return {@code z}, populated with the result
 */
public static INDArray div(INDArray x, INDArray y, INDArray z, int... dimensions) {
    if(dimensions == null) {
        Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
        Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
        return Nd4j.getExecutioner().execAndReturn(new OldDivOp(x,y,z));
    }
    return Nd4j.getExecutioner().execAndReturn(new BroadcastDivOp(x,y,z,dimensions));
}
/**
* Broadcast equal to op. See: {@link BroadcastEqualTo}
*/
public static INDArray eq(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldEqualTo(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastEqualTo(x,y,z,dimensions));
}
/**
* Broadcast greater than op. See: {@link BroadcastGreaterThan}
*/
public static INDArray gt(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldGreaterThan(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastGreaterThan(x,y,z,dimensions));
}
/**
* Broadcast greater than or equal to op. See: {@link BroadcastGreaterThanOrEqual}
*/
public static INDArray gte(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldGreaterThanOrEqual(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastGreaterThanOrEqual(x,y,z,dimensions));
}
/**
* Broadcast less than op. See: {@link BroadcastLessThan}
*/
public static INDArray lt(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldLessThan(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastLessThan(x,y,z,dimensions));
}
/**
* Broadcast less than or equal to op. See: {@link BroadcastLessThanOrEqual}
*/
public static INDArray lte(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldLessThanOrEqual(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastLessThanOrEqual(x,y,z,dimensions));
}
/**
* Broadcast element-wise multiply op. See: {@link BroadcastMulOp}
*/
public static INDArray mul(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
return Nd4j.getExecutioner().execAndReturn(new OldMulOp(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(x,y,z,dimensions));
}
/**
* Broadcast not equal to op. See: {@link BroadcastNotEqual}
*/
public static INDArray neq(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldNotEqualTo(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastNotEqual(x,y,z,dimensions));
}
/**
* Broadcast reverse division op. See: {@link BroadcastRDivOp}
*/
public static INDArray rdiv(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldRDivOp(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastRDivOp(x,y,z,dimensions));
}
/**
* Broadcast reverse subtraction op. See: {@link BroadcastRSubOp}
*/
public static INDArray rsub(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldSubOp(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastRSubOp(x,y,z,dimensions));
}
/**
* Broadcast subtraction op. See: {@link BroadcastSubOp}
*/
public static INDArray sub(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
return Nd4j.getExecutioner().execAndReturn(new OldSubOp(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastSubOp(x,y,z,dimensions));
}
/**
* Broadcast max op. See: {@link BroadcastMax}
*/
public static INDArray max(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldMax(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastMax(x,y,z,dimensions));
}
/**
* Broadcast min op. See: {@link BroadcastMin}
*/
public static INDArray min(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new OldMin(x,y,z,x.length()));
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastMin(x,y,z,dimensions));
}
/**
* Broadcast absolute max op. See: {@link BroadcastAMax}
*/
public static INDArray amax(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new AMax(x,y,z,x.length())).z();
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastAMax(x,y,z,dimensions));
}
/**
* Broadcast absolute min op. See: {@link BroadcastAMax}
*/
public static INDArray amin(INDArray x, INDArray y, INDArray z, int... dimensions) {
if(dimensions == null) {
Preconditions.checkArgument(Arrays.equals(x.shape(),y.shape()),getFormattedShapeErrorMessageXy(x,y));
Preconditions.checkArgument(Arrays.equals(x.shape(),z.shape()),getFormattedShapeErrorMessageXResult(x,z));
return Nd4j.getExecutioner().execAndReturn(new AMin(x,y,z,x.length())).z();
}
return Nd4j.getExecutioner().execAndReturn(new BroadcastAMin(x,y,z,dimensions));
}
private static String getFormattedShapeErrorMessageXy(INDArray arr1,INDArray arr2) {
return String.format("Shapes for x(%s) and y(%s) must be equal!", Arrays.toString(arr1.shape()),Arrays.toString(arr2.shape()));
}
private static String getFormattedShapeErrorMessageXResult(INDArray arr1,INDArray arr2) {
return String.format("Shapes for x(%s) and result array(%s) must be equal!", Arrays.toString(arr1.shape()),Arrays.toString(arr2.shape()));
}
}
| 4,959 |
5,766 | /* XzCrc64.c -- CRC64 calculation
2010-04-16 : <NAME> : Public domain */
#include "XzCrc64.h"
#define kCrc64Poly UINT64_CONST(0xC96C5795D7870F42)
UInt64 g_Crc64Table[256];
/* Builds the 256-entry lookup table for the XZ CRC-64 polynomial.
   Standard bit-at-a-time generation: entry i holds the CRC of the single
   byte value i. Must run once before Crc64Update / Crc64Calc. */
void MY_FAST_CALL Crc64GenerateTable(void)
{
  UInt32 index;
  for (index = 0; index < 256; index++)
  {
    UInt64 crc = index;
    int bit;
    for (bit = 0; bit < 8; bit++)
    {
      /* Equivalent to the branch-free mask form: xor in the polynomial
         only when the low bit shifted out was set. */
      if (crc & 1)
        crc = (crc >> 1) ^ kCrc64Poly;
      else
        crc = (crc >> 1);
    }
    g_Crc64Table[index] = crc;
  }
}
/* Folds `size` bytes of `data` into the running CRC value `v` and returns
   the updated value. `v` must originate from CRC64_INIT_VAL (directly or
   via a previous Crc64Update call). */
UInt64 MY_FAST_CALL Crc64Update(UInt64 v, const void *data, size_t size)
{
  const Byte *cur = (const Byte *)data;
  const Byte *end = cur + size;
  while (cur != end)
  {
    v = CRC64_UPDATE_BYTE(v, *cur);
    cur++;
  }
  return v;
}
/* One-shot CRC-64 of a buffer: initialize, fold all bytes, finalize. */
UInt64 MY_FAST_CALL Crc64Calc(const void *data, size_t size)
{
  UInt64 crc = CRC64_INIT_VAL;
  crc = Crc64Update(crc, data, size);
  return CRC64_GET_DIGEST(crc);
}
| 365 |
6,270 | [
{
"type": "feature",
"category": "ComputeOptimizer",
"description": "Adds support for 1) the AWS Graviton (AWS_ARM64) recommendation preference for Amazon EC2 instance and Auto Scaling group recommendations, and 2) the ability to get the enrollment statuses for all member accounts of an organization."
},
{
"type": "feature",
"category": "EC2",
"description": "Support added for resizing VPC prefix lists"
},
{
"type": "feature",
"category": "Rekognition",
        "description": "This release added new attributes to Rekognition RecognizeCelebrities and GetCelebrityInfo API operations."
},
{
"type": "feature",
"category": "TranscribeService",
"description": "This release adds support for batch transcription in six new languages - Afrikaans, Danish, Mandarin Chinese (Taiwan), New Zealand English, South African English, and Thai."
}
] | 332 |
515 | package br.com.caelum.stella.gateway.pagseguro;
import br.com.caelum.stella.gateway.core.DefinedByCode;
/**
 * Payment types used by the PagSeguro gateway integration, each bound to the
 * literal code exchanged with the gateway.
 */
public enum PagSeguroTipoPagamento implements DefinedByCode{

	PAG_SEGURO("PAGAMENTO"),CARTAO_CREDITO("CARTAO DE CREDITO"),BOLETO("BOLETO"),PAGAMENTO_ONLINE("PAGAMENTO ONLINE");

	// Literal payment-type code used on the wire; immutable per constant.
	private final String codigo;

	private PagSeguroTipoPagamento(String codigo){
		this.codigo = codigo;
	}

	/**
	 * @return the gateway code associated with this payment type
	 */
	public String getCodigo() {
		return codigo;
	}

	// Removed the auto-generated toString() override: it only delegated to
	// super.toString(), so behavior (Enum.toString() returning the constant
	// name) is unchanged.
}
| 253 |
375 | /*
* Copyright 2016 Nokia Solutions and Networks
* Licensed under the Apache License, Version 2.0,
* see license.txt file for details.
*/
package org.robotframework.ide.eclipse.main.plugin.assist;
import java.util.List;
import com.google.common.collect.ImmutableList;
/**
 * Assist proposal for the {@code WITH NAME} reserved word
 * ({@link LibraryAliasReservedWordProposals#WITH_NAME}), which takes a single
 * alias argument.
 */
class LibraryAliasReservedWordProposal extends BaseAssistProposal {

    /** The single argument the WITH NAME reserved word expects. */
    private static final List<String> WITH_NAME_ARGUMENTS = ImmutableList.of("alias");

    LibraryAliasReservedWordProposal(final ProposalMatch match) {
        super(LibraryAliasReservedWordProposals.WITH_NAME, match);
    }

    @Override
    public List<String> getArguments() {
        return WITH_NAME_ARGUMENTS;
    }
}
| 211 |
1,800 | package com.limpoxe.fairy.core.compat;
import android.content.Context;
import androidx.collection.SimpleArrayMap;
import com.limpoxe.fairy.util.RefInvoker;
import java.util.HashMap;
import java.util.Map;
/**
 * Compatibility helpers for the static fragment-class caches kept by the three
 * fragment implementations (android.app, android.support.v4, androidx).
 * The install* methods swap the framework's static class map for a no-op map
 * (EmptyHashMap / EmptySimpleArrayMap) so fragment classes are never cached
 * (presumably so classes resolved through plugin class loaders do not leak
 * into the host cache). The clear* methods empty whatever cache is present,
 * and forceCache() deliberately primes the caches instead.
 */
public class CompatForFragmentClassCache {

    // Fully-qualified class and field names accessed reflectively; any of
    // these classes may be absent at runtime depending on the app's setup.
    private static final String androidx_fragment_app_Fragment = "androidx.fragment.app.Fragment";
    private static final String androidx_fragment_app_Fragment_sClassMap = "sClassMap";

    private static final String androidx_fragment_app_FragmentFactory = "androidx.fragment.app.FragmentFactory";
    private static final String androidx_fragment_app_FragmentFactory_sClassMap = "sClassCacheMap";

    private static final String android_support_v4_app_Fragment = "android.support.v4.app.Fragment";
    private static final String android_support_v4_app_Fragment_sClassMap = "sClassMap";

    private static final String android_app_Fragment = "android.app.Fragment";
    private static final String android_app_Fragment_sClassMap = "sClassMap";

    // Prevent class caching: replace android.app.Fragment's static class map
    // with a map that never stores anything.
    public static void installFragmentClassCache() {
        Class FragmentClass = null;
        try {
            FragmentClass = Class.forName(android_app_Fragment);
            Object slCassMap = RefInvoker.getField(null, FragmentClass, android_app_Fragment_sClassMap);
            if (slCassMap != null) {
                // On Android 4.3 and below the field is a HashMap<String, Class<?>>.
                // NOTE(review): the check reads reversed (x.getClass().isAssignableFrom(HashMap.class));
                // it matches only when the field holds exactly a HashMap (or superclass), which
                // happens to work for the stock field — confirm before rewriting as instanceof.
                if (slCassMap.getClass().isAssignableFrom(HashMap.class)) {
                    RefInvoker.setField(null, FragmentClass, android_app_Fragment_sClassMap, new EmptyHashMap<String, Class<?>>());
                } else {
                    // On 4.4+ the field is an android.util.ArrayMap<String, Class<?>>; left untouched here.
                }
            }
        } catch (ClassNotFoundException e) {
            //LogUtil.printException("CompatForFragmentClassCache.installFragmentClassCache", e);
        }
    }

    // Prevent class caching for android.support.v4.app.Fragment (same pattern
    // as installFragmentClassCache above).
    public static void installSupportV4FragmentClassCache() {
        Class FragmentClass = null;
        try {
            FragmentClass = Class.forName(android_support_v4_app_Fragment);
            Object slCassMap = RefInvoker.getField(null, FragmentClass, android_support_v4_app_Fragment_sClassMap);
            if (slCassMap != null) {
                // Older support versions: HashMap<String, Class<?>>.
                if (slCassMap.getClass().isAssignableFrom(HashMap.class)) {
                    RefInvoker.setField(null, FragmentClass, android_support_v4_app_Fragment_sClassMap, new EmptyHashMap<String, Class<?>>());
                } else {
                    // Newer support versions: android.support.v4.util.SimpleArrayMap<String, Class<?>>; left untouched here.
                }
            }
        } catch (ClassNotFoundException e) {
            //LogUtil.printException("CompatForFragmentClassCache.installSupportV4FragmentClassCache", e);
        }
    }

    // Prevent class caching for androidx: the cache lives on Fragment in older
    // androidx releases and on FragmentFactory in newer ones.
    public static void installAndroidXFragmentClassCache() {
        Class FragmentClass = null;
        try {
            FragmentClass = Class.forName(androidx_fragment_app_Fragment);
            Object slCassMap = RefInvoker.getField(null, FragmentClass, androidx_fragment_app_Fragment_sClassMap);
            if (slCassMap != null) {
                if (slCassMap instanceof Map) {
                    RefInvoker.setField(null, FragmentClass, androidx_fragment_app_Fragment_sClassMap, new EmptyHashMap());
                } else if (slCassMap instanceof SimpleArrayMap) {
                    RefInvoker.setField(null, FragmentClass, androidx_fragment_app_Fragment_sClassMap, new EmptySimpleArrayMap());
                }
            } else {
                // Fragment carries no cache field: fall back to FragmentFactory.sClassCacheMap.
                FragmentClass = Class.forName(androidx_fragment_app_FragmentFactory);
                slCassMap = RefInvoker.getField(null, FragmentClass, androidx_fragment_app_FragmentFactory_sClassMap);
                if (slCassMap != null) {
                    if (slCassMap instanceof Map) {
                        RefInvoker.setField(null, FragmentClass, androidx_fragment_app_FragmentFactory_sClassMap, new EmptyHashMap());
                    } else if (slCassMap instanceof SimpleArrayMap) {
                        RefInvoker.setField(null, FragmentClass, androidx_fragment_app_FragmentFactory_sClassMap, new EmptySimpleArrayMap());
                    }
                }
            }
        } catch (ClassNotFoundException e) {
            //LogUtil.printException("CompatForFragmentClassCache.installAndroidXFragmentClassCache", e);
        }
    }

    // Clear android.app.Fragment's class cache, whatever concrete map type it is.
    public static void clearFragmentClassCache() {
        Class FragmentClass = null;
        try {
            FragmentClass = Class.forName(android_app_Fragment);
            Object slCassMap = RefInvoker.getField(null, FragmentClass, android_app_Fragment_sClassMap);
            if (slCassMap != null) {
                // clear() is invoked reflectively so both HashMap and ArrayMap are handled.
                RefInvoker.invokeMethod(slCassMap, slCassMap.getClass(), "clear", (Class[])null, (Object[])null);
            }
        } catch (ClassNotFoundException e) {
            //LogUtil.printException("CompatForFragmentClassCache.clearFragmentClassCache", e);
        }
    }

    // Clear android.support.v4.app.Fragment's class cache.
    public static void clearSupportV4FragmentClassCache() {
        Class FragmentClass = null;
        try {
            FragmentClass = Class.forName(android_support_v4_app_Fragment);
            Object slCassMap = RefInvoker.getField(null, FragmentClass, android_support_v4_app_Fragment_sClassMap);
            if (slCassMap != null) {
                RefInvoker.invokeMethod(slCassMap, slCassMap.getClass(), "clear", (Class[])null, (Object[])null);
            }
        } catch (ClassNotFoundException e) {
            //LogUtil.printException("CompatForFragmentClassCache.clearSupportV4FragmentClassCache", e);
        }
    }

    // Clear the androidx class cache: on Fragment or, failing that, FragmentFactory.
    public static void clearAndroidXFragmentClassCache() {
        Class FragmentClass = null;
        try {
            FragmentClass = Class.forName(androidx_fragment_app_Fragment);
            Object slCassMap = RefInvoker.getField(null, FragmentClass, androidx_fragment_app_Fragment_sClassMap);
            if (slCassMap == null) {
                FragmentClass = Class.forName(androidx_fragment_app_FragmentFactory);
                slCassMap = RefInvoker.getField(null, FragmentClass, androidx_fragment_app_FragmentFactory_sClassMap);
            }
            if (slCassMap != null) {
                RefInvoker.invokeMethod(slCassMap, slCassMap.getClass(), "clear", (Class[])null, (Object[])null);
            }
        } catch (ClassNotFoundException e) {
            //LogUtil.printException("CompatForFragmentClassCache.clearAndroidXFragmentClassCache", e);
        }
    }

    /**
     * Pre-populates the fragment class caches with the given fragment class.
     * Note: priming the cache here works against the cache-blocking logic the
     * install* methods above apply at framework initialization.
     * @param fragmentContext context whose class loader resolves {@code fname}
     * @param fname fully-qualified fragment class name
     */
    public static void forceCache(Context fragmentContext, String fname) {
        try {
            // The framework cannot know which fragment flavor fname actually is, so all are tried.
            // Each call below triggers the flavor's internal caching logic, which stores fname's
            // class in the corresponding static map.
            android.app.Fragment.instantiate(fragmentContext, fname, null);
            RefInvoker.invokeMethod(null, android_support_v4_app_Fragment,
                    "isSupportFragmentClass",new Class[]{Context.class, String.class}, new Object[]{fragmentContext, fname});
            RefInvoker.invokeMethod(null, androidx_fragment_app_Fragment,
                    "isSupportFragmentClass",new Class[]{Context.class, String.class}, new Object[]{fragmentContext, fname});
            RefInvoker.invokeMethod(null, androidx_fragment_app_FragmentFactory,
                    "isFragmentClass",new Class[]{ClassLoader.class, String.class}, new Object[]{fragmentContext.getClassLoader(), fname});
        } catch (Exception e) {
            //e.printStackTrace();
        }
    }
}
| 3,626 |
324 | <filename>src/test/java/net/openhft/chronicle/bytes/Allocator.java<gh_stars>100-1000
/*
* Copyright 2016-2020 chronicle.software
*
* https://chronicle.software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.openhft.chronicle.bytes;
import org.jetbrains.annotations.NotNull;
import java.nio.ByteBuffer;
/**
 * Test fixture enumerating the different backing stores a Bytes / ByteBuffer
 * can be allocated from, so the same tests can run against each allocation
 * strategy.
 */
public enum Allocator {

    /** Elastic Bytes over a direct (off-heap) ByteBuffer. */
    NATIVE {
        @NotNull
        @Override
        Bytes<ByteBuffer> elasticBytes(int capacity) {
            return Bytes.elasticByteBuffer(capacity);
        }

        @NotNull
        @Override
        ByteBuffer byteBuffer(int capacity) {
            return ByteBuffer.allocateDirect(capacity);
        }
    },
    /** Elastic Bytes backed by an on-heap byte[]. */
    HEAP {
        @NotNull
        @Override
        Bytes<byte[]> elasticBytes(int capacity) {
            return Bytes.allocateElasticOnHeap(capacity);
        }

        @NotNull
        @Override
        ByteBuffer byteBuffer(int capacity) {
            return ByteBuffer.allocate(capacity);
        }
    },
    /** Elastic Bytes over a heap ByteBuffer. */
    BYTE_BUFFER {
        @NotNull
        @Override
        Bytes<ByteBuffer> elasticBytes(int capacity) {
            return Bytes.elasticHeapByteBuffer(capacity);
        }

        @NotNull
        @Override
        ByteBuffer byteBuffer(int capacity) {
            return ByteBuffer.allocate(capacity);
        }
    },
    /** Like NATIVE but with bounds checking disabled; enforces a minimum capacity of 128. */
    NATIVE_UNCHECKED {
        @NotNull
        @Override
        Bytes<ByteBuffer> elasticBytes(int capacity) {
            return Bytes.elasticByteBuffer(Math.max(128, capacity)).unchecked(true);
        }

        @NotNull
        @Override
        ByteBuffer byteBuffer(int capacity) {
            return ByteBuffer.allocateDirect(capacity);
        }
    },
    /** Like BYTE_BUFFER but with bounds checking disabled; enforces a minimum capacity of 32. */
    HEAP_UNCHECKED {
        @NotNull
        @Override
        Bytes<ByteBuffer> elasticBytes(int capacity) {
            return Bytes.elasticHeapByteBuffer(Math.max(32, capacity)).unchecked(true);
        }

        @NotNull
        @Override
        ByteBuffer byteBuffer(int capacity) {
            return ByteBuffer.allocate(capacity);
        }
    },
    /**
     * Bytes embedded in the field group of a heap object (Padding below).
     * Only capacities below 128 bytes are supported; larger requests throw
     * IllegalArgumentException from fixedBytes.
     */
    HEAP_EMBEDDED {
        @Override
        @NotNull Bytes<?> elasticBytes(int capacity) {
            // Not actually elastic: delegates to the fixed embedded buffer,
            // so the effective capacity is capped below 128.
            return fixedBytes(Math.max(capacity, 127));
        }

        @Override
        @NotNull ByteBuffer byteBuffer(int capacity) {
            // Embedded storage has no ByteBuffer representation.
            throw new IllegalArgumentException();
        }

        @Override
        Bytes fixedBytes(int capacity) {
            if (capacity >= 128)
                throw new IllegalArgumentException();
            Padding padding = new Padding();
            return Bytes.forFieldGroup(padding, "p").writeLimit(capacity);
        }
    };

    /** Creates an (ideally) elastic Bytes with at least the given initial capacity. */
    @NotNull
    abstract Bytes<?> elasticBytes(int capacity);

    /** Creates a raw ByteBuffer of the given capacity for this strategy. */
    @NotNull
    abstract ByteBuffer byteBuffer(int capacity);

    /** Creates a fixed-size writable Bytes over a freshly allocated buffer. */
    Bytes fixedBytes(int capacity) {
        return Bytes.wrapForWrite(byteBuffer(capacity));
    }

    static class Parent {
        int start;
    }

    /** Host object whose field group "p" supplies 128 bytes of embedded storage. */
    static class Padding extends Parent {
        @FieldGroup("p")
        // 128 bytes
        transient long p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15;
    }
}
| 1,534 |
848 | <filename>tools/Vitis-AI-Runtime/VART/vart/util/include/vitis/ai/simple_config.hpp
/*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
namespace vitis {
namespace ai {
/// Minimal configuration-file reader: parses a file into flat string
/// key/value pairs (values_) and exposes typed access via as<T>() or the
/// nested SimpleConfigViewer cursor.
class SimpleConfig {
 public:
  /// Returns a shared SimpleConfig for `filename`, creating (and parsing) one
  /// if not already available.
  static std::shared_ptr<SimpleConfig> getOrCreateSimpleConfig(
      const std::string& filename);

  /// Parses `filename` immediately (see Initialize()).
  SimpleConfig(const std::string& filename);

  /// Returns the value stored under `name`, converted to T via ParseValue().
  template <typename T>
  T as(const std::string& name) const;

  /// Lightweight cursor over one (possibly nested) configuration entry.
  /// Holds a reference to the owning config plus the lookup path so far;
  /// the config must outlive every viewer derived from it.
  struct SimpleConfigViewer {
    SimpleConfigViewer(const SimpleConfig& cfg, const std::string& name);

    /// Index-style descent; equivalent to operator() for the same argument.
    template <class T>
    SimpleConfigViewer operator[](const T& name) const;

    /// Returns a viewer for the child entry `name`.
    SimpleConfigViewer operator()(const std::string& name) const;
    /// Returns a viewer for the child entry at position `index`.
    SimpleConfigViewer operator()(int index) const;

    /// Returns viewers for all child entries of this node.
    std::vector<SimpleConfigViewer> fields() const;

    /// True if a child named `name` exists.
    bool has(const std::string& name) const;
    /// True if a child at position `idx` exists.
    bool has(size_t idx) const;

    /// Converts this entry's value to T.
    template <typename T>
    T as() const;

    const SimpleConfig& cfg_;  // owning config (not owned; must outlive this)
    std::string name_;         // accumulated lookup path
  };

  /// Returns a viewer rooted at `name` (or at the top level when empty).
  struct SimpleConfigViewer operator()(
      const std::string& name = std::string()) const;

  /// True if a value is stored under `name`.
  bool has(const std::string& name) const;

 private:
  std::map<std::string, std::string> values_;  // key -> raw string value
  std::vector<std::string> fields_;            // field names (ordering defined in the .cpp — not visible here)
  friend struct SimpleConfigViewer;

  /* following are private help functions*/
 private:
  /// Reads and parses `filename`, populating values_ and fields_.
  void Initialize(const std::string& filename);

  /// Converts the raw string `text` into `value` (specialized per type in the .cpp).
  template <typename T>
  static void ParseValue(const std::string& text, T& value);
};
} // namespace ai
} // namespace vitis
| 656 |
1,742 | from .generic_nodes import is_pAdicField, is_pAdicRing
from .factory import Zp, Zq, Zp as pAdicRing, ZpCR, ZpCA, ZpFM, ZpFP, ZpLC, ZpLF, ZqCR, ZqCA, ZqFM, ZqFP, ZpER
from .factory import Qp, Qq, Qp as pAdicField, QpCR, QpFP, QpLC, QpLF, QqCR, QqFP, QpER
from .factory import pAdicExtension
from .padic_generic import local_print_mode
from .pow_computer import PowComputer
from .pow_computer_ext import PowComputer_ext_maker
| 183 |
6,726 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#ifdef FB_SONARKIT_ENABLED
#pragma once
#import <Flipper/FlipperSocket.h>
#import <Flipper/FlipperSocketProvider.h>
#import <Flipper/FlipperTransportTypes.h>
#import <folly/dynamic.h>
#import <folly/io/async/EventBase.h>
#import <future>
#import <memory>
@class FlipperPlatformWebSocket;
namespace facebook {
namespace flipper {
class FlipperConnectionManager;
class ConnectionContextStore;
/**
 * FlipperSocket implementation for Apple platforms, backed by the
 * Objective-C FlipperPlatformWebSocket class (socket_).
 */
class FlipperWebSocket : public FlipperSocket {
 public:
  FlipperWebSocket(
      FlipperConnectionEndpoint endpoint,
      std::unique_ptr<FlipperSocketBasePayload> payload);
  /// Variant that additionally receives the connection context store
  /// (used by connections that need stored connection state/certificates —
  /// confirm exact use in the .mm implementation).
  FlipperWebSocket(
      FlipperConnectionEndpoint endpoint,
      std::unique_ptr<FlipperSocketBasePayload> payload,
      ConnectionContextStore* connectionContextStore);
  virtual ~FlipperWebSocket();

  virtual void setEventHandler(SocketEventHandler eventHandler) override;
  virtual void setMessageHandler(SocketMessageHandler messageHandler) override;

  virtual bool connect(FlipperConnectionManager* manager) override;
  virtual void disconnect() override;

  /// Fire-and-forget sends; `completion` is invoked when the send finishes.
  virtual void send(const folly::dynamic& message, SocketSendHandler completion)
      override;
  virtual void send(const std::string& message, SocketSendHandler completion)
      override;
  /// Sends `message` and reports the response through `completion`.
  virtual void sendExpectResponse(
      const std::string& message,
      SocketSendExpectResponseHandler completion) override;

 private:
  FlipperConnectionEndpoint endpoint_;
  std::unique_ptr<FlipperSocketBasePayload> payload_;
  ConnectionContextStore* connectionContextStore_;  // not owned; presumably null for ctor #1 — confirm in .mm
  SocketEventHandler eventHandler_;
  SocketMessageHandler messageHandler_;
  FlipperPlatformWebSocket* socket_;  // underlying Objective-C socket
};
/**
 * FlipperSocketProvider that produces FlipperWebSocket instances.
 * Note: the folly::EventBase argument is accepted to satisfy the provider
 * interface but is not forwarded by either overload.
 */
class FlipperWebSocketProvider : public FlipperSocketProvider {
 public:
  FlipperWebSocketProvider() {}

  /// Creates a socket without a connection context store.
  virtual std::unique_ptr<FlipperSocket> create(
      FlipperConnectionEndpoint endpoint,
      std::unique_ptr<FlipperSocketBasePayload> payload,
      folly::EventBase* eventBase) override {
    return std::make_unique<FlipperWebSocket>(
        std::move(endpoint), std::move(payload));
  }

  /// Creates a socket that can use the given connection context store.
  virtual std::unique_ptr<FlipperSocket> create(
      FlipperConnectionEndpoint endpoint,
      std::unique_ptr<FlipperSocketBasePayload> payload,
      folly::EventBase* eventBase,
      ConnectionContextStore* connectionContextStore) override {
    return std::make_unique<FlipperWebSocket>(
        std::move(endpoint), std::move(payload), connectionContextStore);
  }
};
} // namespace flipper
} // namespace facebook
#endif
| 820 |
892 | <reponame>westonsteimel/advisory-database-github
{
"schema_version": "1.2.0",
"id": "GHSA-8ww7-x4wm-w3qh",
"modified": "2022-05-03T03:18:08Z",
"published": "2022-05-03T03:18:08Z",
"aliases": [
"CVE-2007-2230"
],
"details": "SQL injection vulnerability in CA Clever Path Portal allows remote authenticated users to execute limited SQL commands and retrieve arbitrary database contents via (1) the ofinterest parameter in a light search query, (2) description parameter in the advanced search query, and possibly other vectors.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2007-2230"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/33853"
},
{
"type": "WEB",
"url": "http://archives.neohapsis.com/archives/fulldisclosure/2007-04/0648.html"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/25002"
},
{
"type": "WEB",
"url": "http://supportconnectw.ca.com/public/cp/portal/infodocs/portal-secnot.asp"
},
{
"type": "WEB",
"url": "http://www.ca.com/us/securityadvisor/newsinfo/collateral.aspx?cid=136879"
},
{
"type": "WEB",
"url": "http://www.hacktics.com/AdvCleverPathApr07.html"
},
{
"type": "WEB",
"url": "http://www.osvdb.org/34128"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/archive/1/466760/100/0/threaded"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/23671"
},
{
"type": "WEB",
"url": "http://www.securitytracker.com/id?1017970"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2007/1544"
}
],
"database_specific": {
"cwe_ids": [
"CWE-89"
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 915 |
3,102 | // RUN: %clang_cc1 -S -emit-llvm -o - -O2 -disable-llvm-passes %s | FileCheck %s -check-prefixes=CHECK,O2
// RUN: %clang_cc1 -S -emit-llvm -o - -O2 -disable-lifetime-markers %s \
// RUN: | FileCheck %s -check-prefixes=CHECK,O0
// RUN: %clang_cc1 -S -emit-llvm -o - -O0 %s | FileCheck %s -check-prefixes=CHECK,O0
extern int bar(char *A, int n);
// CHECK-LABEL: @foo
// O0-NOT: @llvm.lifetime.start
/* Each branch declares its own fixed-size local; the CHECK lines above assert
   that at -O2 each alloca gets a lifetime.start, and that none are emitted at
   -O0 or with lifetime markers disabled. Code must stay byte-identical — the
   FileCheck patterns depend on this exact shape. */
int foo (int n) {
  if (n) {
    // O2: @llvm.lifetime.start
    char A[100];
    return bar(A, 1);
  } else {
    // O2: @llvm.lifetime.start
    char A[100];
    return bar(A, 2);
  }
}
// CHECK-LABEL: @no_goto_bypass
/* The backwards goto re-enters scopes but bypasses no declaration, so x keeps
   its lifetime.start; the loop never terminates, so no lifetime.end may be
   emitted (checked by the O2-NOT line below). */
void no_goto_bypass() {
  // O2: @llvm.lifetime.start.p0i8(i64 1
  char x;
l1:
  bar(&x, 1);
  char y[5];
  bar(y, 5);
  goto l1;
  // Infinite loop
  // O2-NOT: @llvm.lifetime.end.p0i8(
}
// CHECK-LABEL: @goto_bypass
/* The goto jumps from outside the inner scope to a label inside it, bypassing
   x's declaration, so no lifetime markers may be emitted for x at all. */
void goto_bypass() {
  {
    // O2-NOT: @llvm.lifetime.start.p0i8(i64 1
    // O2-NOT: @llvm.lifetime.end.p0i8(i64 1
    char x;
  l1:
    bar(&x, 1);
  }
  goto l1;
}
// CHECK-LABEL: @no_switch_bypass
/* Locals confined to a single case (or declared after all incoming jumps) are
   never bypassed by the switch dispatch, so both keep start/end markers. */
void no_switch_bypass(int n) {
  switch (n) {
  case 1: {
    // O2: @llvm.lifetime.start.p0i8(i64 1
    // O2: @llvm.lifetime.end.p0i8(i64 1
    char x;
    bar(&x, 1);
    break;
  }
  case 2:
    n = n;
    // O2: @llvm.lifetime.start.p0i8(i64 5
    // O2: @llvm.lifetime.end.p0i8(i64 5
    char y[5];
    bar(y, 5);
    break;
  }
}
// CHECK-LABEL: @switch_bypass
/* x is declared under case 1 but also used from case 2: jumping straight to
   case 2 bypasses the declaration, so no lifetime markers may be emitted. */
void switch_bypass(int n) {
  switch (n) {
  case 1:
    n = n;
    // O2-NOT: @llvm.lifetime.start.p0i8(i64 1
    // O2-NOT: @llvm.lifetime.end.p0i8(i64 1
    char x;
    bar(&x, 1);
    break;
  case 2:
    bar(&x, 1);
    break;
  }
}
// CHECK-LABEL: @indirect_jump
/* With a computed goto the set of jump targets is not statically known, so
   lifetime markers are suppressed entirely for this function. */
void indirect_jump(int n) {
  char x;
  // O2-NOT: @llvm.lifetime
  void *T[] = {&&L};
  goto *T[n];
L:
  bar(&x, 1);
}
// O2-LABEL: @jump_backward_over_declaration(
// O2: %[[p:.*]] = alloca i32*
// O2: %[[v0:.*]] = bitcast i32** %[[p]] to i8*
// O2: call void @llvm.lifetime.start.p0i8(i64 {{.*}}, i8* %[[v0]])
// O2-NOT: call void @llvm.lifetime.start.p0i8(
extern void foo2(int p);
/* The backwards goto re-enters label1 after i's declaration point: per the
   O2-LABEL/O2-NOT checks above, only p gets a lifetime.start; i must not. */
int jump_backward_over_declaration(int a) {
  int *p = 0;
label1:
  if (p) {
    foo2(*p);
    return 0;
  }
  int i = 999;
  if (a != 2) {
    p = &i;
    goto label1;
  }
  return -1;
}
| 1,178 |
1,558 | {
"name": "Theme Selection",
"description": "Ergänzt Ihre Anwendung mit Designunterstützung."
}
| 47 |
372 | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.datastore.v1beta1.model;
/**
* The request for google.datastore.admin.v1beta1.DatastoreAdmin.ExportEntities.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Cloud Datastore API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class GoogleDatastoreAdminV1beta1ExportEntitiesRequest extends com.google.api.client.json.GenericJson {
/**
* Description of what data from the project is included in the export.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private GoogleDatastoreAdminV1beta1EntityFilter entityFilter;
/**
* Client-assigned labels.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.Map<String, java.lang.String> labels;
/**
* Location for the export metadata and data files.
*
* The full resource URL of the external storage location. Currently, only Google Cloud Storage is
* supported. So output_url_prefix should be of the form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`,
* where `BUCKET_NAME` is the name of the Cloud Storage bucket and `NAMESPACE_PATH` is an optional
* Cloud Storage namespace path (this is not a Cloud Datastore namespace). For more information
* about Cloud Storage namespace paths, see [Object name
* considerations](https://cloud.google.com/storage/docs/naming#object-considerations).
*
* The resulting files will be nested deeper than the specified URL prefix. The final output URL
* will be provided in the google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url field.
* That value should be used for subsequent ImportEntities operations.
*
* By nesting the data files deeper, the same Cloud Storage bucket can be used in multiple
* ExportEntities operations without conflict.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String outputUrlPrefix;
  /**
   * Description of what data from the project is included in the export.
   * @return value or {@code null} for none
   * @see #setEntityFilter(GoogleDatastoreAdminV1beta1EntityFilter)
   */
  public GoogleDatastoreAdminV1beta1ExportEntitiesRequest getEntityFilter() {
    return entityFilter;
  }
  /**
   * Description of what data from the project is included in the export.
   * @param entityFilter entityFilter or {@code null} for none
   * @return this request, for call chaining
   */
  public GoogleDatastoreAdminV1beta1ExportEntitiesRequest setEntityFilter(GoogleDatastoreAdminV1beta1EntityFilter entityFilter) {
    this.entityFilter = entityFilter;
    return this;
  }
/**
* Client-assigned labels.
* @return value or {@code null} for none
*/
public java.util.Map<String, java.lang.String> getLabels() {
return labels;
}
/**
* Client-assigned labels.
* @param labels labels or {@code null} for none
*/
public GoogleDatastoreAdminV1beta1ExportEntitiesRequest setLabels(java.util.Map<String, java.lang.String> labels) {
this.labels = labels;
return this;
}
/**
* Location for the export metadata and data files.
*
* The full resource URL of the external storage location. Currently, only Google Cloud Storage is
* supported. So output_url_prefix should be of the form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`,
* where `BUCKET_NAME` is the name of the Cloud Storage bucket and `NAMESPACE_PATH` is an optional
* Cloud Storage namespace path (this is not a Cloud Datastore namespace). For more information
* about Cloud Storage namespace paths, see [Object name
* considerations](https://cloud.google.com/storage/docs/naming#object-considerations).
*
* The resulting files will be nested deeper than the specified URL prefix. The final output URL
* will be provided in the google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url field.
* That value should be used for subsequent ImportEntities operations.
*
* By nesting the data files deeper, the same Cloud Storage bucket can be used in multiple
* ExportEntities operations without conflict.
* @return value or {@code null} for none
*/
public java.lang.String getOutputUrlPrefix() {
return outputUrlPrefix;
}
/**
* Location for the export metadata and data files.
*
* The full resource URL of the external storage location. Currently, only Google Cloud Storage is
* supported. So output_url_prefix should be of the form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`,
* where `BUCKET_NAME` is the name of the Cloud Storage bucket and `NAMESPACE_PATH` is an optional
* Cloud Storage namespace path (this is not a Cloud Datastore namespace). For more information
* about Cloud Storage namespace paths, see [Object name
* considerations](https://cloud.google.com/storage/docs/naming#object-considerations).
*
* The resulting files will be nested deeper than the specified URL prefix. The final output URL
* will be provided in the google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url field.
* That value should be used for subsequent ImportEntities operations.
*
* By nesting the data files deeper, the same Cloud Storage bucket can be used in multiple
* ExportEntities operations without conflict.
* @param outputUrlPrefix outputUrlPrefix or {@code null} for none
*/
public GoogleDatastoreAdminV1beta1ExportEntitiesRequest setOutputUrlPrefix(java.lang.String outputUrlPrefix) {
this.outputUrlPrefix = outputUrlPrefix;
return this;
}
@Override
public GoogleDatastoreAdminV1beta1ExportEntitiesRequest set(String fieldName, Object value) {
return (GoogleDatastoreAdminV1beta1ExportEntitiesRequest) super.set(fieldName, value);
}
@Override
public GoogleDatastoreAdminV1beta1ExportEntitiesRequest clone() {
return (GoogleDatastoreAdminV1beta1ExportEntitiesRequest) super.clone();
}
}
| 1,982 |
590 | /*******************************************************************************
* Copyright 2017 Bstek
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package com.bstek.uflo.heartbeat;
import java.text.ParseException;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import org.hibernate.Query;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.JobKey;
import org.quartz.Scheduler;
import org.quartz.Trigger;
import org.quartz.impl.triggers.CronTriggerImpl;
import com.bstek.uflo.model.Heartbeat;
import com.bstek.uflo.service.SchedulerService;
import com.bstek.uflo.utils.EnvironmentUtils;
/**
* @author Jacky.gao
* @since 2013-5-6
*/
/**
 * Quartz job that inspects the shared heartbeat table to decide whether the
 * cluster member currently running uflo's background jobs has died. When the
 * newest heartbeat is considered stale, this instance takes over: it resets
 * the scheduler, writes a fresh heartbeat record for itself, and installs the
 * heart job that keeps the heartbeat alive.
 */
public class HeartbeatDetectionJob implements Job {

    // Quartz cron expression: fire the heart job every 30 seconds.
    private String heartJobCronExpression = "0/30 * * * * ?";

    /**
     * Runs one detection pass. If {@link #detection} decides a takeover is
     * required, resets the scheduler, records a heartbeat for this instance
     * and schedules the heart job on the freshly reset scheduler.
     *
     * @param context Quartz execution context; its job detail must be a
     *                {@code DetectionJobDetail}
     * @throws JobExecutionException wrapping any failure during detection or takeover
     */
    public void execute(JobExecutionContext context) throws JobExecutionException {
        DetectionJobDetail jobDetail = (DetectionJobDetail) context.getJobDetail();
        Session session = jobDetail.getSessionFactory().openSession();
        try {
            String currentInstanceName = jobDetail.getCurrentInstanceName();
            Operation operation = detection(session, jobDetail.getJobInstanceNames(), currentInstanceName);
            if (operation.equals(Operation.reset)) {
                SchedulerService service = jobDetail.getSchedulerService();
                service.resetScheduer();
                Heartbeat beat = new Heartbeat();
                Calendar c = Calendar.getInstance();
                c.setTime(new Date());
                // Date the new heartbeat one second ahead so it is strictly newer than "now".
                c.add(Calendar.SECOND, 1);
                beat.setDate(c.getTime());
                beat.setId(UUID.randomUUID().toString());
                beat.setInstanceName(currentInstanceName);
                session.save(beat);
                initHeartJob(currentInstanceName, service.getScheduler());
            }
        } catch (Exception e) {
            throw new JobExecutionException(e);
        } finally {
            session.flush();
            session.close();
        }
    }

    /** Registers the heart job with its cron trigger on the given scheduler. */
    private void initHeartJob(String currentInstanceName, Scheduler scheduler) throws Exception {
        HeartJobDetail heartJobDetail = buildHeartJobDetail(currentInstanceName);
        Trigger heartJobTrigger = buildHeartJobTrigger();
        scheduler.scheduleJob(heartJobDetail, heartJobTrigger);
    }

    /** Builds the heart-job detail bound to this instance's name. */
    private HeartJobDetail buildHeartJobDetail(String currentInstanceName) {
        SessionFactory sessionFactory = EnvironmentUtils.getEnvironment().getSessionFactory();
        HeartJobDetail jobDetail = new HeartJobDetail(sessionFactory, currentInstanceName);
        jobDetail.setKey(new JobKey("UfloHeartJob", "uflo_background_system_job"));
        jobDetail.setJobClass(HeartJob.class);
        return jobDetail;
    }

    /**
     * Builds the cron trigger for the heart job.
     *
     * @return a trigger firing per {@link #heartJobCronExpression}
     */
    private Trigger buildHeartJobTrigger() {
        CronTriggerImpl trigger = new CronTriggerImpl();
        trigger.setName("UfloHeartJobTrigger");
        try {
            trigger.setCronExpression(heartJobCronExpression);
            return trigger;
        } catch (ParseException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Decides whether this instance must take over the background jobs. Each
     * instance is granted a grace window proportional to its position in the
     * cluster list, so at most one instance decides to take over at a time.
     *
     * @param session Hibernate session used to read the newest heartbeat
     * @param clusterJobInstanceNames ordered cluster instance names, e.g. InsA,InsB,InsC,InsD
     * @param currentInstanceName this server's instance name
     * @return {@link Operation#reset} when a takeover is required, otherwise
     *         {@link Operation#donothing}
     */
    @SuppressWarnings("unchecked")
    private Operation detection(Session session, String[] clusterJobInstanceNames, String currentInstanceName) {
        Query query = session.createQuery("from " + Heartbeat.class.getName() + " b order by b.date desc");
        List<Heartbeat> heartbeats = query.setMaxResults(1).list();
        // 1-based position of this instance in the cluster list.
        int currentPos = getPosition(clusterJobInstanceNames, currentInstanceName) + 1;
        if (heartbeats.size() > 0) {
            Date now = new Date();
            Heartbeat heartbeat = heartbeats.get(0);
            Date beatDate = heartbeat.getDate();
            Calendar beatCalendar = Calendar.getInstance();
            beatCalendar.setTime(beatDate);
            String beatInstanceName = heartbeat.getInstanceName();
            // Grace period granted per position slot, in seconds.
            int secondUnit = 40;
            int beatPos = getPosition(clusterJobInstanceNames, beatInstanceName) + 1;
            if (!currentInstanceName.equals(beatInstanceName)) {
                int currentSecond = currentPos * secondUnit;
                if (currentPos > beatPos) {
                    beatCalendar.add(Calendar.SECOND, currentSecond);
                } else if (currentPos < beatPos) {
                    // Wrap around the ring: instances listed before the beating one
                    // wait for the instances after it to get their slots first.
                    currentSecond = (currentPos + (clusterJobInstanceNames.length - beatPos)) * secondUnit;
                    beatCalendar.add(Calendar.SECOND, currentSecond);
                }
            } else {
                // Our own heartbeat: allow a full round of the cluster before
                // considering it stale.
                beatCalendar.add(Calendar.SECOND, secondUnit * clusterJobInstanceNames.length);
            }
            if (now.compareTo(beatCalendar.getTime()) > 0) {
                // Current time is past heartbeat time + grace window: the instance
                // running the job is considered dead, so take over.
                return Operation.reset;
            }
        } else {
            // No heartbeat recorded yet: only the first instance bootstraps the job.
            if (currentPos == 1) return Operation.reset;
        }
        return Operation.donothing;
    }

    /**
     * Returns the zero-based index of {@code instanceName} within
     * {@code instanceNames}.
     * NOTE(review): falls back to 0 when the name is absent, which is
     * indistinguishable from a match at index 0 — confirm callers always pass
     * a name contained in the array.
     */
    private int getPosition(String[] instanceNames, String instanceName) {
        int pos = 0;
        for (int i = 0; i < instanceNames.length; i++) {
            String name = instanceNames[i];
            if (name.equals(instanceName)) {
                pos = i;
            }
        }
        return pos;
    }

    /** Outcome of one detection pass. */
    enum Operation {
        reset, donothing
    }
}
| 1,923 |
911 | <reponame>Pieter12345/ComputerCraft<gh_stars>100-1000
/*
* This file is part of the public ComputerCraft API - http://www.computercraft.info
* Copyright <NAME>, 2011-2017. This API may be redistributed unmodified and in full only.
* For help using the API, and posting your mods, visit the forums at computercraft.info.
*/
package dan200.computercraft.api.media;
import dan200.computercraft.api.filesystem.IMount;
import net.minecraft.item.ItemStack;
import net.minecraft.util.SoundEvent;
import net.minecraft.world.World;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
/**
* Represents an item that can be placed in a disk drive and used by a Computer.
* Implement this interface on your Item class to allow it to be used in the drive.
*/
/**
 * Represents an item that can be placed in a disk drive and used by a Computer.
 * Implement this interface on your Item class to allow it to be used in the drive.
 */
public interface IMedia
{
    /**
     * Get a string representing the label of this item. Will be called via {@code disk.getLabel()} in lua.
     *
     * @param stack The itemstack to inspect.
     * @return The label. ie: "Dan's Programs".
     */
    @Nullable
    String getLabel( @Nonnull ItemStack stack );

    /**
     * Set a string representing the label of this item. Will be called via {@code disk.setLabel()} in lua.
     *
     * @param stack The itemstack to modify.
     * @param label The string to set the label to.
     * @return true if the label was updated, false if the label may not be modified.
     */
    boolean setLabel( @Nonnull ItemStack stack, @Nullable String label );

    /**
     * If this disk represents an item with audio (like a record), get the readable name of the audio track, e.g.
     * "Artist Name - Still Alive".
     *
     * @param stack The itemstack to inspect.
     * @return The name, or null if this item does not represent an item with audio.
     */
    @Nullable
    String getAudioTitle( @Nonnull ItemStack stack );

    /**
     * If this disk represents an item with audio (like a record), get the resource name of the audio track to play.
     *
     * @param stack The itemstack to inspect.
     * @return The sound event, or null if this item does not represent an item with audio.
     */
    @Nullable
    SoundEvent getAudio( @Nonnull ItemStack stack );

    /**
     * If this disk represents an item with data (like a floppy disk), get a mount representing its contents. This will
     * be mounted onto the filesystem of the computer while the media is in the disk drive.
     *
     * @param stack The itemstack to inspect.
     * @param world The world in which the item and disk drive reside.
     * @return The mount, or null if this item does not represent an item with data. If the mount returned also
     * implements {@link dan200.computercraft.api.filesystem.IWritableMount}, it will be mounted using mountWritable()
     * @see dan200.computercraft.api.filesystem.IMount
     * @see dan200.computercraft.api.filesystem.IWritableMount
     * @see dan200.computercraft.api.ComputerCraftAPI#createSaveDirMount(World, String, long)
     * @see dan200.computercraft.api.ComputerCraftAPI#createResourceMount(Class, String, String)
     */
    @Nullable
    IMount createDataMount( @Nonnull ItemStack stack, @Nonnull World world );
}
| 1,014 |
4,772 | <filename>jpa/deferred/src/main/java/example/repo/Customer1356Repository.java
package example.repo;
import example.model.Customer1356;
import java.util.List;
import org.springframework.data.repository.CrudRepository;
/**
 * Spring Data repository exposing CRUD operations for {@code Customer1356}
 * entities keyed by their {@code Long} id.
 */
public interface Customer1356Repository extends CrudRepository<Customer1356, Long> {

    /** Returns all customers whose last name matches {@code lastName} exactly (derived query). */
    List<Customer1356> findByLastName(String lastName);
}
| 115 |
799 | import demistomock as demisto
from CommonServerPython import *
from JSONFeedApiModule import * # noqa: E402
def main():
    """Entry point: build the Fastly feed configuration and hand off to the
    generic JSON feed runner.

    Both feeds read the same Fastly public-ip-list endpoint; they differ only
    in which JSON field is extracted and the indicator type it maps to.
    """
    # Keep only the integration parameters that were actually configured.
    params = {key: value for key, value in demisto.params().items() if value is not None}

    def _fastly_feed(extractor, indicator_type):
        # One feed entry against the shared Fastly endpoint.
        return {
            'url': 'https://api.fastly.com/public-ip-list',
            'extractor': extractor,
            'indicator': 'ip',
            'indicator_type': indicator_type,
        }

    params['feed_name_to_config'] = {
        'CIDR': _fastly_feed("addresses[].{ip:@}", FeedIndicatorType.CIDR),
        'IPv6CIDR': _fastly_feed("ipv6_addresses[].{ip:@}", FeedIndicatorType.IPv6CIDR),
    }
    feed_main(params, 'Fastly Feed', 'fastly')
# Demisto/XSOAR executes integration code under different module names
# depending on the runtime ('__main__' for scripts, '__builtin__' on
# Python 2, 'builtins' on Python 3), so all three are checked before
# invoking the entry point.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 401 |
1,545 | <reponame>pkumar-singh/bookkeeper
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.testing;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.async.ResultCallback;
import com.github.dockerjava.api.command.LogContainerCmd;
import com.github.dockerjava.api.model.Frame;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.extern.slf4j.Slf4j;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.containers.ContainerLaunchException;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.output.WaitingConsumer;
import org.testcontainers.containers.wait.strategy.WaitStrategy;
import org.testcontainers.utility.LogUtils;
/**
* Etcd test container.
*/
@Slf4j
/**
 * Testcontainers wrapper that runs a single-node etcd (v3.3) server for
 * integration tests and exposes bookkeeper-style {@code etcd://} service URIs
 * for it, both from the host and from within the docker network.
 */
@Slf4j
public class EtcdContainer extends GenericContainer<EtcdContainer> {

    /** Docker log callback forwarding each container log frame to slf4j. */
    static class LogContainerResultCb extends ResultCallback.Adapter<Frame> {
        @Override
        public void onNext(Frame frame) {
            log.info(new String(frame.getPayload(), UTF_8));
        }
    }

    // Network alias / host name of the etcd node inside the docker network.
    public static final String NAME = "etcd";
    // etcd's standard client port inside the container.
    public static final int CLIENT_PORT = 2379;

    // Logical cluster name; embedded in service URIs and the container name.
    private final String clusterName;

    public EtcdContainer(String clusterName) {
        super("quay.io/coreos/etcd:v3.3");
        this.clusterName = clusterName;
    }

    /** Service URI reachable from the test host (uses the mapped client port). */
    public String getExternalServiceUri() {
        return "etcd://" + getContainerIpAddress() + ":" + getEtcdClientPort() + "/clusters/" + clusterName;
    }

    /** Service URI reachable from other containers on the same docker network. */
    public String getInternalServiceUri() {
        return "etcd://" + NAME + ":" + CLIENT_PORT + "/clusters/" + clusterName;
    }

    /**
     * Configures a one-member etcd cluster that listens on all interfaces and
     * advertises itself under the {@link #NAME} alias, then starts tailing the
     * container log asynchronously.
     */
    @Override
    protected void configure() {
        super.configure();
        String[] command = new String[] {
            "/usr/local/bin/etcd",
            "--name", NAME + "0",
            "--initial-advertise-peer-urls", "http://" + NAME + ":2380",
            "--listen-peer-urls", "http://0.0.0.0:2380",
            "--advertise-client-urls", "http://" + NAME + ":2379",
            "--listen-client-urls", "http://0.0.0.0:2379",
            "--initial-cluster", NAME + "0=http://" + NAME + ":2380"
        };
        this.withNetworkAliases(NAME)
            .withExposedPorts(CLIENT_PORT)
            .withCreateContainerCmdModifier(createContainerCmd -> {
                createContainerCmd.withHostName(NAME);
                createContainerCmd.withName(clusterName + "-" + NAME);
            })
            .withCommand(command)
            .withNetworkAliases(NAME)
            .waitingFor(waitStrategy());
        tailContainerLog();
    }

    /**
     * Asynchronously follows the container's stdout/stderr and mirrors it into
     * the test log. Polls every 100ms until the container id is assigned,
     * since the container may not exist yet when this is called from
     * {@link #configure()}.
     */
    public void tailContainerLog() {
        CompletableFuture.runAsync(() -> {
            while (null == this.getContainerId()) {
                try {
                    TimeUnit.MILLISECONDS.sleep(100);
                } catch (InterruptedException e) {
                    return;
                }
            }
            LogContainerCmd logContainerCmd = this.dockerClient.logContainerCmd(this.getContainerId());
            logContainerCmd.withStdOut(true).withStdErr(true).withFollowStream(true);
            logContainerCmd.exec(new LogContainerResultCb());
        });
    }

    /** Host-side port mapped to the container's etcd client port. */
    public int getEtcdClientPort() {
        return getMappedPort(CLIENT_PORT);
    }

    /** HTTP endpoint for a plain etcd client running on the test host. */
    public String getClientEndpoint() {
        return String.format("http://%s:%d", getContainerIpAddress(), getEtcdClientPort());
    }

    /**
     * Wait strategy that follows the container output until etcd logs
     * "ready to serve client requests", failing the launch on timeout.
     */
    private WaitStrategy waitStrategy() {
        return new org.testcontainers.containers.wait.strategy.AbstractWaitStrategy() {
            @Override
            protected void waitUntilReady() {
                final DockerClient client = DockerClientFactory.instance().client();
                final WaitingConsumer waitingConsumer = new WaitingConsumer();
                LogUtils.followOutput(client, waitStrategyTarget.getContainerId(), waitingConsumer);
                try {
                    waitingConsumer.waitUntil(
                        f -> f.getUtf8String().contains("ready to serve client requests"),
                        startupTimeout.getSeconds(),
                        TimeUnit.SECONDS,
                        1
                    );
                } catch (TimeoutException e) {
                    throw new ContainerLaunchException("Timed out");
                }
            }
        };
    }
}
668 | /*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.oracle.truffle.llvm.runtime.types.symbols;
import java.util.regex.Pattern;
/**
 * Utilities for rendering LLVM identifiers (global/local variables, basic
 * blocks and types) with the correct sigil ('@' or '%') and quoting rules.
 */
public final class LLVMIdentifier {

    /** Placeholder used when no name is known. */
    public static final String UNKNOWN = "<anon>";

    // Matches an already-formed global identifier: '@', optional '\01' mangling
    // prefix, then a quoted name or a bare identifier.
    private static final Pattern GLOBAL_VARNAME_PATTERN = Pattern.compile("@(\\\\01)?(\"[^\"]+\"|[\\w\\d\\u0024_\\u002e]+)");
    // Matches an already-formed local identifier: '%' then a quoted or bare name.
    private static final Pattern LOCAL_VARNAME_PATTERN = Pattern.compile("%(\"[^\"]+\"|[\\w\\d\\u0024_\\u002e]+)");

    private LLVMIdentifier() {
    }

    /** Prefixes {@code name} with '@' (quoting if needed) unless it already is a global identifier. */
    public static String toGlobalIdentifier(String name) {
        if (GLOBAL_VARNAME_PATTERN.matcher(name).matches()) {
            // already a global identifier
            return name;
        } else {
            return "@" + escapeNameIfNecessary(name);
        }
    }

    /** Prefixes {@code name} with '%' (quoting if needed) unless it already is a local identifier. */
    public static String toLocalIdentifier(String name) {
        if (LOCAL_VARNAME_PATTERN.matcher(name).matches()) {
            // already a local identifier
            return name;
        } else {
            return "%" + escapeNameIfNecessary(name);
        }
    }

    /** Named basic blocks use local-identifier syntax. */
    public static String toExplicitBlockName(String name) {
        return toLocalIdentifier(name);
    }

    /** Unnamed basic blocks are referred to by their numeric label. */
    public static String toImplicitBlockName(int label) {
        return String.format("%d", label);
    }

    /** Type identifiers use local-identifier syntax. */
    public static String toTypeIdentifier(String name) {
        return toLocalIdentifier(name);
    }

    private static final Pattern UNESCAPED_VARNAME_PATTERN = Pattern.compile("[\\w\\d\\u0024_\\u002e]+");
    private static final Pattern ESCAPED_VARNAME_PATTERN = Pattern.compile("(%|@|@\\\\01)?\"[^\"]+\"");

    /**
     * Quotes and escapes {@code name} when it contains characters that are not
     * legal in a bare LLVM identifier; already-quoted names pass through.
     * NOTE(review): the escape below masks with {@code c & 0xff}, so characters
     * above U+00FF lose their high bits — confirm inputs are Latin-1 only.
     */
    private static String escapeNameIfNecessary(String unescaped) {
        // see http://releases.llvm.org/3.8.1/docs/LangRef.html#identifiers
        if (UNESCAPED_VARNAME_PATTERN.matcher(unescaped).matches()) {
            return unescaped;
        } else if (ESCAPED_VARNAME_PATTERN.matcher(unescaped).matches()) {
            // do not escape an already escaped name again
            return unescaped;
        }
        final StringBuilder builder = new StringBuilder("\"");
        for (int i = 0; i < unescaped.length(); i++) {
            final char c = unescaped.charAt(i);
            if (c == '\"' || c < ' ' || c > '~') {
                // use the format "\xx" where xx is the hex-value of c
                builder.append(String.format("\\%02x", c & 0xff));
            } else {
                builder.append(c);
            }
        }
        builder.append('\"');
        return builder.toString();
    }
}
781 | <reponame>BeastLe9enD/zpl<filename>code/source/threading/sem.c
// file: source/threading/sem.c
ZPL_BEGIN_C_DECLS
// Convenience wrapper: releasing a semaphore posts exactly one permit.
void zpl_semaphore_release(zpl_semaphore *s) { zpl_semaphore_post(s, 1); }

#if defined(ZPL_SYSTEM_WINDOWS)

    // Win32 backend: unnamed semaphore, zero initial permits, effectively
    // unbounded maximum count; post releases `count` permits in one call.
    void zpl_semaphore_init   (zpl_semaphore *s)                { s->win32_handle = CreateSemaphoreA(NULL, 0, ZPL_I32_MAX, NULL); }
    void zpl_semaphore_destroy(zpl_semaphore *s)                { CloseHandle(s->win32_handle); }
    void zpl_semaphore_post   (zpl_semaphore *s, zpl_i32 count) { ReleaseSemaphore(s->win32_handle, count, NULL); }
    void zpl_semaphore_wait   (zpl_semaphore *s)                { WaitForSingleObject(s->win32_handle, INFINITE); }

#elif defined(ZPL_SYSTEM_OSX)

    // Mach backend: FIFO wakeup policy; posting N permits signals N times.
    void zpl_semaphore_init   (zpl_semaphore *s)                { semaphore_create(mach_task_self(), &s->osx_handle, SYNC_POLICY_FIFO, 0); }
    void zpl_semaphore_destroy(zpl_semaphore *s)                { semaphore_destroy(mach_task_self(), s->osx_handle); }
    void zpl_semaphore_post   (zpl_semaphore *s, zpl_i32 count) { while (count --> 0) semaphore_signal(s->osx_handle); }
    void zpl_semaphore_wait   (zpl_semaphore *s)                { semaphore_wait(s->osx_handle); }

#elif defined(ZPL_SYSTEM_UNIX)

    // POSIX backend: unnamed, process-private semaphore. The wait loop retries
    // when sem_wait is interrupted by a signal (EINTR).
    void zpl_semaphore_init   (zpl_semaphore *s)                { sem_init(&s->unix_handle, 0, 0); }
    void zpl_semaphore_destroy(zpl_semaphore *s)                { sem_destroy(&s->unix_handle); }
    void zpl_semaphore_post   (zpl_semaphore *s, zpl_i32 count) { while (count --> 0) sem_post(&s->unix_handle); }
    void zpl_semaphore_wait   (zpl_semaphore *s)                { int i; do { i = sem_wait(&s->unix_handle); } while (i == -1 && errno == EINTR); }

#else
#   error Semaphores for this OS are not implemented
#endif
ZPL_END_C_DECLS
| 836 |
3,384 | //
// XVimLowercaseEvaluator.h
// XVim
//
// Created by <NAME> on 6/04/12.
// Copyright (c) 2012 __MyCompanyName__. All rights reserved.
//
#import "XVimOperatorEvaluator.h"
// Operator evaluator subclass; adds no members of its own and inherits all
// behavior from XVimOperatorEvaluator. Presumably backs Vim's lowercase
// operator ("gu") — confirm at the evaluator registration site.
@interface XVimLowercaseEvaluator : XVimOperatorEvaluator
@end
| 98 |
4,262 | <reponame>rikvb/camel
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.spring.xml.handler;
import org.apache.camel.Processor;
import org.apache.camel.builder.DeadLetterChannelBuilder;
import org.apache.camel.builder.DefaultErrorHandlerBuilder;
import org.apache.camel.processor.errorhandler.RedeliveryPolicy;
import org.apache.camel.spring.spi.TransactionErrorHandlerBuilder;
import org.apache.camel.util.IOHelper;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Verifies that the error-handler XML namespace parser produces correctly
 * configured builder beans (default, transactional and dead-letter variants).
 */
public class ErrorHandlerDefinitionParserTest {

    /** Spring context built from the error-handler test bean definitions. */
    protected ClassPathXmlApplicationContext ctx;

    @BeforeEach
    public void setUp() throws Exception {
        ctx = new ClassPathXmlApplicationContext("org/apache/camel/spring/xml/handler/ErrorHandlerDefinitionParser.xml");
    }

    @AfterEach
    public void tearDown() throws Exception {
        IOHelper.close(ctx);
    }

    @Test
    public void testDefaultErrorHandler() {
        // The XML-configured redelivery settings must be applied to the builder.
        DefaultErrorHandlerBuilder builder = ctx.getBean("defaultErrorHandler", DefaultErrorHandlerBuilder.class);
        assertNotNull(builder);
        RedeliveryPolicy redelivery = builder.getRedeliveryPolicy();
        assertNotNull(redelivery);
        assertEquals(2, redelivery.getMaximumRedeliveries(), "Wrong maximumRedeliveries");
        assertEquals(0, redelivery.getRedeliveryDelay(), "Wrong redeliveryDelay");
        assertEquals(false, redelivery.isLogStackTrace(), "Wrong logStackTrace");
        // A second, plain "errorHandler" bean must also be resolvable.
        builder = ctx.getBean("errorHandler", DefaultErrorHandlerBuilder.class);
        assertNotNull(builder);
    }

    @Test
    public void testTransactionErrorHandler() {
        TransactionErrorHandlerBuilder builder
                = ctx.getBean("transactionErrorHandler", TransactionErrorHandlerBuilder.class);
        assertNotNull(builder);
        assertNotNull(builder.getTransactionTemplate());
        // The custom onRedelivery processor declared in the XML must survive parsing.
        Processor onRedelivery = builder.getOnRedelivery();
        assertTrue(onRedelivery instanceof MyErrorProcessor, "It should be MyErrorProcessor");
    }

    @Test
    public void testTXErrorHandler() {
        TransactionErrorHandlerBuilder builder = ctx.getBean("txEH", TransactionErrorHandlerBuilder.class);
        assertNotNull(builder);
        assertNotNull(builder.getTransactionTemplate());
    }

    @Test
    public void testDeadLetterErrorHandler() {
        DeadLetterChannelBuilder builder = ctx.getBean("deadLetterErrorHandler", DeadLetterChannelBuilder.class);
        assertNotNull(builder);
        assertEquals("log:dead", builder.getDeadLetterUri(), "Get wrong deadletteruri");
        RedeliveryPolicy redelivery = builder.getRedeliveryPolicy();
        assertNotNull(redelivery);
        assertEquals(2, redelivery.getMaximumRedeliveries(), "Wrong maximumRedeliveries");
        assertEquals(1000, redelivery.getRedeliveryDelay(), "Wrong redeliveryDelay");
        assertEquals(true, redelivery.isLogHandled(), "Wrong logStackTrace");
        assertEquals(true, redelivery.isAsyncDelayedRedelivery(), "Wrong asyncRedeliveryDelayed");
    }
}
| 1,331 |
1,346 | <gh_stars>1000+
package com.ctrip.platform.dal.daogen.generator.processor.csharp;
import com.ctrip.platform.dal.daogen.CodeGenContext;
import com.ctrip.platform.dal.daogen.DalProcessor;
import com.ctrip.platform.dal.daogen.generator.csharp.CSharpCodeGenContext;
import com.ctrip.platform.dal.daogen.log.LoggerManager;
import org.apache.commons.io.FileUtils;
import java.io.File;
/**
 * Code-generation step that prepares the on-disk directory layout for
 * generated C# DAL artifacts under {@code <generatePath>/<projectId>/cs}.
 */
public class CSharpDirectoryPreparerProcessor implements DalProcessor {

    /** Sub-directories created beneath the project's cs/ root. */
    private static final String[] SUB_DIRECTORIES = {"IDao", "Dao", "Entity", "Test", "Config"};

    /**
     * Creates (and, when regenerating, first deletes) the project's cs/
     * directory tree with its IDao/Dao/Entity/Test/Config sub-directories.
     *
     * @param context must be a {@link CSharpCodeGenContext}
     * @throws Exception if any directory cannot be deleted or created; the
     *         error is logged before being rethrown
     */
    @Override
    public void process(CodeGenContext context) throws Exception {
        CSharpCodeGenContext ctx = (CSharpCodeGenContext) context;
        int projectId = ctx.getProjectId();
        boolean regenerate = ctx.isRegenerate();
        File mavenLikeDir = new File(String.format("%s/%s/cs", ctx.getGeneratePath(), projectId));
        try {
            // A regeneration starts from a clean tree.
            if (mavenLikeDir.exists() && regenerate) {
                FileUtils.forceDelete(mavenLikeDir);
            }
            // One loop replaces the previous five copy-pasted exists/forceMkdir blocks.
            for (String name : SUB_DIRECTORIES) {
                File dir = new File(mavenLikeDir, name);
                if (!dir.exists()) {
                    FileUtils.forceMkdir(dir);
                }
            }
        } catch (Throwable e) {
            LoggerManager.getInstance().error(e);
            throw e;
        }
    }
}
| 881 |
14,668 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_SHARING_SHARING_HANDLER_REGISTRY_H_
#define CHROME_BROWSER_SHARING_SHARING_HANDLER_REGISTRY_H_
#include <memory>

#include "chrome/browser/sharing/proto/sharing_message.pb.h"
class SharingMessageHandler;

// Pure interface for a registry that owns SharingMessageHandler instances and
// maps each SharingMessage payload case to at most one handler.
class SharingHandlerRegistry {
 public:
  SharingHandlerRegistry() = default;
  virtual ~SharingHandlerRegistry() = default;

  // Gets SharingMessageHandler registered for |payload_case|.
  // NOTE(review): presumably returns nullptr when no handler is registered —
  // confirm against the concrete implementation.
  virtual SharingMessageHandler* GetSharingHandler(
      chrome_browser_sharing::SharingMessage::PayloadCase payload_case) = 0;

  // Register SharingMessageHandler for |payload_case|; the registry takes
  // ownership of |handler|.
  virtual void RegisterSharingHandler(
      std::unique_ptr<SharingMessageHandler> handler,
      chrome_browser_sharing::SharingMessage::PayloadCase payload_case) = 0;

  // Unregister SharingMessageHandler for |payload_case|.
  virtual void UnregisterSharingHandler(
      chrome_browser_sharing::SharingMessage::PayloadCase payload_case) = 0;
};
#endif // CHROME_BROWSER_SHARING_SHARING_HANDLER_REGISTRY_H_
| 371 |
348 | {"nom":"Jury","circ":"2ème circonscription","dpt":"Moselle","inscrits":715,"abs":427,"votants":288,"blancs":21,"nuls":4,"exp":263,"res":[{"nuance":"LR","nom":"<NAME>","voix":139},{"nuance":"REM","nom":"<NAME>","voix":124}]} | 90 |
2,449 | /**
* Copyright 2021 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.deployservice.common;
import com.pinterest.deployservice.knox.CommandLineKnox;
import com.pinterest.deployservice.knox.FileSystemKnox;
import com.pinterest.deployservice.knox.Knox;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
/**
 * Manages access to a Knox-stored key: registers the key with the local Knox
 * daemon on first use, then serves the primary key value to callers.
 */
public class KnoxKeyManager {
    private final Logger LOG = LoggerFactory.getLogger(KnoxKeyManager.class);

    // Knox client bound to the registered key; stays null until init() succeeds.
    private Knox mKnox;

    /**
     * Registers {@code key} through the command-line Knox client when its key
     * file is not present yet, then binds a file-system backed client to it.
     *
     * @param key knox key name (also the file name under /var/lib/knox/v0/keys)
     * @throws Exception if registration fails (non-zero exit code)
     */
    private void registerKey(String key) throws Exception {
        File file = new File("/var/lib/knox/v0/keys/" + key);
        if (!file.exists()) {
            CommandLineKnox cmdKnox = new CommandLineKnox(key, Runtime.getRuntime());
            if (cmdKnox.register() != 0) {
                throw new RuntimeException("Error registering keys: " + key);
            }
            // Poll (100ms intervals, ~5s max) for the key file to appear after
            // registration. NOTE(review): on timeout we fall through anyway and
            // rely on FileSystemKnox to fail later — confirm this is intended.
            long startTime = System.currentTimeMillis();
            while (!file.exists() && System.currentTimeMillis() - startTime < 5000) {
                try {
                    Thread.sleep(100);
                }
                catch (InterruptedException ignore) {
                }
            }
        }
        mKnox = new FileSystemKnox(key);
    }

    /**
     * Initializes the manager for {@code key}. Registration failures are
     * logged and swallowed; {@link #getKey()} then returns null.
     */
    public void init(String key) {
        try {
            LOG.info("Init the knox key with value {}", key);
            registerKey(key);
        } catch (Exception e) {
            LOG.error("Unable to register key due to exception :" + e.getMessage());
        }
    }

    /**
     * Returns the primary key decoded as UTF-8, or null when the manager was
     * never initialized successfully or the key cannot be read.
     */
    public String getKey() {
        if (mKnox == null) {
            LOG.error("Returning null key since mKnox is null");
            return null;
        }
        try {
            String knoxKey = new String(mKnox.getPrimaryKey(), "UTF-8");
            return knoxKey;
        } catch (Exception e) {
            LOG.error("Returning null key due to exception :" + e.getMessage());
            return null;
        }
    }
}
| 1,009 |
357 | /*
*
* Copyright (c) 2012-2015 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*/
package com.vmware.identity.performanceSupport;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.commons.lang.Validate;
/**
* Define the bucket (measurement point) used to track performance metrics.
* Besides {@code PerfMeasurementInterface} and {@code PerfMeasurementTransaction},
* {@code providerInfo} is also used for data bucketing since network connection
* should have an impact on measurements.
*
*/
public final class PerfBucketKey implements Comparable<PerfBucketKey>
{
    private static final String PROVIDER_INFO_NA = "PROVIDERID-NA";
    private static final String PROVIDER_INFO_DEFAULT = "PROVIDERID-DEFAULT";

    private final PerfMeasurementPoint measurementPt;
    private final String providerInfo; // domain name, or domain ID provider's uri

    /**
     * c'tor with fully specified parameters
     * @param pt specified the point of the measurement, cannot be null.
     * @param idStr either an upn which will be parsed to get the domain name,
     *        or a domain's identity provider uri such as ldap://localhost:11711.
     *        Can be null, which will be translated to {@code PROVIDER_INFO_NA}.
     */
    public PerfBucketKey(PerfMeasurementPoint pt, String idStr)
    {
        Validate.notNull(pt);
        measurementPt = pt;
        // providerInfo is normalized to upper case here, which keeps the
        // case-insensitive equals() consistent with the case-sensitive hashCode().
        providerInfo = (idStr == null ?
                PROVIDER_INFO_NA.toUpperCase() :
                extractProviderInfo(idStr).toUpperCase());
    }

    /**
     * c'tor with default value for {@code providerInfo}
     * @param pt specified the point of the measurement, cannot be null.
     */
    public PerfBucketKey(PerfMeasurementPoint pt)
    {
        this(pt, PROVIDER_INFO_DEFAULT);
    }

    /** @return the measurement point of this bucket key */
    public PerfMeasurementPoint getMeasurementPoint()
    {
        return measurementPt;
    }

    /** @return the normalized (upper-cased) provider info of this bucket key */
    public String getProviderInfo()
    {
        return providerInfo;
    }

    @Override
    public String toString()
    {
        return "PerfBucketKey [measurementPt=" + measurementPt
                + ", providerInfo=" + providerInfo + "]";
    }

    @Override
    public int hashCode()
    {
        final int prime = 31;
        int result = 1;
        result =
                prime * result + providerInfo.hashCode();
        result =
                prime * result + measurementPt.hashCode();
        return result;
    }

    @Override
    public boolean equals(Object obj)
    {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        PerfBucketKey other = (PerfBucketKey) obj;
        if (!providerInfo.equalsIgnoreCase(other.providerInfo))
            return false;
        if (measurementPt != other.measurementPt)
            return false;
        return true;
    }

    /**
     * Orders by measurement interface, then transaction id (case-insensitive),
     * then provider info (case-insensitive).
     *
     * Fixed: the previous implementation compared {@code getTxId()} with the
     * reference operator {@code !=} before falling through; two keys with
     * equal-content but distinct txId String instances and different provider
     * info compared as 0 while {@code equals} returned false, breaking the
     * Comparable/equals consistency contract.
     */
    @Override
    public int compareTo(PerfBucketKey other)
    {
        // Enum values are singletons, so compareTo() == 0 iff same interface.
        int itfCmp = measurementPt.getItf().compareTo(
                other.getMeasurementPoint().getItf());
        if (itfCmp != 0)
            return itfCmp;

        int txCmp = measurementPt.getTxId().compareToIgnoreCase(
                other.measurementPt.getTxId());
        if (txCmp != 0)
            return txCmp;

        return this.providerInfo.compareToIgnoreCase(other.providerInfo);
    }

    /**
     * Extracts the bucketing provider info from an identity string.
     * @param idStr non-null identity, one of the formats below
     * @return the domain portion, the URI itself, or a fallback marker
     */
    private String extractProviderInfo(String idStr)
    {
        assert idStr != null;
        /**
         * figure out the provider info from the following format:
         * <li> <EMAIL> </li>
         * <li> example.com\someone </li>
         * <li> domain ID provider's URI </li>
         **/
        if (idStr.contains("@"))
        {
            return idStr.substring(idStr.indexOf('@')+1);
        }
        else if (idStr.contains("\\"))
        {
            return idStr.substring(0, idStr.indexOf("\\"));
        }
        else
        {
            try {
                new URI(idStr);
                return idStr;
            }
            catch (URISyntaxException e)
            {
                // NOTE: "unknowDomain" is a typo but is kept verbatim because it
                // is an emitted bucket label; renaming would change reported data.
                return "unknowDomain";
            }
        }
    }
}
| 2,040 |
452 | package com.perfree.common;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* 正则校验工具
*/
public class RegexUtill {

    /**
     * Scheme://rest pattern. Fixed: the original class {@code [a-zA-z]} also
     * matched the ASCII range between 'Z' and 'a' ({@code [ \ ] ^ _ `}),
     * accepting strings such as "_://x" as URLs. Compiled once instead of on
     * every call.
     */
    private static final Pattern URL_PATTERN = Pattern.compile("[a-zA-Z]+://[^\\s]*");

    /**
     * 验证是否是URL (checks whether the whole string looks like a URL).
     *
     * @param url candidate string; {@code null} is treated as not a URL
     * @return {@code true} if the entire string matches scheme://non-whitespace
     */
    public static boolean verifyUrl(String url) {
        // Null-safe: previously matcher(null) threw a NullPointerException.
        return url != null && URL_PATTERN.matcher(url).matches();
    }
}
| 217 |
#ifndef PLATFORM_H
#define PLATFORM_H
#include "graphics.h"
/* Opaque window handle; the concrete struct lives in the per-OS backend. */
typedef struct window window_t;
/* Keyboard keys the engine cares about; KEY_NUM is the count, not a key. */
typedef enum {KEY_A, KEY_D, KEY_S, KEY_W, KEY_SPACE, KEY_NUM} keycode_t;
/* Mouse buttons; BUTTON_NUM is the count, not a button. */
typedef enum {BUTTON_L, BUTTON_R, BUTTON_NUM} button_t;
/* Optional event callbacks; any member may be left NULL by the caller
 * (presumably — confirm against the backend implementations). */
typedef struct {
    void (*key_callback)(window_t *window, keycode_t key, int pressed);
    void (*button_callback)(window_t *window, button_t button, int pressed);
    void (*scroll_callback)(window_t *window, float offset);
} callbacks_t;
/* platform initialization */
void platform_initialize(void);
void platform_terminate(void);
/* window related functions */
window_t *window_create(const char *title, int width, int height);
void window_destroy(window_t *window);
int window_should_close(window_t *window);
/* Attach/retrieve an arbitrary user pointer to a window. */
void window_set_userdata(window_t *window, void *userdata);
void *window_get_userdata(window_t *window);
/* Blit a software framebuffer into the window. */
void window_draw_buffer(window_t *window, framebuffer_t *buffer);
/* input related functions */
void input_poll_events(void);
int input_key_pressed(window_t *window, keycode_t key);
int input_button_pressed(window_t *window, button_t button);
/* Write the current cursor position into *xpos / *ypos. */
void input_query_cursor(window_t *window, float *xpos, float *ypos);
void input_set_callbacks(window_t *window, callbacks_t callbacks);
/* misc platform functions */
float platform_get_time(void);
#endif
| 439 |
#ifndef _ESCALATE_COMMON_H
#define _ESCALATE_COMMON_H

/*! @brief When defined, debug output is enabled on Windows builds. */
//#define DEBUGTRACE 1

#ifdef DEBUGTRACE
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#define dprintf(...) real_dprintf(__VA_ARGS__)
#else
/* Fixed: the trailing ';' inside the macro body made `if (c) dprintf(...);
 * else ...` expand to a syntax error. The do/while(0) must be terminated by
 * the semicolon at the call site, not inside the definition. */
#define dprintf(...) do{}while(0)
#endif

/*! @brief Sets `dwResult` to the return value of `GetLastError()`, prints debug output, then does `break;` */
#define BREAK_ON_ERROR( str ) { dwResult = GetLastError(); dprintf( "%s. error=%d", str, dwResult ); break; }
/*! @brief Sets `dwResult` to `error`, prints debug output, then `break;` */
#define BREAK_WITH_ERROR( str, err ) { dwResult = err; dprintf( "%s. error=%d", str, dwResult ); break; }
/*! @brief Sets `dwResult` to the return value of `WSAGetLastError()`, prints debug output, then does `break;` */
#define BREAK_ON_WSAERROR( str ) { dwResult = WSAGetLastError(); dprintf( "%s. error=%d", str, dwResult ); break; }
/*! @brief Sets `dwResult` to the return value of `GetLastError()`, prints debug output, then does `continue;` */
#define CONTINUE_ON_ERROR( str ) { dwResult = GetLastError(); dprintf( "%s. error=%d", str, dwResult ); continue; }
/*! @brief Close a service handle if not already closed and set the handle to NULL. */
#define CLOSE_SERVICE_HANDLE( h ) if( h ) { CloseServiceHandle( h ); h = NULL; }
/*! @brief Close a handle if not already closed and set the handle to NULL. */
#define CLOSE_HANDLE( h ) if( h ) { DWORD dwHandleFlags; if(GetHandleInformation( h , &dwHandleFlags)) CloseHandle( h ); h = NULL; }

#ifdef DEBUGTRACE
/*!
 * @brief Output a debug string to the debug console.
 * @details The function emits debug strings via `OutputDebugStringA`, hence all messages can be viewed
 *          using Visual Studio's _Output_ window, _DebugView_ from _SysInternals_, or _Windbg_.
 *          The buffer reserves 3 bytes for the appended "\r\n" plus the NUL terminator.
 */
static void real_dprintf(char *format, ...) {
    va_list args;
    char buffer[1024];
    va_start(args,format);
    vsnprintf_s(buffer, sizeof(buffer), sizeof(buffer)-3, format,args);
    strcat_s(buffer, sizeof(buffer), "\r\n");
    OutputDebugStringA(buffer);
}
#endif

#endif
4,035 | {
"private": true,
"name": "preact-prerendering-hydration-preact8",
"dependencies": {
"preact": "8.5.3",
"preact-render-to-string": "4.1.0"
}
}
| 77 |
862 | <filename>atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/sweep/queue/SweepableCells.java<gh_stars>100-1000
/*
* (c) Copyright 2018 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.atlasdb.sweep.queue;
import static com.google.common.base.Preconditions.checkState;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import com.google.common.collect.PeekingIterator;
import com.google.common.collect.Streams;
import com.palantir.atlasdb.AtlasDbConstants;
import com.palantir.atlasdb.keyvalue.api.Cell;
import com.palantir.atlasdb.keyvalue.api.CellReference;
import com.palantir.atlasdb.keyvalue.api.ColumnRangeSelection;
import com.palantir.atlasdb.keyvalue.api.ImmutableTargetedSweepMetadata;
import com.palantir.atlasdb.keyvalue.api.KeyValueService;
import com.palantir.atlasdb.keyvalue.api.RowColumnRangeIterator;
import com.palantir.atlasdb.keyvalue.api.TableReference;
import com.palantir.atlasdb.keyvalue.api.TargetedSweepMetadata;
import com.palantir.atlasdb.keyvalue.api.Value;
import com.palantir.atlasdb.keyvalue.api.WriteReference;
import com.palantir.atlasdb.keyvalue.api.WriteReferencePersister;
import com.palantir.atlasdb.logging.LoggingArgs;
import com.palantir.atlasdb.schema.generated.SweepableCellsTable;
import com.palantir.atlasdb.schema.generated.SweepableCellsTable.SweepableCellsColumnValue;
import com.palantir.atlasdb.schema.generated.SweepableCellsTable.SweepableCellsRow;
import com.palantir.atlasdb.schema.generated.TargetedSweepTableFactory;
import com.palantir.atlasdb.sweep.CommitTsCache;
import com.palantir.atlasdb.sweep.metrics.TargetedSweepMetrics;
import com.palantir.atlasdb.sweep.queue.id.SweepTableIndices;
import com.palantir.atlasdb.transaction.impl.TransactionConstants;
import com.palantir.atlasdb.transaction.service.TransactionService;
import com.palantir.logsafe.SafeArg;
import com.palantir.logsafe.logger.SafeLogger;
import com.palantir.logsafe.logger.SafeLoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.stream.Collectors;
public class SweepableCells extends SweepQueueTable {
    private static final SafeLogger log = SafeLoggerFactory.get(SweepableCells.class);
    // Caches start-ts -> commit-ts lookups so sweep does not hit the
    // transaction service for every entry.
    private final CommitTsCache commitTsCache;
    private final WriteReferencePersister writeReferencePersister;
    // Placeholder reference stored in the non-dedicated row when a partition's
    // writes spill into dedicated rows; its column carries a NEGATIVE write
    // index that encodes the number of dedicated rows (see
    // entryIndicatingNumberOfRequiredRows / writeIndexToNumberOfDedicatedRows).
    private static final WriteReference DUMMY = WriteReference.of(
            TableReference.createFromFullyQualifiedName("dum.my"), Cell.create(new byte[] {0}, new byte[] {0}), false);
    public SweepableCells(
            KeyValueService kvs,
            WriteInfoPartitioner partitioner,
            TargetedSweepMetrics metrics,
            TransactionService transactionService) {
        super(kvs, TargetedSweepTableFactory.of().getSweepableCellsTable(null).getTableRef(), partitioner, metrics);
        this.commitTsCache = CommitTsCache.create(transactionService);
        this.writeReferencePersister = new WriteReferencePersister(new SweepTableIndices(kvs));
    }
    // When a batch is large enough to require dedicated rows, emit only the
    // reference entry here; otherwise no extra reference cells are needed.
    @Override
    Map<Cell, byte[]> populateReferences(PartitionInfo partitionInfo, List<WriteInfo> writes) {
        boolean dedicate = writes.size() > SweepQueueUtils.MAX_CELLS_GENERIC;
        if (dedicate) {
            return addReferenceToDedicatedRows(partitionInfo, writes);
        } else {
            return ImmutableMap.of();
        }
    }
    // Materializes one cell per write; large batches go to dedicated rows.
    @Override
    Map<Cell, byte[]> populateCells(PartitionInfo partitionInfo, List<WriteInfo> writes) {
        Map<Cell, byte[]> cells = new HashMap<>();
        boolean dedicate = writes.size() > SweepQueueUtils.MAX_CELLS_GENERIC;
        long index = 0;
        for (WriteInfo write : writes) {
            cells.putAll(addWrite(partitionInfo, write, dedicate, index));
            index++;
        }
        return cells;
    }
    // Writes the DUMMY reference whose (negative) write index says how many
    // dedicated rows the batch occupies.
    private Map<Cell, byte[]> addReferenceToDedicatedRows(PartitionInfo info, List<WriteInfo> writes) {
        return addCell(info, DUMMY, false, 0, entryIndicatingNumberOfRequiredRows(writes));
    }
    // Negative count: -(ceil(writes / MAX_CELLS_DEDICATED)); the sign marks the
    // entry as a dedicated-row reference rather than a real write index.
    private long entryIndicatingNumberOfRequiredRows(List<WriteInfo> writes) {
        return -(1 + (writes.size() - 1) / SweepQueueUtils.MAX_CELLS_DEDICATED);
    }
    // Builds a single (cell -> serialized column value) entry for one write.
    private Map<Cell, byte[]> addCell(
            PartitionInfo info,
            WriteReference writeRef,
            boolean isDedicatedRow,
            long dedicatedRowNumber,
            long writeIndex) {
        SweepableCellsRow row = computeRow(info, isDedicatedRow, dedicatedRowNumber);
        SweepableCellsColumnValue colVal = createColVal(info.timestamp(), writeIndex, writeRef);
        return ImmutableMap.of(SweepQueueUtils.toCell(row, colVal), colVal.persistValue());
    }
    // Row key = timestamp (dedicated rows) or fine partition (regular rows),
    // plus serialized sweep metadata identifying shard/strategy/row kind.
    private SweepableCellsRow computeRow(PartitionInfo info, boolean isDedicatedRow, long dedicatedRowNumber) {
        TargetedSweepMetadata metadata = ImmutableTargetedSweepMetadata.builder()
                .conservative(info.isConservative().isTrue())
                .dedicatedRow(isDedicatedRow)
                .shard(info.shard())
                .dedicatedRowNumber(dedicatedRowNumber)
                .build();
        long tsOrPartition = getTimestampOrPartition(info, isDedicatedRow);
        return SweepableCellsRow.of(tsOrPartition, metadata.persistToBytes());
    }
    // Row key for reading a non-dedicated row of a given fine partition.
    private SweepableCellsRow computeRow(long partitionFine, ShardAndStrategy shardStrategy) {
        TargetedSweepMetadata metadata = ImmutableTargetedSweepMetadata.builder()
                .conservative(shardStrategy.isConservative())
                .dedicatedRow(false)
                .shard(shardStrategy.shard())
                .dedicatedRowNumber(0)
                .build();
        return SweepableCellsRow.of(partitionFine, metadata.persistToBytes());
    }
    private long getTimestampOrPartition(PartitionInfo info, boolean isDedicatedRow) {
        return isDedicatedRow ? info.timestamp() : SweepQueueUtils.tsPartitionFine(info.timestamp());
    }
    // Column = (start ts modulo fine granularity, write index) -> persisted ref.
    private SweepableCellsColumnValue createColVal(long ts, long index, WriteReference writeRef) {
        SweepableCellsTable.SweepableCellsColumn col = SweepableCellsTable.SweepableCellsColumn.of(tsMod(ts), index);
        return SweepableCellsColumnValue.of(col, writeReferencePersister.persist(writeRef));
    }
    private static long tsMod(long timestamp) {
        return timestamp % SweepQueueUtils.TS_FINE_GRANULARITY;
    }
    /**
     * Reads one batch of sweepable writes for the given shard/strategy and fine
     * partition, restricted to start timestamps in (minTsExclusive, sweepTs),
     * resolving commit status and cleaning up aborted writes along the way.
     */
    SweepBatch getBatchForPartition(
            ShardAndStrategy shardStrategy, long partitionFine, long minTsExclusive, long sweepTs) {
        SweepableCellsRow row = computeRow(partitionFine, shardStrategy);
        RowColumnRangeIterator resultIterator = getRowColumnRange(row, partitionFine, minTsExclusive, sweepTs);
        PeekingIterator<Map.Entry<Cell, Value>> peekingResultIterator = Iterators.peekingIterator(resultIterator);
        WriteBatch writeBatch = getBatchOfWrites(row, peekingResultIterator, sweepTs);
        Multimap<Long, WriteInfo> writesByStartTs = writeBatch.writesByStartTs;
        int entriesRead = writesByStartTs.size();
        // maybeMetrics is inherited from SweepQueueTable (not declared here).
        maybeMetrics.ifPresent(metrics -> metrics.updateEntriesRead(shardStrategy, entriesRead));
        log.debug("Read {} entries from the sweep queue.", SafeArg.of("number", entriesRead));
        TimestampsToSweep tsToSweep = getTimestampsToSweepDescendingAndCleanupAborted(
                shardStrategy, minTsExclusive, sweepTs, writesByStartTs);
        Collection<WriteInfo> writes = getWritesToSweep(writesByStartTs, tsToSweep.timestampsDescending());
        DedicatedRows filteredDedicatedRows = getDedicatedRowsToClear(writeBatch.dedicatedRows, tsToSweep);
        long lastSweptTs = getLastSweptTs(tsToSweep, peekingResultIterator, partitionFine, sweepTs);
        return SweepBatch.of(writes, filteredDedicatedRows, lastSweptTs, tsToSweep.processedAll(), entriesRead);
    }
    // Keeps only the dedicated rows whose start timestamp is actually swept in
    // this batch; the rest must survive for a later pass.
    private DedicatedRows getDedicatedRowsToClear(List<SweepableCellsRow> rows, TimestampsToSweep tsToSweep) {
        return DedicatedRows.of(rows.stream()
                .filter(row -> {
                    TargetedSweepMetadata metadata =
                            TargetedSweepMetadata.BYTES_HYDRATOR.hydrateFromBytes(row.getMetadata());
                    checkState(metadata.dedicatedRow(), "Row not a dedicated row", SafeArg.of("row", row));
                    // For dedicated rows, getTimestampPartition() holds the
                    // actual start timestamp (see getTimestampOrPartition).
                    return tsToSweep.timestampsDescending().contains(row.getTimestampPartition());
                })
                .collect(Collectors.toList()));
    }
    // Accumulates writes up to SWEEP_BATCH_SIZE, stopping early at the first
    // entry known to have committed after sweepTs.
    private WriteBatch getBatchOfWrites(
            SweepableCellsRow row, PeekingIterator<Map.Entry<Cell, Value>> resultIterator, long sweepTs) {
        WriteBatch writeBatch = new WriteBatch();
        while (resultIterator.hasNext() && writeBatch.writesByStartTs.size() < SweepQueueUtils.SWEEP_BATCH_SIZE) {
            Map.Entry<Cell, Value> entry = resultIterator.next();
            SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
            long startTs = getTimestamp(row, col);
            if (knownToBeCommittedAfterSweepTs(startTs, sweepTs)) {
                writeBatch.add(ImmutableList.of(getWriteInfo(startTs, entry.getValue())));
                return writeBatch;
            }
            writeBatch.merge(getWrites(row, col, entry.getValue()));
        }
        // there may be entries remaining with the same start timestamp as the last processed one. If that is the case
        // we want to include these ones as well. This is OK since there are at most MAX_CELLS_GENERIC - 1 of them.
        while (resultIterator.hasNext()) {
            Map.Entry<Cell, Value> entry = resultIterator.peek();
            SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
            long timestamp = getTimestamp(row, col);
            if (writeBatch.writesByStartTs.containsKey(timestamp)) {
                writeBatch.merge(getWrites(row, col, entry.getValue()));
                resultIterator.next();
            } else {
                break;
            }
        }
        return writeBatch;
    }
    // Mutable accumulator pairing writes (keyed by start ts) with the dedicated
    // rows they came from.
    private static final class WriteBatch {
        private final Multimap<Long, WriteInfo> writesByStartTs = HashMultimap.create();
        private final List<SweepableCellsRow> dedicatedRows = new ArrayList<>();
        WriteBatch merge(WriteBatch other) {
            writesByStartTs.putAll(other.writesByStartTs);
            dedicatedRows.addAll(other.dedicatedRows);
            return this;
        }
        static WriteBatch single(WriteInfo writeInfo) {
            WriteBatch batch = new WriteBatch();
            return batch.add(ImmutableList.of(writeInfo));
        }
        WriteBatch add(List<SweepableCellsRow> newDedicatedRows, List<WriteInfo> writeInfos) {
            dedicatedRows.addAll(newDedicatedRows);
            return add(writeInfos);
        }
        WriteBatch add(List<WriteInfo> writeInfos) {
            writeInfos.forEach(info -> writesByStartTs.put(info.timestamp(), info));
            return this;
        }
    }
    private RowColumnRangeIterator getRowColumnRange(
            SweepableCellsRow row, long partitionFine, long minTsExclusive, long maxTsExclusive) {
        return getRowsColumnRange(
                ImmutableList.of(row.persistToBytes()),
                columnsBetween(minTsExclusive + 1, maxTsExclusive, partitionFine),
                SweepQueueUtils.BATCH_SIZE_KVS);
    }
    /**
     * Classifies each start ts as aborted (deleted immediately), committed
     * before sweepTs (sweepable), or committed after sweepTs (stop here).
     * Returns the sweepable timestamps in descending order plus the progress
     * watermark and whether the whole range was processed.
     */
    private TimestampsToSweep getTimestampsToSweepDescendingAndCleanupAborted(
            ShardAndStrategy shardStrategy,
            long minTsExclusive,
            long sweepTs,
            Multimap<Long, WriteInfo> writesByStartTs) {
        Map<Long, Long> startToCommitTs = commitTsCache.loadBatch(writesByStartTs.keySet());
        Map<TableReference, Multimap<Cell, Long>> cellsToDelete = new HashMap<>();
        List<Long> committedTimestamps = new ArrayList<>();
        long lastSweptTs = minTsExclusive;
        boolean processedAll = true;
        List<Long> sortedStartTimestamps =
                startToCommitTs.keySet().stream().sorted().collect(Collectors.toList());
        for (long startTs : sortedStartTimestamps) {
            long commitTs = startToCommitTs.get(startTs);
            if (commitTs == TransactionConstants.FAILED_COMMIT_TS) {
                // Aborted transaction: queue its cells for deletion.
                lastSweptTs = startTs;
                writesByStartTs.get(startTs).forEach(write -> cellsToDelete
                        .computeIfAbsent(write.tableRef(), ignore -> HashMultimap.create())
                        .put(write.cell(), write.timestamp()));
            } else if (commitTs < sweepTs) {
                lastSweptTs = startTs;
                committedTimestamps.add(startTs);
            } else {
                // Committed after the sweep timestamp: cannot go past this.
                processedAll = false;
                lastSweptTs = startTs - 1;
                break;
            }
        }
        cellsToDelete.forEach((tableRef, multimap) -> {
            try {
                kvs.delete(tableRef, multimap);
            } catch (Exception exception) {
                if (tableWasDropped(tableRef)) {
                    // this table no longer exists, but had work to do in the sweep queue still;
                    // don't error out on this batch so that the queue cleans up and doesn't constantly retry forever
                    log.info(
                            "Tried to delete {} aborted writes from table {}, "
                                    + "but instead found that the table no longer exists.",
                            SafeArg.of("number", multimap.size()),
                            LoggingArgs.tableRef(tableRef),
                            exception);
                } else {
                    throw exception;
                }
            }
            maybeMetrics.ifPresent(metrics -> metrics.updateAbortedWritesDeleted(shardStrategy, multimap.size()));
            log.info(
                    "Deleted {} aborted writes from table {}.",
                    SafeArg.of("number", multimap.size()),
                    LoggingArgs.tableRef(tableRef));
        });
        return TimestampsToSweep.of(
                ImmutableSortedSet.copyOf(committedTimestamps).descendingSet(), lastSweptTs, processedAll);
    }
    private boolean tableWasDropped(TableReference tableRef) {
        return Arrays.equals(kvs.getMetadataForTable(tableRef), AtlasDbConstants.EMPTY_TABLE_METADATA);
    }
    // Deduplicates by cell: iterating timestamps in descending order and using
    // putIfAbsent keeps only the latest write per cell reference.
    private Collection<WriteInfo> getWritesToSweep(Multimap<Long, WriteInfo> writesByStartTs, SortedSet<Long> startTs) {
        Map<CellReference, WriteInfo> writesToSweepFor = new HashMap<>();
        startTs.stream()
                .map(writesByStartTs::get)
                .flatMap(Collection::stream)
                .forEach(write -> writesToSweepFor.putIfAbsent(write.writeRef().cellReference(), write));
        return writesToSweepFor.values();
    }
    // If everything was processed and the row is exhausted, the whole fine
    // partition (capped by sweepTs) is guaranteed swept.
    private long getLastSweptTs(
            TimestampsToSweep startTsCommitted,
            Iterator<Map.Entry<Cell, Value>> resultIterator,
            long partitionFine,
            long maxTsExclusive) {
        if (startTsCommitted.processedAll() && exhaustedAllColumns(resultIterator)) {
            return lastGuaranteedSwept(partitionFine, maxTsExclusive);
        } else {
            return startTsCommitted.maxSwept();
        }
    }
    // A negative write index marks a reference to dedicated rows rather than a
    // real write (see DUMMY / entryIndicatingNumberOfRequiredRows).
    private WriteBatch getWrites(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col, Value value) {
        if (isReferenceToDedicatedRows(col)) {
            return writesFromDedicated(row, col);
        } else {
            return WriteBatch.single(getWriteInfo(getTimestamp(row, col), value));
        }
    }
    private boolean isReferenceToDedicatedRows(SweepableCellsTable.SweepableCellsColumn col) {
        return col.getWriteIndex() < 0;
    }
    // Expands a dedicated-row reference into the writes stored in those rows.
    private WriteBatch writesFromDedicated(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col) {
        List<SweepableCellsRow> dedicatedRows = computeDedicatedRows(row, col);
        RowColumnRangeIterator iterator =
                getWithColumnRangeAll(Lists.transform(dedicatedRows, SweepableCellsRow::persistToBytes));
        WriteBatch batch = new WriteBatch();
        return batch.add(
                dedicatedRows,
                Streams.stream(iterator)
                        .map(entry -> getWriteInfo(getTimestamp(row, col), entry.getValue()))
                        .collect(Collectors.toList()));
    }
    // Reconstructs the dedicated row keys from the reference entry's metadata
    // and the (negated) row count encoded in its write index.
    private List<SweepableCellsRow> computeDedicatedRows(
            SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col) {
        TargetedSweepMetadata metadata = TargetedSweepMetadata.BYTES_HYDRATOR.hydrateFromBytes(row.getMetadata());
        long timestamp = getTimestamp(row, col);
        int numberOfDedicatedRows = writeIndexToNumberOfDedicatedRows(col.getWriteIndex());
        List<SweepableCellsRow> dedicatedRows = new ArrayList<>();
        for (int i = 0; i < numberOfDedicatedRows; i++) {
            byte[] dedicatedMetadata = ImmutableTargetedSweepMetadata.builder()
                    .from(metadata)
                    .dedicatedRow(true)
                    .dedicatedRowNumber(i)
                    .build()
                    .persistToBytes();
            dedicatedRows.add(SweepableCellsRow.of(timestamp, dedicatedMetadata));
        }
        return dedicatedRows;
    }
    // Full start ts = fine partition * granularity + modulus stored in column.
    private long getTimestamp(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col) {
        return row.getTimestampPartition() * SweepQueueUtils.TS_FINE_GRANULARITY + col.getTimestampModulus();
    }
    // Cache-only check: false when the commit ts is simply not cached yet.
    private boolean knownToBeCommittedAfterSweepTs(long startTs, long sweepTs) {
        return commitTsCache
                .loadIfCached(startTs)
                .map(commitTs -> commitTs >= sweepTs)
                .orElse(false);
    }
    private int writeIndexToNumberOfDedicatedRows(long writeIndex) {
        return (int) -writeIndex;
    }
    private RowColumnRangeIterator getWithColumnRangeAll(Iterable<byte[]> rows) {
        return getRowsColumnRange(rows, SweepQueueUtils.ALL_COLUMNS, SweepQueueUtils.BATCH_SIZE_KVS);
    }
    private WriteInfo getWriteInfo(long timestamp, Value value) {
        return WriteInfo.of(
                writeReferencePersister.unpersist(SweepableCellsColumnValue.hydrateValue(value.getContents())),
                timestamp);
    }
    private boolean exhaustedAllColumns(Iterator<Map.Entry<Cell, Value>> resultIterator) {
        return !resultIterator.hasNext();
    }
    private long lastGuaranteedSwept(long partitionFine, long maxTsExclusive) {
        return Math.min(SweepQueueUtils.maxTsForFinePartition(partitionFine), maxTsExclusive - 1);
    }
    void deleteDedicatedRows(DedicatedRows dedicatedRows) {
        List<byte[]> rows = dedicatedRows.getDedicatedRows().stream()
                .map(SweepableCellsRow::persistToBytes)
                .collect(Collectors.toList());
        deleteRows(rows);
    }
    void deleteNonDedicatedRows(ShardAndStrategy shardAndStrategy, Iterable<Long> partitionsFine) {
        List<byte[]> rows = Streams.stream(partitionsFine)
                .map(partitionFine -> computeRow(partitionFine, shardAndStrategy))
                .map(SweepableCellsRow::persistToBytes)
                .collect(Collectors.toList());
        deleteRows(rows);
    }
    // Splits a global write index into (dedicated row number, index within row).
    private Map<Cell, byte[]> addWrite(PartitionInfo info, WriteInfo write, boolean dedicate, long index) {
        return addCell(
                info,
                write.writeRef(),
                dedicate,
                index / SweepQueueUtils.MAX_CELLS_DEDICATED,
                index % SweepQueueUtils.MAX_CELLS_DEDICATED);
    }
    private SweepableCellsTable.SweepableCellsColumn computeColumn(Map.Entry<Cell, Value> entry) {
        return SweepableCellsTable.SweepableCellsColumn.BYTES_HYDRATOR.hydrateFromBytes(
                entry.getKey().getColumnName());
    }
    // Column range covering start timestamps [startTsInclusive, endTsExclusive)
    // clamped to the given fine partition's row.
    private ColumnRangeSelection columnsBetween(long startTsInclusive, long endTsExclusive, long partitionFine) {
        long startIncl = exactColumnOrElseBeginningOfRow(startTsInclusive, partitionFine);
        byte[] startCol = SweepableCellsTable.SweepableCellsColumn.of(startIncl, SweepQueueUtils.MINIMUM_WRITE_INDEX)
                .persistToBytes();
        long endExcl = exactColumnOrElseOneBeyondEndOfRow(endTsExclusive, partitionFine);
        byte[] endCol = SweepableCellsTable.SweepableCellsColumn.of(endExcl, SweepQueueUtils.MINIMUM_WRITE_INDEX)
                .persistToBytes();
        return new ColumnRangeSelection(startCol, endCol);
    }
    private long exactColumnOrElseOneBeyondEndOfRow(long endTsExclusive, long partitionFine) {
        return Math.min(
                endTsExclusive - SweepQueueUtils.minTsForFinePartition(partitionFine),
                SweepQueueUtils.TS_FINE_GRANULARITY);
    }
    private long exactColumnOrElseBeginningOfRow(long startTsInclusive, long partitionFine) {
        return Math.max(startTsInclusive - SweepQueueUtils.minTsForFinePartition(partitionFine), 0);
    }
}
| 9,040 |
6,098 | package water.api;
import hex.grid.Grid;
import hex.schemas.GridSchemaV99;
import water.Key;
import water.KeySnapshot;
import water.Value;
import water.api.schemas99.GridsV99;
/**
* /Grids/ end-point handler.
*/
public class GridsHandler extends Handler {

  /** Collect every {@link Grid} currently in the DKV and fill the response schema. */
  @SuppressWarnings("unused") // called through reflection by RequestServer
  public GridsV99 list(int version, GridsV99 s) {
    KeySnapshot.KVFilter gridsOnly = new KeySnapshot.KVFilter() {
      @Override
      public boolean filter(KeySnapshot.KeyInfo k) {
        return Value.isSubclassOf(k._type, Grid.class);
      }
    };
    final Key[] keys = KeySnapshot.globalSnapshot().filter(gridsOnly).keys();
    s.grids = new GridSchemaV99[keys.length];
    int idx = 0;
    for (Key key : keys) {
      GridSchemaV99 schema = new GridSchemaV99();
      schema.fillFromImpl(getFromDKV("(none)", key, Grid.class));
      s.grids[idx++] = schema;
    }
    return s;
  }

  /** Look up a single grid by its id and return its filled schema. */
  @SuppressWarnings("unused") // called through reflection by RequestServer
  public GridSchemaV99 fetch(int version, GridSchemaV99 s) {
    Grid grid = getFromDKV("grid_id", s.grid_id.key(), Grid.class);
    return s.fillFromImpl(grid);
  }
}
| 446 |
314 | //
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "CDStructures.h"
@class NSMutableDictionary;
// Engine holding macro -> substitution mappings and applying them to strings
// and template files. NOTE: this header was produced by class-dump, so all
// argument names (arg1, arg2, ...) are synthesized, not original.
@interface IDETemplateMacroEngine : NSObject
{
    NSMutableDictionary *_macroSubstitutions; // macro name -> substitution value
}

// Builds the standard macro substitution dictionary from project context
// (workspace/project/package/target/product/file/organization names).
+ (id)standardSubstitutionsForMacrosWithWorkspaceName:(id)arg1 projectName:(id)arg2 packageName:(id)arg3 targetName:(id)arg4 productName:(id)arg5 fileName:(id)arg6 organizationName:(id)arg7;
+ (id)substitutionsForMacrosWithWorkspaceName:(id)arg1 projectName:(id)arg2 packageName:(id)arg3 targetName:(id)arg4 productName:(id)arg5 fileName:(id)arg6;
+ (id)multiSubstitutionsForSubstitution:(id)arg1 forMacro:(id)arg2;
- (void)setMultiSubstitution:(id)arg1 forMacro:(id)arg2;
// Creates a file at arg1 by substituting macros in the template file at arg3.
- (BOOL)createFileAtURL:(id)arg1 usingEncoding:(unsigned long long)arg2 substitutingMacrosInFileAtURL:(id)arg3 error:(id *)arg4;
// Returns arg1 with all registered macros replaced by their substitutions.
- (id)stringBySubstitutingMacrosInString:(id)arg1;
- (id)allMacros;
- (void)setSubstitutionsForMacros:(id)arg1;
- (id)substitutionForMacro:(id)arg1;
- (void)setSubstitution:(id)arg1 forMacro:(id)arg2;
- (id)init;
@end
| 431 |
581 | #!/usr/bin/env python -*- coding: utf-8 -*-
#
# Python Word Sense Disambiguation (pyWSD): WSD all-words lesk speed tests
#
# Copyright (C) 2014-2015 alvations
# URL:
# For license information, see LICENSE.md
from __future__ import print_function
import time
from nltk.corpus import brown
from pywsd.lesk import simple_lesk, original_lesk, cosine_lesk, adapted_lesk
from pywsd.allwords_wsd import disambiguate
print("======== TESTING all-words lesk (`from_cache=True`)===========")
start = time.time()
for sentence in brown.sents()[:10]:
sentence = " ".join(sentence)
disambiguate(sentence, simple_lesk, prefersNone=True, keepLemmas=True)
disambiguate(sentence, original_lesk)
disambiguate(sentence, adapted_lesk, keepLemmas=True)
print('Disambiguating 100 brown sentences took {} secs'.format(time.time() - start))
print("======== TESTING all-words lesk (`from_cache=False`)===========")
start = time.time()
for sentence in brown.sents()[:10]:
sentence = " ".join(sentence)
disambiguate(sentence, simple_lesk, prefersNone=True, keepLemmas=True, from_cache=False)
disambiguate(sentence, original_lesk, from_cache=False)
disambiguate(sentence, adapted_lesk, keepLemmas=True, from_cache=False)
print('Disambiguating 10 brown sentences took {} secs'.format(time.time() - start))
| 468 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-7hgh-752m-38mj",
"modified": "2022-05-17T00:12:15Z",
"published": "2022-05-17T00:12:15Z",
"aliases": [
"CVE-2017-14585"
],
"details": "A Server Side Request Forgery (SSRF) vulnerability could lead to remote code execution for authenticated administrators. This issue was introduced in version 2.2.0 of Hipchat Server and version 3.0.0 of Hipchat Data Center. Versions of Hipchat Server starting with 2.2.0 and before 2.2.6 are affected by this vulnerability. Versions of Hipchat Data Center starting with 3.0.0 and before 3.1.0 are affected.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:L/PR:H/UI:N/S:U/C:H/I:H/A:H"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2017-14585"
},
{
"type": "WEB",
"url": "https://confluence.atlassian.com/hc/hipchat-server-security-advisory-2017-11-22-939946293.html"
},
{
"type": "WEB",
"url": "https://jira.atlassian.com/browse/HCPUB-3526"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/101945"
}
],
"database_specific": {
"cwe_ids": [
"CWE-918"
],
"severity": "HIGH",
"github_reviewed": false
}
} | 598 |
675 | #include "shadow_depth_render_stage.h"
namespace Echo
{
	// Render stage that owns a "Shadow Depth" render queue — presumably the
	// pass that renders occluder depth for shadow mapping (confirm against the
	// renderer that consumes this stage).
	ShadowDepthRenderStage::ShadowDepthRenderStage()
		: RenderStage()
	{
		// Add default render queue
		Echo::IRenderQueue* renderQueue = EchoNew(RenderQueue);
		if (renderQueue)
		{
			renderQueue->setName("Shadow Depth");
			// Ownership of the queue passes to the base RenderStage here.
			addRenderQueue(renderQueue);
		}
	}

	ShadowDepthRenderStage::~ShadowDepthRenderStage()
	{
	}

	// No script/reflection bindings are registered for this stage.
	void ShadowDepthRenderStage::bindMethods()
	{
	}

	// Delegates entirely to the base class, which renders the stage's queues.
	void ShadowDepthRenderStage::render()
	{
		RenderStage::render();
	}
}
775 | <filename>micropolis/micropolis-activity/src/tk/tktxtag.c
/*
* tkTextTag.c --
*
* This module implements the "tag" subcommand of the widget command
* for text widgets, plus most of the other high-level functions
* related to tags.
*
* Copyright 1992 Regents of the University of California.
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies. The University of California
* makes no representations about the suitability of this
* software for any purpose. It is provided "as is" without
* express or implied warranty.
*/
#ifndef lint
static char rcsid[] = "$Header: /user6/ouster/wish/RCS/tkTextTag.c,v 1.3 92/07/28 15:38:59 ouster Exp $ SPRITE (Berkeley)";
#endif
#include "default.h"
#include "tkconfig.h"
#include "tk.h"
#include "tktext.h"
/*
* Information used for parsing tag configuration information:
*/
/* Table of configurable tag options consumed by Tk_ConfigureWidget; each
 * entry maps a "-option" switch to a field of TkTextTag via Tk_Offset.
 * TK_CONFIG_NULL_OK entries default to NULL (option unset); entries with
 * TK_CONFIG_DONT_SET_DEFAULT carry an explicit default string. */
static Tk_ConfigSpec tagConfigSpecs[] = {
    {TK_CONFIG_BORDER, "-background", (char *) NULL, (char *) NULL,
	(char *) NULL, Tk_Offset(TkTextTag, border), TK_CONFIG_NULL_OK},
    {TK_CONFIG_BITMAP, "-bgstipple", (char *) NULL, (char *) NULL,
	(char *) NULL, Tk_Offset(TkTextTag, bgStipple), TK_CONFIG_NULL_OK},
    {TK_CONFIG_PIXELS, "-borderwidth", (char *) NULL, (char *) NULL,
	"0", Tk_Offset(TkTextTag, borderWidth), TK_CONFIG_DONT_SET_DEFAULT},
    {TK_CONFIG_BITMAP, "-fgstipple", (char *) NULL, (char *) NULL,
	(char *) NULL, Tk_Offset(TkTextTag, fgStipple), TK_CONFIG_NULL_OK},
    {TK_CONFIG_FONT, "-font", (char *) NULL, (char *) NULL,
	(char *) NULL, Tk_Offset(TkTextTag, fontPtr), TK_CONFIG_NULL_OK},
    {TK_CONFIG_COLOR, "-foreground", (char *) NULL, (char *) NULL,
	(char *) NULL, Tk_Offset(TkTextTag, fgColor), TK_CONFIG_NULL_OK},
    {TK_CONFIG_RELIEF, "-relief", (char *) NULL, (char *) NULL,
	"flat", Tk_Offset(TkTextTag, relief), TK_CONFIG_DONT_SET_DEFAULT},
    {TK_CONFIG_BOOLEAN, "-underline", (char *) NULL, (char *) NULL,
	"false", Tk_Offset(TkTextTag, underline), TK_CONFIG_DONT_SET_DEFAULT},
    /* Sentinel terminating the table. */
    {TK_CONFIG_END, (char *) NULL, (char *) NULL, (char *) NULL,
	(char *) NULL, 0, 0}
};
/*
* The following definition specifies the maximum number of characters
* needed in a string to hold a position specifier.
*/
#define POS_CHARS 30
/*
* Forward declarations for procedures defined later in this file:
*/
static void ChangeTagPriority _ANSI_ARGS_((TkText *textPtr,
TkTextTag *tagPtr, int prio));
static TkTextTag * FindTag _ANSI_ARGS_((Tcl_Interp *interp,
TkText *textPtr, char *tagName));
static void SortTags _ANSI_ARGS_((int numTags,
TkTextTag **tagArrayPtr));
static int TagSortProc _ANSI_ARGS_((CONST VOID *first,
CONST VOID *second));
static void TextDoEvent _ANSI_ARGS_((TkText *textPtr,
XEvent *eventPtr));
/*
*--------------------------------------------------------------
*
* TkTextTagCmd --
*
* This procedure is invoked to process the "tag" options of
* the widget command for text widgets. See the user documentation
* for details on what it does.
*
* Results:
* A standard Tcl result.
*
* Side effects:
* See the user documentation.
*
*--------------------------------------------------------------
*/
int
TkTextTagCmd(textPtr, interp, argc, argv)
    register TkText *textPtr;	/* Information about text widget. */
    Tcl_Interp *interp;		/* Current interpreter. */
    int argc;			/* Number of arguments. */
    char **argv;		/* Argument strings.  Someone else has already
				 * parsed this command enough to know that
				 * argv[1] is "tag". */
{
    int length, line1, ch1, line2, ch2, i, addTag;
    char c;
    char *fullOption;
    register TkTextTag *tagPtr;

    if (argc < 3) {
	Tcl_AppendResult(interp, "wrong # args: should be \"",
		argv[0], " tag option ?arg arg ...?\"", (char *) NULL);
	return TCL_ERROR;
    }

    /*
     * Dispatch on the tag sub-command.  Unique abbreviations are accepted:
     * each branch matches on the first character plus a strncmp over the
     * argument's own length (with an extra length check where two options
     * share a prefix, e.g. "names"/"nextrange" and "raise"/"ranges"/"remove").
     */
    c = argv[2][0];
    length = strlen(argv[2]);
    if ((c == 'a') && (strncmp(argv[2], "add", length) == 0)) {
	/*
	 * "tag add": apply the tag over a character range.  The "remove"
	 * branch below jumps here with addTag = 0 to share the range
	 * parsing and redisplay logic.
	 */
	fullOption = "add";
	addTag = 1;

	addAndRemove:
	if ((argc != 5) && (argc != 6)) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag ", fullOption, " tagName index1 ?index2?\"",
		    (char *) NULL);
	    return TCL_ERROR;
	}
	tagPtr = TkTextCreateTag(textPtr, argv[3]);
	if (TkTextGetIndex(interp, textPtr, argv[4], &line1, &ch1) != TCL_OK) {
	    return TCL_ERROR;
	}
	if (argc == 6) {
	    if (TkTextGetIndex(interp, textPtr, argv[5], &line2, &ch2)
		    != TCL_OK) {
		return TCL_ERROR;
	    }
	} else {
	    /* No second index: the range is the single character at index1. */
	    line2 = line1;
	    ch2 = ch1+1;
	}
	/* Schedule a redisplay of the range before the B-tree changes. */
	if (TK_TAG_AFFECTS_DISPLAY(tagPtr)) {
	    TkTextRedrawTag(textPtr, line1, ch1, line2, ch2, tagPtr, !addTag);
	}
	TkBTreeTag(textPtr->tree, line1, ch1, line2, ch2, tagPtr, addTag);

	/*
	 * If the tag is "sel" then grab the selection if we're supposed
	 * to export it and don't already have it.  Also, invalidate
	 * partially-completed selection retrievals.
	 */
	if (tagPtr == textPtr->selTagPtr) {
	    if (addTag && textPtr->exportSelection
		    && !(textPtr->flags & GOT_SELECTION)) {
		Tk_OwnSelection(textPtr->tkwin, TkTextLostSelection,
			(ClientData) textPtr);
		textPtr->flags |= GOT_SELECTION;
	    }
	    textPtr->selOffset = -1;
	}
    } else if ((c == 'b') && (strncmp(argv[2], "bind", length) == 0)) {
	/*
	 * "tag bind": create, replace, delete, query, or list event
	 * bindings associated with a tag.
	 */
	if ((argc < 4) || (argc > 6)) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag bind tagName ?sequence? ?command?\"",
		    (char *) NULL);
	    return TCL_ERROR;
	}
	tagPtr = TkTextCreateTag(textPtr, argv[3]);

	/*
	 * Make a binding table if the widget doesn't already have
	 * one.
	 */
	if (textPtr->bindingTable == NULL) {
	    textPtr->bindingTable = Tk_CreateBindingTable(interp);
	}

	if (argc == 6) {
	    int append = 0;
	    unsigned long mask;

	    /* An empty command string deletes the binding. */
	    if (argv[5][0] == 0) {
		return Tk_DeleteBinding(interp, textPtr->bindingTable,
			(ClientData) tagPtr, argv[4]);
	    }
	    /* A leading "+" appends to any existing binding. */
	    if (argv[5][0] == '+') {
		argv[5]++;
		append = 1;
	    }
	    mask = Tk_CreateBinding(interp, textPtr->bindingTable,
		    (ClientData) tagPtr, argv[4], argv[5], append);
	    if (mask == 0) {
		return TCL_ERROR;
	    }
	    /*
	     * Only key, button, motion, and enter/leave events make sense
	     * for tags; undo the binding if anything else was requested.
	     */
	    if (mask & ~(ButtonMotionMask|Button1MotionMask|Button2MotionMask
		    |Button3MotionMask|Button4MotionMask|Button5MotionMask
		    |ButtonPressMask|ButtonReleaseMask|EnterWindowMask
		    |LeaveWindowMask|KeyPressMask|KeyReleaseMask
		    |PointerMotionMask)) {
		Tk_DeleteBinding(interp, textPtr->bindingTable,
			(ClientData) tagPtr, argv[4]);
		Tcl_ResetResult(interp);
		Tcl_AppendResult(interp, "requested illegal events; ",
			"only key, button, motion, and enter/leave ",
			"events may be used", (char *) NULL);
		return TCL_ERROR;
	    }
	} else if (argc == 5) {
	    /* Query the command bound to one event sequence. */
	    char *command;

	    command = Tk_GetBinding(interp, textPtr->bindingTable,
		    (ClientData) tagPtr, argv[4]);
	    if (command == NULL) {
		return TCL_ERROR;
	    }
	    interp->result = command;
	} else {
	    /* No sequence given: list all bound sequences for the tag. */
	    Tk_GetAllBindings(interp, textPtr->bindingTable,
		    (ClientData) tagPtr);
	}
    } else if ((c == 'c') && (strncmp(argv[2], "configure", length) == 0)) {
	/*
	 * "tag configure": query or change per-tag display options
	 * (see tagConfigSpecs above).
	 */
	if (argc < 4) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag configure tagName ?option? ?value? ",
		    "?option value ...?\"", (char *) NULL);
	    return TCL_ERROR;
	}
	tagPtr = TkTextCreateTag(textPtr, argv[3]);
	if (argc == 4) {
	    return Tk_ConfigureInfo(interp, textPtr->tkwin, tagConfigSpecs,
		    (char *) tagPtr, (char *) NULL, 0);
	} else if (argc == 5) {
	    return Tk_ConfigureInfo(interp, textPtr->tkwin, tagConfigSpecs,
		    (char *) tagPtr, argv[4], 0);
	} else {
	    int result;

	    result = Tk_ConfigureWidget(interp, textPtr->tkwin, tagConfigSpecs,
		    argc-4, argv+4, (char *) tagPtr, 0);
	    /*
	     * If the "sel" tag was changed, be sure to mirror information
	     * from the tag back into the text widget record.  NOTE: we
	     * don't have to free up information in the widget record
	     * before overwriting it, because it was mirrored in the tag
	     * and hence freed when the tag field was overwritten.
	     */
	    if (tagPtr == textPtr->selTagPtr) {
		textPtr->selBorder = tagPtr->border;
		textPtr->selBorderWidth = tagPtr->borderWidth;
		textPtr->selFgColorPtr = tagPtr->fgColor;
	    }
	    /* Redisplay the whole widget: the tag may appear anywhere. */
	    TkTextRedrawTag(textPtr, 0, 0, TkBTreeNumLines(textPtr->tree),
		    0, tagPtr, 1);
	    return result;
	}
    } else if ((c == 'd') && (strncmp(argv[2], "delete", length) == 0)) {
	/*
	 * "tag delete": destroy one or more tags entirely (display info,
	 * bindings, B-tree occurrences, and the tag records themselves).
	 */
	Tcl_HashEntry *hPtr;

	if (argc < 4) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag delete tagName tagName ...\"",
		    (char *) NULL);
	    return TCL_ERROR;
	}
	for (i = 3; i < argc; i++) {
	    hPtr = Tcl_FindHashEntry(&textPtr->tagTable, argv[i]);
	    if (hPtr == NULL) {
		/* Silently ignore tags that don't exist. */
		continue;
	    }
	    tagPtr = (TkTextTag *) Tcl_GetHashValue(hPtr);
	    if (tagPtr == textPtr->selTagPtr) {
		interp->result = "can't delete selection tag";
		return TCL_ERROR;
	    }
	    if (TK_TAG_AFFECTS_DISPLAY(tagPtr)) {
		TkTextRedrawTag(textPtr, 0, 0, TkBTreeNumLines(textPtr->tree),
			0, tagPtr, 1);
	    }
	    /* Remove every occurrence of the tag from the B-tree. */
	    TkBTreeTag(textPtr->tree, 0, 0, TkBTreeNumLines(textPtr->tree),
		    0, tagPtr, 0);
	    Tcl_DeleteHashEntry(hPtr);
	    if (textPtr->bindingTable != NULL) {
		Tk_DeleteAllBindings(textPtr->bindingTable,
			(ClientData) tagPtr);
	    }

	    /*
	     * Update the tag priorities to reflect the deletion of this tag.
	     * (Moving it to the top priority first keeps the remaining
	     * priorities contiguous in 0..numTags-2.)
	     */
	    ChangeTagPriority(textPtr, tagPtr, textPtr->numTags-1);
	    textPtr->numTags -= 1;
	    TkTextFreeTag(tagPtr);
	}
    } else if ((c == 'l') && (strncmp(argv[2], "lower", length) == 0)) {
	/*
	 * "tag lower": move a tag below another tag (or to the very
	 * bottom of the priority order).
	 */
	TkTextTag *tagPtr2;
	int prio;

	if ((argc != 4) && (argc != 5)) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag lower tagName ?belowThis?\"",
		    (char *) NULL);
	    return TCL_ERROR;
	}
	tagPtr = FindTag(interp, textPtr, argv[3]);
	if (tagPtr == NULL) {
	    return TCL_ERROR;
	}
	if (argc == 5) {
	    tagPtr2 = FindTag(interp, textPtr, argv[4]);
	    if (tagPtr2 == NULL) {
		return TCL_ERROR;
	    }
	    /*
	     * Target slot depends on direction of motion: when moving
	     * down past tagPtr2 the other tags shift, so aim one below it.
	     */
	    if (tagPtr->priority < tagPtr2->priority) {
		prio = tagPtr2->priority - 1;
	    } else {
		prio = tagPtr2->priority;
	    }
	} else {
	    prio = 0;
	}
	ChangeTagPriority(textPtr, tagPtr, prio);
	TkTextRedrawTag(textPtr, 0, 0, TkBTreeNumLines(textPtr->tree),
		0, tagPtr, 1);
    } else if ((c == 'n') && (strncmp(argv[2], "names", length) == 0)
	    && (length >= 2)) {
	/*
	 * "tag names": list all tags in the widget, or (with an index)
	 * just the tags present on one character.  Result is sorted in
	 * priority order.
	 */
	TkTextTag **arrayPtr;
	int arraySize;
	TkTextLine *linePtr;

	if ((argc != 3) && (argc != 4)) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag names ?index?\"",
		    (char *) NULL);
	    return TCL_ERROR;
	}
	if (argc == 3) {
	    Tcl_HashSearch search;
	    Tcl_HashEntry *hPtr;

	    arrayPtr = (TkTextTag **) ckalloc((unsigned)
		    (textPtr->numTags * sizeof(TkTextTag *)));
	    for (i = 0, hPtr = Tcl_FirstHashEntry(&textPtr->tagTable, &search);
		    hPtr != NULL; i++, hPtr = Tcl_NextHashEntry(&search)) {
		arrayPtr[i] = (TkTextTag *) Tcl_GetHashValue(hPtr);
	    }
	    arraySize = textPtr->numTags;
	} else {
	    if (TkTextGetIndex(interp, textPtr, argv[3], &line1, &ch1)
		    != TCL_OK) {
		return TCL_ERROR;
	    }
	    linePtr = TkBTreeFindLine(textPtr->tree, line1);
	    if (linePtr == NULL) {
		return TCL_OK;
	    }
	    arrayPtr = TkBTreeGetTags(textPtr->tree, linePtr, ch1, &arraySize);
	    if (arrayPtr == NULL) {
		return TCL_OK;
	    }
	}
	SortTags(arraySize, arrayPtr);
	for (i = 0; i < arraySize; i++) {
	    tagPtr = arrayPtr[i];
	    Tcl_AppendElement(interp, tagPtr->name, 0);
	}
	ckfree((char *) arrayPtr);
    } else if ((c == 'n') && (strncmp(argv[2], "nextrange", length) == 0)
	    && (length >= 2)) {
	/*
	 * "tag nextrange": return the first range of the tag at or after
	 * index1 (and before index2, if given), as a pair of indices.
	 */
	TkTextSearch tSearch;
	char position[POS_CHARS];

	if ((argc != 5) && (argc != 6)) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag nextrange tagName index1 ?index2?\"",
		    (char *) NULL);
	    return TCL_ERROR;
	}
	tagPtr = FindTag((Tcl_Interp *) NULL, textPtr, argv[3]);
	if (tagPtr == NULL) {
	    /* Unknown tag: empty result, not an error. */
	    return TCL_OK;
	}
	if (TkTextGetIndex(interp, textPtr, argv[4], &line1, &ch1) != TCL_OK) {
	    return TCL_ERROR;
	}
	if (argc == 5) {
	    line2 = TkBTreeNumLines(textPtr->tree);
	    ch2 = 0;
	} else if (TkTextGetIndex(interp, textPtr, argv[5], &line2, &ch2)
		!= TCL_OK) {
	    return TCL_ERROR;
	}

	/*
	 * The search below is a bit tricky.  Rather than use the B-tree
	 * facilities to stop the search at line2.ch2, let it search up
	 * until the end of the file but check for a position past line2.ch2
	 * ourselves.  The reason for doing it this way is that we only
	 * care whether the *start* of the range is before line2.ch2; once
	 * we find the start, we don't want TkBTreeNextTag to abort the
	 * search because the end of the range is after line2.ch2.
	 */
	TkBTreeStartSearch(textPtr->tree, line1, ch1,
		TkBTreeNumLines(textPtr->tree), 0, tagPtr, &tSearch);
	if (!TkBTreeNextTag(&tSearch)) {
	    return TCL_OK;
	}
	/*
	 * If index1 falls inside an existing range, the first transition
	 * found is that range's *end*; skip ahead to the next range start.
	 */
	if (!TkBTreeCharTagged(tSearch.linePtr, tSearch.ch1, tagPtr)) {
	    if (!TkBTreeNextTag(&tSearch)) {
		return TCL_OK;
	    }
	}
	if ((tSearch.line1 > line2) || ((tSearch.line1 == line2)
		&& (tSearch.ch1 >= ch2))) {
	    return TCL_OK;
	}
	TkTextPrintIndex(tSearch.line1, tSearch.ch1, position);
	Tcl_AppendElement(interp, position, 0);
	TkBTreeNextTag(&tSearch);
	TkTextPrintIndex(tSearch.line1, tSearch.ch1, position);
	Tcl_AppendElement(interp, position, 0);
    } else if ((c == 'r') && (strncmp(argv[2], "raise", length) == 0)
	    && (length >= 3)) {
	/*
	 * "tag raise": move a tag above another tag (or to the very
	 * top of the priority order).  Mirror image of "tag lower".
	 */
	TkTextTag *tagPtr2;
	int prio;

	if ((argc != 4) && (argc != 5)) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag raise tagName ?aboveThis?\"",
		    (char *) NULL);
	    return TCL_ERROR;
	}
	tagPtr = FindTag(interp, textPtr, argv[3]);
	if (tagPtr == NULL) {
	    return TCL_ERROR;
	}
	if (argc == 5) {
	    tagPtr2 = FindTag(interp, textPtr, argv[4]);
	    if (tagPtr2 == NULL) {
		return TCL_ERROR;
	    }
	    if (tagPtr->priority <= tagPtr2->priority) {
		prio = tagPtr2->priority;
	    } else {
		prio = tagPtr2->priority + 1;
	    }
	} else {
	    prio = textPtr->numTags-1;
	}
	ChangeTagPriority(textPtr, tagPtr, prio);
	TkTextRedrawTag(textPtr, 0, 0, TkBTreeNumLines(textPtr->tree),
		0, tagPtr, 1);
    } else if ((c == 'r') && (strncmp(argv[2], "ranges", length) == 0)
	    && (length >= 3)) {
	/*
	 * "tag ranges": return every transition point for the tag in
	 * the whole widget (alternating range starts and ends).
	 */
	TkTextSearch tSearch;
	char position[POS_CHARS];

	if (argc != 4) {
	    Tcl_AppendResult(interp, "wrong # args: should be \"",
		    argv[0], " tag ranges tagName\"", (char *) NULL);
	    return TCL_ERROR;
	}
	tagPtr = FindTag((Tcl_Interp *) NULL, textPtr, argv[3]);
	if (tagPtr == NULL) {
	    return TCL_OK;
	}
	TkBTreeStartSearch(textPtr->tree, 0, 0, TkBTreeNumLines(textPtr->tree),
		0, tagPtr, &tSearch);
	while (TkBTreeNextTag(&tSearch)) {
	    TkTextPrintIndex(tSearch.line1, tSearch.ch1, position);
	    Tcl_AppendElement(interp, position, 0);
	}
    } else if ((c == 'r') && (strncmp(argv[2], "remove", length) == 0)
	    && (length >= 2)) {
	/* "tag remove": shares all of its logic with "tag add" above. */
	fullOption = "remove";
	addTag = 0;
	goto addAndRemove;
    } else {
	Tcl_AppendResult(interp, "bad tag option \"", argv[2],
		"\": must be add, bind, configure, delete, lower, ",
		"names, nextrange, raise, ranges, or remove",
		(char *) NULL);
	return TCL_ERROR;
    }
    return TCL_OK;
}
/*
*----------------------------------------------------------------------
*
* TkTextCreateTag --
*
* Find the record describing a tag within a given text widget,
* creating a new record if one doesn't already exist.
*
* Results:
* The return value is a pointer to the TkTextTag record for tagName.
*
* Side effects:
* A new tag record is created if there isn't one already defined
* for tagName.
*
*----------------------------------------------------------------------
*/
TkTextTag *
TkTextCreateTag(textPtr, tagName)
    TkText *textPtr;		/* Widget in which tag is being used. */
    char *tagName;		/* Name of desired tag. */
{
    Tcl_HashEntry *entryPtr;
    TkTextTag *newTagPtr;
    int isNew;

    /*
     * Look the name up in the widget's tag table first; if it is
     * already present, just hand back the existing record.
     */
    entryPtr = Tcl_CreateHashEntry(&textPtr->tagTable, tagName, &isNew);
    if (!isNew) {
	return (TkTextTag *) Tcl_GetHashValue(entryPtr);
    }

    /*
     * The name is new.  Allocate a tag record, fill it in with default
     * ("unset") display attributes and the lowest display priority,
     * then store it in the hash table entry.
     */
    newTagPtr = (TkTextTag *) ckalloc(sizeof(TkTextTag));
    newTagPtr->name = Tcl_GetHashKey(&textPtr->tagTable, entryPtr);
    newTagPtr->priority = textPtr->numTags;
    newTagPtr->border = NULL;
    newTagPtr->borderWidth = 1;
    newTagPtr->relief = TK_RELIEF_FLAT;
    newTagPtr->bgStipple = None;
    newTagPtr->fgColor = NULL;
    newTagPtr->fontPtr = NULL;
    newTagPtr->fgStipple = None;
    newTagPtr->underline = 0;
    textPtr->numTags++;
    Tcl_SetHashValue(entryPtr, newTagPtr);
    return newTagPtr;
}
/*
*----------------------------------------------------------------------
*
* FindTag --
*
* See if tag is defined for a given widget.
*
* Results:
* If tagName is defined in textPtr, a pointer to its TkTextTag
* structure is returned. Otherwise NULL is returned and an
* error message is recorded in interp->result unless interp
* is NULL.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static TkTextTag *
FindTag(interp, textPtr, tagName)
    Tcl_Interp *interp;		/* Interpreter to use for error message;
				 * if NULL, then don't record an error
				 * message. */
    TkText *textPtr;		/* Widget in which tag is being used. */
    char *tagName;		/* Name of desired tag. */
{
    Tcl_HashEntry *entryPtr;

    /*
     * A missing entry means the tag was never created in this widget:
     * report it (when an interpreter was supplied) and return NULL.
     */
    entryPtr = Tcl_FindHashEntry(&textPtr->tagTable, tagName);
    if (entryPtr == NULL) {
	if (interp != NULL) {
	    Tcl_AppendResult(interp, "tag \"", tagName,
		    "\" isn't defined in text widget", (char *) NULL);
	}
	return NULL;
    }
    return (TkTextTag *) Tcl_GetHashValue(entryPtr);
}
/*
*----------------------------------------------------------------------
*
* TkTextFreeTag --
*
* This procedure is called when a tag is deleted to free up the
* memory and other resources associated with the tag.
*
* Results:
* None.
*
* Side effects:
* Memory and other resources are freed.
*
*----------------------------------------------------------------------
*/
void
TkTextFreeTag(tagPtr)
register TkTextTag *tagPtr; /* Tag being deleted. */
{
if (tagPtr->border != None) {
Tk_Free3DBorder(tagPtr->border);
}
if (tagPtr->bgStipple != None) {
Tk_FreeBitmap(tagPtr->bgStipple);
}
if (tagPtr->fgColor != None) {
Tk_FreeColor(tagPtr->fgColor);
}
if (tagPtr->fgStipple != None) {
Tk_FreeBitmap(tagPtr->fgStipple);
}
ckfree((char *) tagPtr);
}
/*
*----------------------------------------------------------------------
*
* SortTags --
*
* This procedure sorts an array of tag pointers in increasing
* order of priority, optimizing for the common case where the
* array is small.
*
* Results:
* None.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static void
SortTags(numTags, tagArrayPtr)
    int numTags;		/* Number of tag pointers at *tagArrayPtr. */
    TkTextTag **tagArrayPtr;	/* Pointer to array of pointers. */
{
    int i, j, minPrio;
    register TkTextTag **scanPtrPtr;
    TkTextTag **minPtrPtr, *swap;

    if (numTags < 2) {
	return;
    }

    /*
     * Large arrays go straight to the library sort; small ones use a
     * selection sort, which avoids the qsort call overhead.
     */
    if (numTags >= 20) {
	qsort((VOID *) tagArrayPtr, numTags, sizeof (TkTextTag *),
		TagSortProc);
	return;
    }

    /*
     * Selection sort: on each pass find the remaining entry with the
     * smallest priority and swap it into the front position.
     */
    for (i = numTags-1; i > 0; i--, tagArrayPtr++) {
	minPtrPtr = scanPtrPtr = tagArrayPtr;
	minPrio = scanPtrPtr[0]->priority;
	for (j = i, scanPtrPtr++; j > 0; j--, scanPtrPtr++) {
	    if (scanPtrPtr[0]->priority < minPrio) {
		minPrio = scanPtrPtr[0]->priority;
		minPtrPtr = scanPtrPtr;
	    }
	}
	swap = *minPtrPtr;
	*minPtrPtr = *tagArrayPtr;
	*tagArrayPtr = swap;
    }
}
/*
*----------------------------------------------------------------------
*
* TagSortProc --
*
* This procedure is called by qsort when sorting an array of
* tags in priority order.
*
* Results:
* The return value is -1 if the first argument should be before
* the second element (i.e. it has lower priority), 0 if it's
* equivalent (this should never happen!), and 1 if it should be
* after the second element.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
static int
TagSortProc(first, second)
    CONST VOID *first, *second;		/* Elements to be compared. */
{
    /*
     * qsort comparator: order tag pointers by ascending priority.
     * Negative when *first sorts earlier, positive when later.
     */
    int prio1 = (*(TkTextTag **) first)->priority;
    int prio2 = (*(TkTextTag **) second)->priority;

    return prio1 - prio2;
}
/*
*----------------------------------------------------------------------
*
* ChangeTagPriority --
*
* This procedure changes the priority of a tag by modifying
* its priority and all other ones whose priority is affected
* by the change.
*
* Results:
* None.
*
* Side effects:
* Priorities may be changed for some or all of the tags in
* textPtr. The tags will be arranged so that there is exactly
* one tag at each priority level between 0 and textPtr->numTags-1,
* with tagPtr at priority "prio".
*
*----------------------------------------------------------------------
*/
static void
ChangeTagPriority(textPtr, tagPtr, prio)
    TkText *textPtr;		/* Information about text widget. */
    TkTextTag *tagPtr;		/* Tag whose priority is to be
				 * changed. */
    int prio;			/* New priority for tag. */
{
    int rangeLow, rangeHigh, shift;
    register TkTextTag *otherPtr;
    Tcl_HashEntry *hPtr;
    Tcl_HashSearch search;

    /*
     * Clamp the requested priority into the legal range
     * 0 .. numTags-1, and do nothing if it's already in effect.
     */
    if (prio < 0) {
	prio = 0;
    }
    if (prio >= textPtr->numTags) {
	prio = textPtr->numTags-1;
    }
    if (prio == tagPtr->priority) {
	return;
    }

    /*
     * Work out which band of priorities must slide to make room:
     * moving the tag down shifts [prio, old-1] up by one; moving it
     * up shifts [old+1, prio] down by one.
     */
    if (prio < tagPtr->priority) {
	rangeLow = prio;
	rangeHigh = tagPtr->priority-1;
	shift = 1;
    } else {
	rangeLow = tagPtr->priority+1;
	rangeHigh = prio;
	shift = -1;
    }
    for (hPtr = Tcl_FirstHashEntry(&textPtr->tagTable, &search);
	    hPtr != NULL; hPtr = Tcl_NextHashEntry(&search)) {
	otherPtr = (TkTextTag *) Tcl_GetHashValue(hPtr);
	if ((otherPtr->priority >= rangeLow)
		&& (otherPtr->priority <= rangeHigh)) {
	    otherPtr->priority += shift;
	}
    }
    tagPtr->priority = prio;
}
/*
*--------------------------------------------------------------
*
* TkTextBindProc --
*
* This procedure is invoked by the Tk dispatcher to handle
* events associated with bindings on items.
*
* Results:
* None.
*
* Side effects:
* Depends on the command invoked as part of the binding
* (if there was any).
*
*--------------------------------------------------------------
*/
void
TkTextBindProc(clientData, eventPtr)
    ClientData clientData;		/* Pointer to canvas structure. */
    XEvent *eventPtr;			/* Pointer to X event that just
					 * happened. */
{
    TkText *textPtr = (TkText *) clientData;
    int repick = 0;

    /* Protect the widget record across any Tcl code run by bindings. */
    Tk_Preserve((ClientData) textPtr);

    /*
     * This code simulates grabs for mouse buttons by refusing to
     * pick a new current character between the time a mouse button goes
     * down and the time when the last mouse button is released.
     */
    if (eventPtr->type == ButtonPress) {
	textPtr->flags |= BUTTON_DOWN;
    } else if (eventPtr->type == ButtonRelease) {
	int mask;

	/* Map the released button to its state-field modifier bit. */
	switch (eventPtr->xbutton.button) {
	    case Button1:
		mask = Button1Mask;
		break;
	    case Button2:
		mask = Button2Mask;
		break;
	    case Button3:
		mask = Button3Mask;
		break;
	    case Button4:
		mask = Button4Mask;
		break;
	    case Button5:
		mask = Button5Mask;
		break;
	    default:
		mask = 0;
		break;
	}
	/*
	 * The state field still includes the button being released;
	 * if it's the only one down, the simulated grab ends and the
	 * current character must be re-picked after the bindings run.
	 */
	if ((eventPtr->xbutton.state & (Button1Mask|Button2Mask
		|Button3Mask|Button4Mask|Button5Mask)) == mask) {
	    textPtr->flags &= ~BUTTON_DOWN;
	    repick = 1;
	}
    } else if ((eventPtr->type == EnterNotify)
	    || (eventPtr->type == LeaveNotify)) {
	/*
	 * Enter/leave only updates the current character; the synthetic
	 * enter/leave events generated inside TkTextPickCurrent are what
	 * actually fire tag bindings.
	 */
	TkTextPickCurrent(textPtr, eventPtr);
	goto done;
    } else if (eventPtr->type == MotionNotify) {
	TkTextPickCurrent(textPtr, eventPtr);
    }
    TextDoEvent(textPtr, eventPtr);
    if (repick) {
	unsigned int oldState;

	/*
	 * Re-pick with all button bits cleared so the pick logic sees
	 * the post-release pointer state; restore the event afterwards.
	 */
	oldState = eventPtr->xbutton.state;
	eventPtr->xbutton.state &= ~(Button1Mask|Button2Mask
		|Button3Mask|Button4Mask|Button5Mask);
	TkTextPickCurrent(textPtr, eventPtr);
	eventPtr->xbutton.state = oldState;
    }

    done:
    Tk_Release((ClientData) textPtr);
}
/*
*--------------------------------------------------------------
*
* TkTextPickCurrent --
*
* Find the topmost item in a canvas that contains a given
* location and mark the the current item. If the current
* item has changed, generate a fake exit event on the old
* current item and a fake enter event on the new current
* item.
*
* Results:
* None.
*
* Side effects:
* The current item for textPtr may change. If it does,
* then the commands associated with item entry and exit
* could do just about anything.
*
*--------------------------------------------------------------
*/
void
TkTextPickCurrent(textPtr, eventPtr)
    register TkText *textPtr;		/* Text widget in which to select
					 * current character. */
    XEvent *eventPtr;			/* Event describing location of
					 * mouse cursor.  Must be EnterWindow,
					 * LeaveWindow, ButtonRelease, or
					 * MotionNotify. */
{
    TkTextLine *linePtr;
    int ch;

    /*
     * If a button is down, then don't do anything at all;  we'll be
     * called again when all buttons are up, and we can repick then.
     * This implements a form of mouse grabbing.
     */
    if (textPtr->flags & BUTTON_DOWN) {
	return;
    }

    /*
     * Save information about this event in the widget for use if we have
     * to synthesize more enter and leave events later (e.g. because a
     * character was deleted, causing a new character to be underneath
     * the mouse cursor).  Also translate MotionNotify events into
     * EnterNotify events, since that's what gets reported to event
     * handlers when the current character changes.
     */
    if (eventPtr != &textPtr->pickEvent) {
	if ((eventPtr->type == MotionNotify)
		|| (eventPtr->type == ButtonRelease)) {
	    /*
	     * Build the saved EnterNotify field-by-field from the motion
	     * event; the crossing-only fields get fixed synthetic values.
	     */
	    textPtr->pickEvent.xcrossing.type = EnterNotify;
	    textPtr->pickEvent.xcrossing.serial = eventPtr->xmotion.serial;
	    textPtr->pickEvent.xcrossing.send_event
		    = eventPtr->xmotion.send_event;
	    textPtr->pickEvent.xcrossing.display = eventPtr->xmotion.display;
	    textPtr->pickEvent.xcrossing.window = eventPtr->xmotion.window;
	    textPtr->pickEvent.xcrossing.root = eventPtr->xmotion.root;
	    textPtr->pickEvent.xcrossing.subwindow = None;
	    textPtr->pickEvent.xcrossing.time = eventPtr->xmotion.time;
	    textPtr->pickEvent.xcrossing.x = eventPtr->xmotion.x;
	    textPtr->pickEvent.xcrossing.y = eventPtr->xmotion.y;
	    textPtr->pickEvent.xcrossing.x_root = eventPtr->xmotion.x_root;
	    textPtr->pickEvent.xcrossing.y_root = eventPtr->xmotion.y_root;
	    textPtr->pickEvent.xcrossing.mode = NotifyNormal;
	    textPtr->pickEvent.xcrossing.detail = NotifyNonlinear;
	    textPtr->pickEvent.xcrossing.same_screen
		    = eventPtr->xmotion.same_screen;
	    textPtr->pickEvent.xcrossing.focus = False;
	    textPtr->pickEvent.xcrossing.state = eventPtr->xmotion.state;
	} else {
	    textPtr->pickEvent = *eventPtr;
	}
    }

    /*
     * Locate the character now under the pointer.  On LeaveNotify the
     * pointer is outside the widget, so there is no such character
     * (linePtr stays NULL).
     */
    linePtr = NULL;
    if (textPtr->pickEvent.type != LeaveNotify) {
	linePtr = TkTextCharAtLoc(textPtr, textPtr->pickEvent.xcrossing.x,
		textPtr->pickEvent.xcrossing.y, &ch);
    }

    /*
     * Simulate a LeaveNotify event on the previous current character and
     * an EnterNotify event on the new current character.  Also, move the
     * "current" mark to its new place.
     */
    if (textPtr->flags & IN_CURRENT) {
	/* Nothing to do if the current character hasn't changed. */
	if ((linePtr == textPtr->currentAnnotPtr->linePtr)
		&& (ch == textPtr->currentAnnotPtr->ch)) {
	    return;
	}
    } else {
	/* No previous current character and no new one: nothing to do. */
	if (linePtr == NULL) {
	    return;
	}
    }
    if (textPtr->flags & IN_CURRENT) {
	XEvent event;

	event = textPtr->pickEvent;
	event.type = LeaveNotify;
	TextDoEvent(textPtr, &event);
	textPtr->flags &= ~IN_CURRENT;
    }
    if (linePtr != NULL) {
	XEvent event;

	/* Re-anchor the "current" annotation at the new character. */
	TkBTreeRemoveAnnotation(textPtr->currentAnnotPtr);
	textPtr->currentAnnotPtr->linePtr = linePtr;
	textPtr->currentAnnotPtr->ch = ch;
	TkBTreeAddAnnotation(textPtr->currentAnnotPtr);
	event = textPtr->pickEvent;
	event.type = EnterNotify;
	TextDoEvent(textPtr, &event);
	textPtr->flags |= IN_CURRENT;
    }
}
/*
*----------------------------------------------------------------------
*
* TkTextUnpickCurrent --
*
* This procedure is called when the "current" character is
* deleted: it synthesizes a "leave" event for the character.
*
* Results:
* None.
*
* Side effects:
* A binding associated with one of the tags on the current
* character may be triggered.
*
*----------------------------------------------------------------------
*/
void
TkTextUnpickCurrent(textPtr)
    TkText *textPtr;			/* Text widget information. */
{
    XEvent leaveEvent;

    /* Nothing to do unless a character is currently "entered". */
    if (!(textPtr->flags & IN_CURRENT)) {
	return;
    }

    /*
     * Synthesize a LeaveNotify at the last recorded pointer position
     * and clear the current-character state.
     */
    leaveEvent = textPtr->pickEvent;
    leaveEvent.type = LeaveNotify;
    TextDoEvent(textPtr, &leaveEvent);
    textPtr->flags &= ~IN_CURRENT;
}
/*
*--------------------------------------------------------------
*
* TextDoEvent --
*
* This procedure is called to invoke binding processing
* for a new event that is associated with the current character
* for a text widget.
*
* Results:
* None.
*
* Side effects:
* Depends on the bindings for the text.
*
*--------------------------------------------------------------
*/
static void
TextDoEvent(textPtr, eventPtr)
    TkText *textPtr;			/* Text widget in which event
					 * occurred. */
    XEvent *eventPtr;			/* Real or simulated X event that
					 * is to be processed. */
{
    TkTextTag **tagArrayPtr, **p1, **p2, *tmp;
    int numTags;

    if (textPtr->bindingTable == NULL) {
	return;
    }

    /*
     * Set up an array containing all of the tags that are associated
     * with the current character.  This array will be used to look
     * for bindings.  If there are no tags then there can't be any
     * bindings.
     */
    tagArrayPtr = TkBTreeGetTags(textPtr->tree,
	    textPtr->currentAnnotPtr->linePtr, textPtr->currentAnnotPtr->ch,
	    &numTags);
    if (numTags == 0) {
	/*
	 * NOTE(review): no ckfree here — presumably TkBTreeGetTags
	 * returns NULL when numTags is 0; verify, otherwise this
	 * early return leaks the array.
	 */
	return;
    }

    /*
     * Sort the array of tags.  SortTags sorts it backwards, so after it
     * returns we have to reverse the order in the array.
     */
    SortTags(numTags, tagArrayPtr);
    for (p1 = tagArrayPtr, p2 = tagArrayPtr + numTags - 1;
	    p1 < p2; p1++, p2--) {
	tmp = *p1;
	*p1 = *p2;
	*p2 = tmp;
    }

    /*
     * Invoke the binding system, then free up the tag array.
     */
    Tk_BindEvent(textPtr->bindingTable, eventPtr, textPtr->tkwin,
	    numTags, (ClientData *) tagArrayPtr);
    ckfree((char *) tagArrayPtr);
}
| 11,940 |
421 | <gh_stars>100-1000
//<Snippet2>
using namespace System;
using namespace System::Globalization;
// Minimal IFormatProvider used only to trace which format-info type each
// Convert::To<Type> overload requests; it never supplies real formatting
// information.
ref class DummyProvider: public IFormatProvider
{
public:
   // Normally, GetFormat returns an object of the requested type
   // (usually itself) if it is able; otherwise, it returns Nothing.
   virtual Object^ GetFormat( Type^ argType )
   {
      // Here, GetFormat displays the name of argType, after removing
      // the namespace information. GetFormat always returns null.
      String^ argStr = argType->ToString();
      if ( argStr->Equals( "" ) )
            argStr = "Empty";

      // Keep only the simple type name: the text after the last '.'.
      argStr = argStr->Substring( argStr->LastIndexOf( '.' ) + 1 );

      // Left-align in a 20-character column so the caller's table lines up.
      Console::Write( "{0,-20}", argStr );

      // Null return (zero handle) — this provider deliberately supplies
      // no format object; it exists only to log the request.
      return (Object^)0;
   }
};
int main()
{
   // Create an instance of IFormatProvider.
   DummyProvider^ provider = gcnew DummyProvider;
   // Column layout: method name, input string, converted result.
   String^ format = "{0,-17}{1,-17}{2}";

   // Convert these values using DummyProvider.
   String^ Int32A = "-252645135";
   String^ DoubleA = "61680.3855";
   String^ DayTimeA = "2001/9/11 13:45";
   String^ BoolA = "True";
   String^ StringA = "Qwerty";
   String^ CharA = "$";

   Console::WriteLine( "This example of selected "
   "Convert::To<Type>( String*, IFormatProvider* ) \nmethods "
   "generates the following output. The example displays the "
   "\nprovider type if the IFormatProvider is called." );
   Console::WriteLine( "\nNote: For the "
   "ToBoolean, ToString, and ToChar methods, the \n"
   "IFormatProvider object is not referenced." );

   // The format provider is called for the following conversions.
   // (DummyProvider::GetFormat prints the requested format-info type.)
   Console::WriteLine();
   Console::WriteLine( format, "ToInt32", Int32A, Convert::ToInt32( Int32A, provider ) );
   Console::WriteLine( format, "ToDouble", DoubleA, Convert::ToDouble( DoubleA, provider ) );
   Console::WriteLine( format, "ToDateTime", DayTimeA, Convert::ToDateTime( DayTimeA, provider ) );

   // The format provider is not called for these conversions.
   Console::WriteLine();
   Console::WriteLine( format, "ToBoolean", BoolA, Convert::ToBoolean( BoolA, provider ) );
   Console::WriteLine( format, "ToString", StringA, Convert::ToString( StringA, provider ) );
   Console::WriteLine( format, "ToChar", CharA, Convert::ToChar( CharA, provider ) );
}
/*
This example of selected Convert::To<Type>( String*, IFormatProvider* )
methods generates the following output. The example displays the
provider type if the IFormatProvider is called.
Note: For the ToBoolean, ToString, and ToChar methods, the
IFormatProvider object is not referenced.
NumberFormatInfo ToInt32 -252645135 -252645135
NumberFormatInfo ToDouble 61680.3855 61680.3855
DateTimeFormatInfo ToDateTime 2001/9/11 13:45 9/11/2001 1:45:00 PM
ToBoolean True True
ToString Qwerty Qwerty
ToChar $ $
*/
//</Snippet2>
| 1,103 |
14,668 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "extensions/browser/extensions_browser_interface_binders.h"
#include <string>
#include "base/bind.h"
#include "content/public/browser/render_frame_host.h"
#include "content/public/browser/render_process_host.h"
#include "extensions/browser/mojo/keep_alive_impl.h"
#include "extensions/buildflags/buildflags.h"
#include "extensions/common/extension_api.h"
#include "extensions/common/mojom/keep_alive.mojom.h" // nogncheck
namespace extensions {
// Registers browser-process interface binders available to extension
// frames.  Currently only KeepAlive, backed by KeepAliveImpl for the
// frame's browser context and extension.
void PopulateExtensionFrameBinders(
    mojo::BinderMapWithContext<content::RenderFrameHost*>* binder_map,
    content::RenderFrameHost* render_frame_host,
    const Extension* extension) {
  DCHECK(extension);

  auto* browser_context =
      render_frame_host->GetProcess()->GetBrowserContext();
  binder_map->Add<KeepAlive>(
      base::BindRepeating(&KeepAliveImpl::Create, browser_context,
                          base::RetainedRef(extension)));
}
} // namespace extensions
| 349 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.