max_stars_count (int64, 301-224k) | text (string, length 6-1.05M) | token_count (int64, 3-727k)
---|---|---|
335 | {
"word": "Roast",
"definitions": [
"Cook (food, especially meat) by prolonged exposure to heat in an oven or over a fire.",
"(of food) be cooked by roasting.",
"Process (a foodstuff, metal ore, etc.) by subjecting it to intense heat.",
"Make or become very warm, especially through exposure to the heat of the sun or a fire.",
"Criticize or reprimand severely.",
"Subject to good-natured ridicule."
],
"parts-of-speech": "Verb"
} | 180 |
682 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "blifparse.hpp"
#include "blif_pretty_print.hpp"
using namespace blifparse;
int exit_code = 0;
class NoOpCallback : public Callback {
//A No-op version of the callback
public:
void start_parse() override {}
void filename(std::string /*fname*/) override {}
void lineno(int /*line_num*/) override {}
void begin_model(std::string /*model_name*/) override {}
void inputs(std::vector<std::string> /*inputs*/) override {}
void outputs(std::vector<std::string> /*outputs*/) override {}
void names(std::vector<std::string> /*nets*/, std::vector<std::vector<LogicValue>> /*so_cover*/) override {}
void latch(std::string /*input*/, std::string /*output*/, LatchType /*type*/, std::string /*control*/, LogicValue /*init*/) override {}
void subckt(std::string /*model*/, std::vector<std::string> /*ports*/, std::vector<std::string> /*nets*/) override {}
void blackbox() override {}
void end_model() override {}
void finish_parse() override {}
void parse_error(const int curr_lineno, const std::string& near_text, const std::string& msg) override {
fprintf(stderr, "Custom Error at line %d near '%s': %s\n", curr_lineno, near_text.c_str(), msg.c_str());
had_error_ = true;
}
bool had_error() { return had_error_; }
private:
bool had_error_ = false;
};
int main(int argc, char **argv) {
if(argc != 2) {
fprintf(stderr, "Usage: %s filename.blif\n", argv[0]);
fprintf(stderr, "\n");
fprintf(stderr, "Reads in an blif file into internal data structures\n");
fprintf(stderr, "and then prints it out\n");
exit(1);
}
//Parse the file
blifparse::BlifPrettyPrinter callback(true);
//NoOpCallback callback;
blif_parse_filename(argv[1], callback);
if(callback.had_error()) {
return 1;
} else {
return 0;
}
}
| 832 |
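The parser above drives everything through the Callback interface, so a small subclass is enough to collect statistics instead of pretty-printing. The sketch below is a hypothetical CountingCallback (not part of libblifparse) that reuses the NoOpCallback defined above; it assumes it is placed in the same file, between NoOpCallback and main().
class CountingCallback : public NoOpCallback {
    //Counts .names and .latch statements seen during the parse
    public:
        void names(std::vector<std::string> /*nets*/, std::vector<std::vector<LogicValue>> /*so_cover*/) override { ++num_names_; }
        void latch(std::string /*input*/, std::string /*output*/, LatchType /*type*/, std::string /*control*/, LogicValue /*init*/) override { ++num_latches_; }
        void finish_parse() override {
            printf("Parsed %zu .names and %zu .latch statements\n", num_names_, num_latches_);
        }
    private:
        size_t num_names_ = 0;
        size_t num_latches_ = 0;
};
Using it mirrors the commented-out NoOpCallback line in main(): construct a CountingCallback instead of the BlifPrettyPrinter and pass it to blif_parse_filename().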
778 | <reponame>lkusch/Kratos<filename>kratos/utilities/delaunator_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \
//    _|\_\_|  \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: <NAME>
//
#if !defined(KRATOS_DELAUNATOR_UTILITIES)
#define KRATOS_DELAUNATOR_UTILITIES
// System includes
// External includes
// Project includes
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
// forward declaring ModelPart and Point to avoid including heavy headers here
class ModelPart;
class Point;
/**
* @namespace DelaunatorUtilities
* @ingroup KratosCore
* @brief This namespace includes several utilities that use the triangle library
* @author <NAME>
*/
namespace DelaunatorUtilities
{
/**
* @brief This method creates a triangle mesh from a model part of nodes
* @param rModelPart The model of the problem to mesh
*/
void KRATOS_API(KRATOS_CORE) CreateTriangleMeshFromNodes(ModelPart& rModelPart);
/**
* @brief This method returns the triangles connectivity from a list of coordinates (using triangle library)
* @param rCoordinates The list of coordinates, first X, then Y, for each point of the point cloud
* @return The connectivity vector
*/
std::vector<std::size_t> KRATOS_API(KRATOS_CORE) ComputeTrianglesConnectivity(const std::vector<double>& rCoordinates);
/**
* @brief This method returns the triangles connectivity from a list of coordinates (using triangle library)
* @param rPoints The list of points
* @return The connectivity vector
*/
std::vector<std::size_t> KRATOS_API(KRATOS_CORE) ComputeTrianglesConnectivity(const std::vector<Point>& rPoints);
/**
* @brief This method performs a Constrained Delaunay Triangulation from a list of coordinates and segments (using the triangle library)
* @param rCoordinates The list of coordinates, first X, then Y, for each point of the point cloud
* @param rSegments The list of segments; each segment is given as one std::array containing its i and j nodal ids
* @param AreaConstraint If provided, imposes that value as a constraint on the maximum area
* @return A pair containing in first position a list with the triangles connectivities and in second position a list with the x and y nodal coordinates
*/
std::pair<std::vector<std::size_t>, std::vector<double>> KRATOS_API(KRATOS_CORE) ComputeTrianglesConnectivity(
const std::vector<double>& rCoordinates,
const std::vector<std::array<double,2>>& rSegments,
const double AreaConstraint = 0);
}; // namespace DelaunatorUtilities
} // namespace Kratos
#endif /* KRATOS_DELAUNATOR_UTILITIES defined */
| 1,042 |
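The header documents the free functions but not how they are called; the following is a minimal usage sketch for the coordinate-based overload. The include path and the assumption that the returned vector lists point indices in groups of three (one triangle per group) are not stated in the header and are guesses for illustration.
#include <cstddef>
#include <iostream>
#include <vector>
#include "utilities/delaunator_utilities.h" // path assumed
int main()
{
    // Four corners of the unit square in the flat [x0, y0, x1, y1, ...] layout
    const std::vector<double> coordinates = {0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0};
    const std::vector<std::size_t> connectivity =
        Kratos::DelaunatorUtilities::ComputeTrianglesConnectivity(coordinates);
    // Assumed layout: three point indices per triangle
    for (std::size_t i = 0; i + 2 < connectivity.size(); i += 3) {
        std::cout << "Triangle " << i / 3 << ": " << connectivity[i] << " "
                  << connectivity[i + 1] << " " << connectivity[i + 2] << std::endl;
    }
    return 0;
}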
571 | package me.devsaki.hentoid.parsers.content;
import androidx.annotation.NonNull;
import javax.annotation.Nonnull;
import me.devsaki.hentoid.database.domains.Content;
import pl.droidsonroids.jspoon.annotation.Selector;
public abstract class BaseContentParser implements ContentParser {
protected static final String NO_TITLE = "<no title>";
@Selector(value = "head [rel=canonical]", attr = "href", defValue = "")
protected String canonicalUrl;
public String getCanonicalUrl() {
return canonicalUrl;
}
public Content toContent(@Nonnull String url) {
return update(new Content(), url, true);
}
public abstract Content update(@NonNull final Content content, @Nonnull String url, boolean updateImages);
}
| 247 |
787 | <filename>src/main/java/org/spongepowered/api/scoreboard/objective/Objective.java
/*
* This file is part of SpongeAPI, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.api.scoreboard.objective;
import net.kyori.adventure.text.Component;
import org.spongepowered.api.Sponge;
import org.spongepowered.api.scoreboard.Score;
import org.spongepowered.api.scoreboard.Scoreboard;
import org.spongepowered.api.scoreboard.criteria.Criterion;
import org.spongepowered.api.scoreboard.objective.displaymode.ObjectiveDisplayMode;
import org.spongepowered.api.util.CopyableBuilder;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;
/**
* An objective tracks an integer score for each entry it contains.
*
* <p>Entries can be updated by plugins, by in-game commands, or automatically
* by the game, depending on their {@link Criterion}.</p>
*/
public interface Objective {
/**
* Creates a new {@link Builder} to build a {@link Objective}.
*
* @return The new builder
*/
static Builder builder() {
return Sponge.game().builderProvider().provide(Builder.class);
}
/**
* Gets the name of this Objective.
*
* @return The objective's name
*/
String name();
/**
* Gets the name displayed to players.
*
* @return The objective's display name
*/
Component displayName();
/**
* Sets the name displayed to players.
*
* @param displayName Display name to set
* @throws IllegalArgumentException if displayName is longer than 32
* characters (in its legacy representation)
*/
void setDisplayName(Component displayName) throws IllegalArgumentException;
/**
* Gets the criterion for this objective.
*
* @return This objective's criterion
*/
Criterion criterion();
/**
* Gets the {@link ObjectiveDisplayMode} used to display this objective.
*
* @return The {@link ObjectiveDisplayMode} used to display this objective
*/
ObjectiveDisplayMode displayMode();
/**
* Sets the {@link ObjectiveDisplayMode} used to display this objective.
*
* @param displayMode The {@link ObjectiveDisplayMode} used to display this objective
*/
void setDisplayMode(ObjectiveDisplayMode displayMode);
/**
* Gets the set of {@link Score}s for this objective.
*
* @return The set of {@link Score}s for this objective
*/
Map<Component, Score> scores();
/**
* Returns whether this objective has a {@link Score} with the given name.
*
* @param name The name of the {@link Score} to check for.
* @return Whether this objective has a {@link Score} with the given name.
*/
boolean hasScore(Component name);
/**
* Adds the specified {@link Score} to this objective.
*
* @param score The {@link Score} to add
* @throws IllegalArgumentException If a {@link Score} with the same name exists, or the specified {@link Score} has already been added
*/
void addScore(Score score) throws IllegalArgumentException;
/**
* Gets an entry's {@link Score} for this objective, if it exists.
*
* @param name The name of the {@link Score} to get.
* @return The {@link Score} for the specified {@link Component}, if it exists.
*/
default Optional<Score> findScore(final Component name) {
if (!this.hasScore(name)) {
return Optional.empty();
}
return Optional.of(this.findOrCreateScore(name));
}
/**
* Gets an entry's {@link Score} for this objective.
*
* <p>If the {@link Score} does not exist, it will be created.</p>
*
* @param name The name of the {@link Score} to get
* @return The {@link Score} for the specified {@link Component}
*/
Score findOrCreateScore(Component name);
/**
* Removes the specified {@link Score} from this objective, if present.
*
* @param score The {@link Score} to remove
* @return Whether the score existed on this objective
*/
boolean removeScore(Score score);
/**
* Removes the {@link Score} with the specified name from this objective, if present.
*
* @param name The name of the {@link Score} to remove.
* @return Whether the score existed on this objective
*/
boolean removeScore(Component name);
/**
* Returns a {@link Set} of parent {@link Scoreboard}s this
* {@link Objective} is registered to.
*
* @return A {@link Set} of parent {@link Scoreboard}s this
* {@link Objective} is registered to
*/
Set<Scoreboard> scoreboards();
/**
* Represents a builder to create {@link Objective} instances.
*/
interface Builder extends org.spongepowered.api.util.Builder<Objective, Builder>, CopyableBuilder<Objective, Builder> {
/**
* Sets the name of the {@link Objective}.
*
* @param name The name to set
* @return This builder
*/
Builder name(String name);
/**
* Sets the display name of the {@link Objective}.
*
* @param displayName The display name to set
* @return This builder
*/
Builder displayName(Component displayName);
/**
* Sets the {@link Criterion} of the {@link Objective}.
*
* @param criterion The {@link Criterion} to set
* @return This builder
*/
default Builder criterion(final Supplier<? extends Criterion> criterion) {
return this.criterion(criterion.get());
}
/**
* Sets the {@link Criterion} of the {@link Objective}.
*
* @param criterion The {@link Criterion} to set
* @return This builder
*/
Builder criterion(Criterion criterion);
/**
* Sets the {@link ObjectiveDisplayMode} of the {@link Objective}.
*
* @param objectiveDisplayMode The {@link ObjectiveDisplayMode} to set
* @return This builder
*/
default Builder objectiveDisplayMode(final Supplier<? extends ObjectiveDisplayMode> objectiveDisplayMode) {
return this.objectiveDisplayMode(objectiveDisplayMode.get());
}
/**
* Sets the {@link ObjectiveDisplayMode} of the {@link Objective}.
*
* @param objectiveDisplayMode The {@link ObjectiveDisplayMode} to set
* @return This builder
*/
Builder objectiveDisplayMode(ObjectiveDisplayMode objectiveDisplayMode);
/**
* Builds an instance of an {@link Objective}.
*
* @return A new instance of an {@link Objective}
* @throws IllegalStateException if the {@link Objective} is not complete
*/
@Override
Objective build() throws IllegalStateException;
}
}
| 2,835 |
416 | <gh_stars>100-1000
//
// INStartCallContactResolutionResult.h
// Intents
//
// Copyright (c) 2016-2020 Apple Inc. All rights reserved.
//
#import <Intents/INPersonResolutionResult.h>
typedef NS_ENUM(NSInteger, INStartCallContactUnsupportedReason) {
INStartCallContactUnsupportedReasonNoContactFound = 1,
INStartCallContactUnsupportedReasonMultipleContactsUnsupported,
INStartCallContactUnsupportedReasonNoHandleForLabel,
INStartCallContactUnsupportedReasonInvalidHandle,
INStartCallContactUnsupportedReasonUnsupportedMmiUssd,
INStartCallContactUnsupportedReasonNoCallHistoryForRedial API_DEPRECATED("", ios(13.0, 14.0), watchos(6.0, 7.0)) API_UNAVAILABLE(macos),
INStartCallContactUnsupportedReasonNoUsableHandleForRedial,
} API_AVAILABLE(ios(13.0), watchos(6.0)) API_UNAVAILABLE(macos, tvos);
NS_ASSUME_NONNULL_BEGIN
API_AVAILABLE(ios(13.0), watchos(6.0))
API_UNAVAILABLE(macos, tvos)
@interface INStartCallContactResolutionResult : INPersonResolutionResult
+ (instancetype)unsupportedForReason:(INStartCallContactUnsupportedReason)reason NS_SWIFT_NAME(unsupported(forReason:));
- (instancetype)initWithPersonResolutionResult:(INPersonResolutionResult *)personResolutionResult;
@end
NS_ASSUME_NONNULL_END
| 428 |
8,747 | /*
* Crypto wrapper for internal crypto implementation
* Copyright (c) 2006-2011, <NAME> <<EMAIL>>
*
* This software may be distributed under the terms of the BSD license.
* See README for more details.
*/
#include "includes.h"
#include "common.h"
#include "crypto.h"
#include "sha256_i.h"
#include "sha1_i.h"
#include "md5_i.h"
struct crypto_hash {
enum crypto_hash_alg alg;
union {
struct MD5Context md5;
struct SHA1Context sha1;
#ifdef CONFIG_SHA256
struct sha256_state sha256;
#endif /* CONFIG_SHA256 */
#ifdef CONFIG_INTERNAL_SHA384
struct sha384_state sha384;
#endif /* CONFIG_INTERNAL_SHA384 */
#ifdef CONFIG_INTERNAL_SHA512
struct sha512_state sha512;
#endif /* CONFIG_INTERNAL_SHA512 */
} u;
u8 key[64];
size_t key_len;
};
struct crypto_hash * crypto_hash_init(enum crypto_hash_alg alg, const u8 *key,
size_t key_len)
{
struct crypto_hash *ctx;
u8 k_pad[64];
u8 tk[32];
size_t i;
ctx = os_zalloc(sizeof(*ctx));
if (ctx == NULL)
return NULL;
ctx->alg = alg;
switch (alg) {
case CRYPTO_HASH_ALG_MD5:
MD5Init(&ctx->u.md5);
break;
case CRYPTO_HASH_ALG_SHA1:
SHA1Init(&ctx->u.sha1);
break;
#ifdef CONFIG_SHA256
case CRYPTO_HASH_ALG_SHA256:
sha256_init(&ctx->u.sha256);
break;
#endif /* CONFIG_SHA256 */
#ifdef CONFIG_INTERNAL_SHA384
case CRYPTO_HASH_ALG_SHA384:
sha384_init(&ctx->u.sha384);
break;
#endif /* CONFIG_INTERNAL_SHA384 */
#ifdef CONFIG_INTERNAL_SHA512
case CRYPTO_HASH_ALG_SHA512:
sha512_init(&ctx->u.sha512);
break;
#endif /* CONFIG_INTERNAL_SHA512 */
case CRYPTO_HASH_ALG_HMAC_MD5:
if (key_len > sizeof(k_pad)) {
MD5Init(&ctx->u.md5);
MD5Update(&ctx->u.md5, key, key_len);
MD5Final(tk, &ctx->u.md5);
key = tk;
key_len = 16;
}
os_memcpy(ctx->key, key, key_len);
ctx->key_len = key_len;
os_memcpy(k_pad, key, key_len);
if (key_len < sizeof(k_pad))
os_memset(k_pad + key_len, 0, sizeof(k_pad) - key_len);
for (i = 0; i < sizeof(k_pad); i++)
k_pad[i] ^= 0x36;
MD5Init(&ctx->u.md5);
MD5Update(&ctx->u.md5, k_pad, sizeof(k_pad));
break;
case CRYPTO_HASH_ALG_HMAC_SHA1:
if (key_len > sizeof(k_pad)) {
SHA1Init(&ctx->u.sha1);
SHA1Update(&ctx->u.sha1, key, key_len);
SHA1Final(tk, &ctx->u.sha1);
key = tk;
key_len = 20;
}
os_memcpy(ctx->key, key, key_len);
ctx->key_len = key_len;
os_memcpy(k_pad, key, key_len);
if (key_len < sizeof(k_pad))
os_memset(k_pad + key_len, 0, sizeof(k_pad) - key_len);
for (i = 0; i < sizeof(k_pad); i++)
k_pad[i] ^= 0x36;
SHA1Init(&ctx->u.sha1);
SHA1Update(&ctx->u.sha1, k_pad, sizeof(k_pad));
break;
#ifdef CONFIG_SHA256
case CRYPTO_HASH_ALG_HMAC_SHA256:
if (key_len > sizeof(k_pad)) {
sha256_init(&ctx->u.sha256);
sha256_process(&ctx->u.sha256, key, key_len);
sha256_done(&ctx->u.sha256, tk);
key = tk;
key_len = 32;
}
os_memcpy(ctx->key, key, key_len);
ctx->key_len = key_len;
os_memcpy(k_pad, key, key_len);
if (key_len < sizeof(k_pad))
os_memset(k_pad + key_len, 0, sizeof(k_pad) - key_len);
for (i = 0; i < sizeof(k_pad); i++)
k_pad[i] ^= 0x36;
sha256_init(&ctx->u.sha256);
sha256_process(&ctx->u.sha256, k_pad, sizeof(k_pad));
break;
#endif /* CONFIG_SHA256 */
default:
os_free(ctx);
return NULL;
}
return ctx;
}
void crypto_hash_update(struct crypto_hash *ctx, const u8 *data, size_t len)
{
if (ctx == NULL)
return;
switch (ctx->alg) {
case CRYPTO_HASH_ALG_MD5:
case CRYPTO_HASH_ALG_HMAC_MD5:
MD5Update(&ctx->u.md5, data, len);
break;
case CRYPTO_HASH_ALG_SHA1:
case CRYPTO_HASH_ALG_HMAC_SHA1:
SHA1Update(&ctx->u.sha1, data, len);
break;
#ifdef CONFIG_SHA256
case CRYPTO_HASH_ALG_SHA256:
case CRYPTO_HASH_ALG_HMAC_SHA256:
sha256_process(&ctx->u.sha256, data, len);
break;
#endif /* CONFIG_SHA256 */
#ifdef CONFIG_INTERNAL_SHA384
case CRYPTO_HASH_ALG_SHA384:
sha384_process(&ctx->u.sha384, data, len);
break;
#endif /* CONFIG_INTERNAL_SHA384 */
#ifdef CONFIG_INTERNAL_SHA512
case CRYPTO_HASH_ALG_SHA512:
sha512_process(&ctx->u.sha512, data, len);
break;
#endif /* CONFIG_INTERNAL_SHA512 */
default:
break;
}
}
int crypto_hash_finish(struct crypto_hash *ctx, u8 *mac, size_t *len)
{
u8 k_pad[64];
size_t i;
if (ctx == NULL)
return -2;
if (mac == NULL || len == NULL) {
os_free(ctx);
return 0;
}
switch (ctx->alg) {
case CRYPTO_HASH_ALG_MD5:
if (*len < 16) {
*len = 16;
os_free(ctx);
return -1;
}
*len = 16;
MD5Final(mac, &ctx->u.md5);
break;
case CRYPTO_HASH_ALG_SHA1:
if (*len < 20) {
*len = 20;
os_free(ctx);
return -1;
}
*len = 20;
SHA1Final(mac, &ctx->u.sha1);
break;
#ifdef CONFIG_SHA256
case CRYPTO_HASH_ALG_SHA256:
if (*len < 32) {
*len = 32;
os_free(ctx);
return -1;
}
*len = 32;
sha256_done(&ctx->u.sha256, mac);
break;
#endif /* CONFIG_SHA256 */
#ifdef CONFIG_INTERNAL_SHA384
case CRYPTO_HASH_ALG_SHA384:
if (*len < 48) {
*len = 48;
os_free(ctx);
return -1;
}
*len = 48;
sha384_done(&ctx->u.sha384, mac);
break;
#endif /* CONFIG_INTERNAL_SHA384 */
#ifdef CONFIG_INTERNAL_SHA512
case CRYPTO_HASH_ALG_SHA512:
if (*len < 64) {
*len = 64;
os_free(ctx);
return -1;
}
*len = 64;
sha512_done(&ctx->u.sha512, mac);
break;
#endif /* CONFIG_INTERNAL_SHA512 */
case CRYPTO_HASH_ALG_HMAC_MD5:
if (*len < 16) {
*len = 16;
os_free(ctx);
return -1;
}
*len = 16;
MD5Final(mac, &ctx->u.md5);
os_memcpy(k_pad, ctx->key, ctx->key_len);
os_memset(k_pad + ctx->key_len, 0,
sizeof(k_pad) - ctx->key_len);
for (i = 0; i < sizeof(k_pad); i++)
k_pad[i] ^= 0x5c;
MD5Init(&ctx->u.md5);
MD5Update(&ctx->u.md5, k_pad, sizeof(k_pad));
MD5Update(&ctx->u.md5, mac, 16);
MD5Final(mac, &ctx->u.md5);
break;
case CRYPTO_HASH_ALG_HMAC_SHA1:
if (*len < 20) {
*len = 20;
os_free(ctx);
return -1;
}
*len = 20;
SHA1Final(mac, &ctx->u.sha1);
os_memcpy(k_pad, ctx->key, ctx->key_len);
os_memset(k_pad + ctx->key_len, 0,
sizeof(k_pad) - ctx->key_len);
for (i = 0; i < sizeof(k_pad); i++)
k_pad[i] ^= 0x5c;
SHA1Init(&ctx->u.sha1);
SHA1Update(&ctx->u.sha1, k_pad, sizeof(k_pad));
SHA1Update(&ctx->u.sha1, mac, 20);
SHA1Final(mac, &ctx->u.sha1);
break;
#ifdef CONFIG_SHA256
case CRYPTO_HASH_ALG_HMAC_SHA256:
if (*len < 32) {
*len = 32;
os_free(ctx);
return -1;
}
*len = 32;
sha256_done(&ctx->u.sha256, mac);
os_memcpy(k_pad, ctx->key, ctx->key_len);
os_memset(k_pad + ctx->key_len, 0,
sizeof(k_pad) - ctx->key_len);
for (i = 0; i < sizeof(k_pad); i++)
k_pad[i] ^= 0x5c;
sha256_init(&ctx->u.sha256);
sha256_process(&ctx->u.sha256, k_pad, sizeof(k_pad));
sha256_process(&ctx->u.sha256, mac, 32);
sha256_done(&ctx->u.sha256, mac);
break;
#endif /* CONFIG_SHA256 */
default:
os_free(ctx);
return -1;
}
os_free(ctx);
if (TEST_FAIL())
return -1;
return 0;
}
int crypto_global_init(void)
{
return 0;
}
void crypto_global_deinit(void)
{
}
| 3,464 |
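The wrapper above exposes plain hashing and HMAC through the same three calls; the sketch below shows the intended call sequence for HMAC-SHA256. It is illustrative only and assumes it is compiled inside the hostap tree with CONFIG_SHA256 enabled, since the headers and the u8/os_* types come from there.
#include "includes.h"
#include "common.h"
#include "crypto.h"
/* Compute HMAC-SHA256(key, msg) into mac (at least 32 bytes). */
int example_hmac_sha256(const u8 *key, size_t key_len,
                        const u8 *msg, size_t msg_len, u8 *mac)
{
    struct crypto_hash *ctx;
    size_t mac_len = 32; /* in: buffer size; out: digest length */
    ctx = crypto_hash_init(CRYPTO_HASH_ALG_HMAC_SHA256, key, key_len);
    if (ctx == NULL)
        return -1;
    crypto_hash_update(ctx, msg, msg_len);
    /* crypto_hash_finish() frees ctx on every path, so no separate cleanup. */
    return crypto_hash_finish(ctx, mac, &mac_len);
}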
1,489 | <reponame>slated/django-activity-stream
default_app_config = 'testapp_nested.apps.TestappNestedConfig'
| 37 |
751 | <reponame>amithbraj/vpp
/*
* Copyright (c) 2016 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vnet/dpo/pw_cw.h>
#include <vnet/fib/fib_node.h>
#ifndef CLIB_MARCH_VARIANT
/*
* pool of all MPLS Label DPOs
*/
pw_cw_dpo_t *pw_cw_dpo_pool;
static pw_cw_dpo_t *
pw_cw_dpo_alloc (void)
{
pw_cw_dpo_t *pwcw;
pool_get_aligned_zero(pw_cw_dpo_pool, pwcw, 8);
return (pwcw);
}
static index_t
pw_cw_dpo_get_index (pw_cw_dpo_t *pwcw)
{
return (pwcw - pw_cw_dpo_pool);
}
void
pw_cw_dpo_create (const dpo_id_t *parent,
dpo_id_t *dpo)
{
pw_cw_dpo_t *pwcw;
pwcw = pw_cw_dpo_alloc();
/*
* stack this disposition object on the parent given
*/
dpo_stack(DPO_PW_CW,
parent->dpoi_proto,
&pwcw->pwcw_parent,
parent);
/*
* set up the return DPO to refer to this object
*/
dpo_set(dpo,
DPO_PW_CW,
parent->dpoi_proto,
pw_cw_dpo_get_index(pwcw));
}
u8*
format_pw_cw_dpo (u8 *s, va_list *args)
{
index_t pwcwi = va_arg (*args, index_t);
u32 indent = va_arg (*args, u32);
pw_cw_dpo_t *pwcw;
if (pool_is_free_index(pw_cw_dpo_pool, pwcwi))
{
/*
* the packet trace can be printed after the DPO has been deleted
*/
return (format(s, "pw-cw[???,%d]:", pwcwi));
}
pwcw = pw_cw_dpo_get(pwcwi);
s = format(s, "pw-cw[%d]:", pwcwi);
s = format(s, "\n%U", format_white_space, indent);
s = format(s, "%U", format_dpo_id, &pwcw->pwcw_parent, indent+2);
return (s);
}
static void
pw_cw_dpo_lock (dpo_id_t *dpo)
{
pw_cw_dpo_t *pwcw;
pwcw = pw_cw_dpo_get(dpo->dpoi_index);
pwcw->pwcw_locks++;
}
static void
pw_cw_dpo_unlock (dpo_id_t *dpo)
{
pw_cw_dpo_t *pwcw;
pwcw = pw_cw_dpo_get(dpo->dpoi_index);
pwcw->pwcw_locks--;
if (0 == pwcw->pwcw_locks)
{
dpo_reset(&pwcw->pwcw_parent);
pool_put(pw_cw_dpo_pool, pwcw);
}
}
#endif /* CLIB_MARCH_VARIANT */
/**
* @brief A struct to hold tracing information for the MPLS label imposition
* node.
*/
typedef struct pw_cw_trace_t_
{
/**
* The CW popped
*/
u32 cw;
} pw_cw_trace_t;
always_inline uword
pw_cw_pop_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
u32 n_left_from, next_index, * from, * to_next;
from = vlib_frame_vector_args(from_frame);
n_left_from = from_frame->n_vectors;
next_index = node->cached_next_index;
while (n_left_from > 0)
{
u32 n_left_to_next;
vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
while (n_left_from >= 4 && n_left_to_next >= 2)
{
pw_cw_dpo_t *pwcw0, *pwcw1;
u32 bi0, pwcwi0, bi1, pwcwi1;
vlib_buffer_t * b0, *b1;
u32 next0, next1;
bi0 = to_next[0] = from[0];
bi1 = to_next[1] = from[1];
/* Prefetch next iteration. */
{
vlib_buffer_t * p2, * p3;
p2 = vlib_get_buffer(vm, from[2]);
p3 = vlib_get_buffer(vm, from[3]);
vlib_prefetch_buffer_header(p2, STORE);
vlib_prefetch_buffer_header(p3, STORE);
CLIB_PREFETCH(p2->data, sizeof(pw_cw_t), STORE);
CLIB_PREFETCH(p3->data, sizeof(pw_cw_t), STORE);
}
from += 2;
to_next += 2;
n_left_from -= 2;
n_left_to_next -= 2;
b0 = vlib_get_buffer(vm, bi0);
b1 = vlib_get_buffer(vm, bi1);
/* get the next parent DPO for the next pop */
pwcwi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
pwcwi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
pwcw0 = pw_cw_dpo_get(pwcwi0);
pwcw1 = pw_cw_dpo_get(pwcwi1);
next0 = pwcw0->pwcw_parent.dpoi_next_node;
next1 = pwcw1->pwcw_parent.dpoi_next_node;
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = pwcw0->pwcw_parent.dpoi_index;
vnet_buffer(b1)->ip.adj_index[VLIB_TX] = pwcw1->pwcw_parent.dpoi_index;
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
pw_cw_trace_t *tr = vlib_add_trace(vm, node, b0, sizeof(*tr));
tr->cw = *((pw_cw_t*) vlib_buffer_get_current(b0));
}
if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
{
pw_cw_trace_t *tr = vlib_add_trace(vm, node, b1, sizeof(*tr));
tr->cw = *((pw_cw_t*) vlib_buffer_get_current(b1));
}
/* pop the PW CW */
vlib_buffer_advance (b0, sizeof(pw_cw_t));
vlib_buffer_advance (b1, sizeof(pw_cw_t));
vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
n_left_to_next,
bi0, bi1, next0, next1);
}
while (n_left_from > 0 && n_left_to_next > 0)
{
pw_cw_dpo_t *pwcw0;
vlib_buffer_t * b0;
u32 bi0, pwcwi0;
u32 next0;
bi0 = from[0];
to_next[0] = bi0;
from += 1;
to_next += 1;
n_left_from -= 1;
n_left_to_next -= 1;
b0 = vlib_get_buffer(vm, bi0);
pwcwi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
pwcw0 = pw_cw_dpo_get(pwcwi0);
next0 = pwcw0->pwcw_parent.dpoi_next_node;
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = pwcw0->pwcw_parent.dpoi_index;
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
pw_cw_trace_t *tr = vlib_add_trace(vm, node, b0, sizeof(*tr));
tr->cw = *((pw_cw_t*) vlib_buffer_get_current(b0));
}
vlib_buffer_advance (b0, sizeof(pw_cw_t));
vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
}
vlib_put_next_frame(vm, node, next_index, n_left_to_next);
}
return from_frame->n_vectors;
}
static u8 *
format_pw_cw_trace (u8 * s, va_list * args)
{
CLIB_UNUSED(vlib_main_t * vm) = va_arg(*args, vlib_main_t *);
CLIB_UNUSED(vlib_node_t * node) = va_arg(*args, vlib_node_t *);
CLIB_UNUSED(pw_cw_trace_t * t);
t = va_arg(*args, pw_cw_trace_t *);
s = format(s, "cw:0x%x", t->cw);
return (s);
}
VLIB_NODE_FN (pw_cw_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
return (pw_cw_pop_inline(vm, node, frame));
}
VLIB_REGISTER_NODE(pw_cw_node) = {
.name = "pw-cw-pop",
.vector_size = sizeof(u32),
.format_trace = format_pw_cw_trace,
};
#ifndef CLIB_MARCH_VARIANT
static void
pw_cw_dpo_mem_show (void)
{
fib_show_memory_usage("PW-CW",
pool_elts(pw_cw_dpo_pool),
pool_len(pw_cw_dpo_pool),
sizeof(pw_cw_dpo_t));
}
const static dpo_vft_t pwcw_vft = {
.dv_lock = pw_cw_dpo_lock,
.dv_unlock = pw_cw_dpo_unlock,
.dv_format = format_pw_cw_dpo,
.dv_mem_show = pw_cw_dpo_mem_show,
};
const static char* const pw_cw_proto_nodes[] =
{
"pw-cw-pop",
NULL,
};
const static char* const * const pw_cw_nodes[DPO_PROTO_NUM] =
{
[DPO_PROTO_IP4] = pw_cw_proto_nodes,
[DPO_PROTO_IP6] = pw_cw_proto_nodes,
[DPO_PROTO_MPLS] = pw_cw_proto_nodes,
[DPO_PROTO_ETHERNET] = pw_cw_proto_nodes,
};
void
pw_cw_dpo_module_init (void)
{
dpo_register(DPO_PW_CW, &pwcw_vft, pw_cw_nodes);
}
#endif /* CLIB_MARCH_VARIANT */
| 4,814 |
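Nothing in the file shows how a PW-CW DPO gets stacked in practice, so the following is a hypothetical usage sketch rather than VPP code: parent_dpo stands in for whatever DPO the pseudowire would otherwise forward through, DPO_INVALID is assumed to be the usual VPP initializer for dpo_id_t, and pw_cw_dpo_module_init() is assumed to have run already.
#include <vnet/dpo/pw_cw.h>
static void
example_stack_pw_cw (const dpo_id_t * parent_dpo)
{
    dpo_id_t pwcw_dpo = DPO_INVALID;
    /* Allocate a pw_cw_dpo_t, stack it on the parent and point pwcw_dpo at it. */
    pw_cw_dpo_create (parent_dpo, &pwcw_dpo);
    /* ... install pwcw_dpo into a FIB entry or adjacency here ... */
    /* Drop the reference; at zero locks the object returns to pw_cw_dpo_pool. */
    dpo_reset (&pwcw_dpo);
}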
879 | package org.zstack.header.tag;
import org.springframework.http.HttpMethod;
import org.zstack.header.identity.Action;
import org.zstack.header.query.APIQueryMessage;
import org.zstack.header.query.AutoQuery;
import org.zstack.header.rest.RestRequest;
import java.util.List;
import static java.util.Arrays.asList;
/**
*/
@AutoQuery(replyClass = APIQuerySystemTagReply.class, inventoryClass = SystemTagInventory.class)
@Action(category = TagConstant.ACTION_CATEGORY, names = {"read"})
@RestRequest(
path = "/system-tags",
optionalPaths = {"/system-tags/{uuid}"},
method = HttpMethod.GET,
responseClass = APIQuerySystemTagReply.class
)
public class APIQuerySystemTagMsg extends APIQueryMessage {
public static List<String> __example__() {
return asList("inherent=true", "resourceType=HostVO");
}
}
| 299 |
852 | <reponame>ckamtsikis/cmssw
#include <iostream>
#include <vector>
#include <memory>
#include "RecoEgamma/EgammaPhotonAlgos/interface/ConversionTrackPairFinder.h"
// Framework
#include "FWCore/MessageLogger/interface/MessageLogger.h"
//
//
#include <vector>
#include <map>
//using namespace std;
ConversionTrackPairFinder::ConversionTrackPairFinder() {
LogDebug("ConversionTrackPairFinder") << " CTOR "
<< "\n";
}
ConversionTrackPairFinder::~ConversionTrackPairFinder() {
LogDebug("ConversionTrackPairFinder") << " DTOR "
<< "\n";
}
std::map<std::vector<reco::TransientTrack>, reco::CaloClusterPtr, CompareTwoTracksVectors>
ConversionTrackPairFinder::run(const std::vector<reco::TransientTrack>& outInTrk,
const edm::Handle<reco::TrackCollection>& outInTrkHandle,
const edm::Handle<reco::TrackCaloClusterPtrAssociation>& outInTrackSCAssH,
const std::vector<reco::TransientTrack>& _inOutTrk,
const edm::Handle<reco::TrackCollection>& inOutTrkHandle,
const edm::Handle<reco::TrackCaloClusterPtrAssociation>& inOutTrackSCAssH) {
std::vector<reco::TransientTrack> inOutTrk = _inOutTrk;
LogDebug("ConversionTrackPairFinder") << "ConversionTrackPairFinder::run "
<< "\n";
std::vector<reco::TransientTrack> selectedOutInTk;
std::vector<reco::TransientTrack> selectedInOutTk;
std::vector<reco::TransientTrack> allSelectedTk;
std::map<reco::TransientTrack, reco::CaloClusterPtr, CompareTwoTracks> scTrkAssocMap;
std::multimap<int, reco::TransientTrack, std::greater<int> > auxMap;
bool oneLeg = false;
bool noTrack = false;
int iTrk = 0;
for (std::vector<reco::TransientTrack>::const_iterator iTk = outInTrk.begin(); iTk != outInTrk.end(); iTk++) {
edm::Ref<reco::TrackCollection> trackRef(outInTrkHandle, iTrk);
iTrk++;
if (iTk->numberOfValidHits() < 3 || iTk->normalizedChi2() > 5000)
continue;
if (fabs(iTk->impactPointState().globalPosition().x()) > 110 ||
fabs(iTk->impactPointState().globalPosition().y()) > 110 ||
fabs(iTk->impactPointState().globalPosition().z()) > 280)
continue;
// std::cout << " Out In Track charge " << iTk->charge() << " Num of RecHits " << iTk->recHitsSize() << " inner pt " << sqrt(iTk->track().innerMomentum().perp2()) << "\n";
const reco::TrackTransientTrack* ttt = dynamic_cast<const reco::TrackTransientTrack*>(iTk->basicTransientTrack());
reco::TrackRef myTkRef = ttt->persistentTrackRef();
//std::cout << " ConversionTrackPairFinder persistent track ref hits " << myTkRef->recHitsSize() << " inner pt " << sqrt(iTk->track().innerMomentum().perp2()) << "\n";
// std::cout << " ConversionTrackPairFinder track from handle hits " << trackRef->recHitsSize() << " inner pt " << sqrt(iTk->track().innerMomentum().perp2()) << "\n";
const reco::CaloClusterPtr aClus = (*outInTrackSCAssH)[trackRef];
// std::cout << "ConversionTrackPairFinder Reading the OutIn Map " << *outInTrackSCAss[trackRef] << " " << &outInTrackSCAss[trackRef] << std::endl;
// std::cout << "ConversionTrackPairFinder Out In track belonging to SC with energy " << aClus->energy() << "\n";
int nHits = iTk->recHitsSize();
scTrkAssocMap[*iTk] = aClus;
auxMap.insert(std::pair<int, reco::TransientTrack>(nHits, (*iTk)));
selectedOutInTk.push_back(*iTk);
allSelectedTk.push_back(*iTk);
}
iTrk = 0;
for (std::vector<reco::TransientTrack>::const_iterator iTk = inOutTrk.begin(); iTk != inOutTrk.end(); iTk++) {
edm::Ref<reco::TrackCollection> trackRef(inOutTrkHandle, iTrk);
iTrk++;
if (iTk->numberOfValidHits() < 3 || iTk->normalizedChi2() > 5000)
continue;
if (fabs(iTk->impactPointState().globalPosition().x()) > 110 ||
fabs(iTk->impactPointState().globalPosition().y()) > 110 ||
fabs(iTk->impactPointState().globalPosition().z()) > 280)
continue;
// std::cout << " In Out Track charge " << iTk->charge() << " Num of RecHits " << iTk->recHitsSize() << " inner pt " << sqrt(iTk->track().innerMomentum().perp2()) << "\n";
const reco::TrackTransientTrack* ttt = dynamic_cast<const reco::TrackTransientTrack*>(iTk->basicTransientTrack());
reco::TrackRef myTkRef = ttt->persistentTrackRef();
// std::cout << " ConversionTrackPairFinder persistent track ref hits " << myTkRef->recHitsSize() << " inner pt " << sqrt(iTk->track().innerMomentum().perp2()) << "\n";
// std::cout << " ConversionTrackPairFinder track from handle hits " << trackRef->recHitsSize() << " inner pt " << sqrt(iTk->track().innerMomentum().perp2()) << "\n";
const reco::CaloClusterPtr aClus = (*inOutTrackSCAssH)[trackRef];
// std::cout << "ConversionTrackPairFinder Filling the InOut Map " << &(*inOutTrackSCAss[trackRef]) << " " << &inOutTrackSCAss[trackRef] << std::endl;
// std::cout << "ConversionTrackPairFinder In Out track belonging to SC with energy " << aClus.energy() << "\n";
scTrkAssocMap[*iTk] = aClus;
int nHits = iTk->recHitsSize();
auxMap.insert(std::pair<int, reco::TransientTrack>(nHits, (*iTk)));
selectedInOutTk.push_back(*iTk);
allSelectedTk.push_back(*iTk);
}
// std::cout << " ConversionTrackPairFinder allSelectedTk size " << allSelectedTk.size() << " scTrkAssocMap size " << scTrkAssocMap.size() << "\n";
// Sort tracks in decreasing number of hits
if (!selectedOutInTk.empty())
std::stable_sort(selectedOutInTk.begin(), selectedOutInTk.end(), ByNumOfHits());
if (!selectedInOutTk.empty())
std::stable_sort(selectedInOutTk.begin(), selectedInOutTk.end(), ByNumOfHits());
if (!allSelectedTk.empty())
std::stable_sort(allSelectedTk.begin(), allSelectedTk.end(), ByNumOfHits());
// for( std::vector<reco::TransientTrack>::const_iterator iTk = selectedOutInTk.begin(); iTk != selectedOutInTk.end(); iTk++) {
// std::cout << " Selected Out In Tracks charge " << iTk->charge() << " Num of RecHits " << iTk->recHitsSize() << " inner momentum " << iTk->track().innerMomentum() << "\n";
//}
// for( std::vector<reco::TransientTrack>::const_iterator iTk = selectedInOutTk.begin(); iTk != selectedInOutTk.end(); iTk++) {
// std::cout << " Selected In Out Tracks charge " << iTk->charge() << " Num of RecHits " << iTk->recHitsSize() << " inner momentum " << iTk->track().innerMomentum() << "\n";
// }
// for( std::vector<reco::TransientTrack>::const_iterator iTk = allSelectedTk.begin(); iTk != allSelectedTk.end(); iTk++) {
// std::cout << " All Selected Tracks charge " << iTk->charge() << " Num of RecHits " << iTk->recHitsSize() << " chi2 " << iTk->normalizedChi2() << " pt " << sqrt(iTk->track().innerMomentum().perp2()) << "\n";
//}
std::vector<reco::TransientTrack> thePair(2);
std::vector<std::vector<reco::TransientTrack> > allPairs;
std::map<std::vector<reco::TransientTrack>, reco::CaloClusterPtr, CompareTwoTracksVectors> allPairSCAss;
std::map<std::vector<reco::TransientTrack>, reco::CaloClusterPtr, CompareTwoTracksVectors> allPairOrdInPtSCAss;
std::map<reco::TransientTrack, reco::CaloClusterPtr>::const_iterator iMap1;
std::map<reco::TransientTrack, reco::CaloClusterPtr>::const_iterator iMap2;
for (iMap1 = scTrkAssocMap.begin(); iMap1 != scTrkAssocMap.end(); ++iMap1) {
// std::cout << " Ass map track charge " << (iMap1->first).charge() <<" pt " << sqrt(((iMap1->first)).track().innerMomentum().Perp2()) << " SC E " << (iMap1->second)->energy() << " SC eta " << (iMap1->second)->eta() << " SC phi " << (iMap1->second)->phi() << std::endl;
}
std::multimap<int, reco::TransientTrack>::const_iterator iAux;
// for( iAux = auxMap.begin(); iAux!= auxMap.end(); ++iAux) {
// // std::cout << " Aux Map " << (iAux->first) <<" pt " << sqrt(((iAux->second)).track().innerMomentum().Perp2()) << std::endl;
// for( iMap1 = scTrkAssocMap.begin(); iMap1 != scTrkAssocMap.end(); ++iMap1) {
// if ( (iMap1->first) == (iAux->second) ) std::cout << " ass SC " << (iMap1->second)->energy() << std::endl;
// }
// }
if (scTrkAssocMap.size() > 2) {
for (iMap1 = scTrkAssocMap.begin(); iMap1 != scTrkAssocMap.end(); ++iMap1) {
for (iMap2 = iMap1; iMap2 != scTrkAssocMap.end(); ++iMap2) {
// consider only tracks associated to the same SC
if ((iMap1->second) != (iMap2->second))
continue;
if (((iMap1->first)).charge() * ((iMap2->first)).charge() < 0) {
// std::cout << " ConversionTrackPairFinde All selected from the map First Track charge " << (iMap1->first).charge() << " Num of RecHits " << ((iMap1->first)).recHitsSize() << " inner pt " << sqrt(((iMap1->first)).track().innerMomentum().Perp2()) << " Ass SC " << (iMap1->second)->energy() << "\n";
// std::cout << " ConversionTrackPairFinde All selected from the map Second Track charge " << ((iMap2->first)).charge() << " Num of RecHits " << ((iMap2->first)).recHitsSize() << " inner pt " << sqrt(((iMap2->first)).track().innerMomentum().Perp2()) << " Ass SC " << (iMap2->second)->energy() << "\n";
thePair.clear();
thePair.push_back(iMap1->first);
thePair.push_back(iMap2->first);
allPairs.push_back(thePair);
allPairSCAss[thePair] = iMap1->second;
}
}
}
// std::cout << " ConversionTrackPairFinder INTERMIDIATE allPairSCAss size " << allPairSCAss.size() << "\n";
if (allPairSCAss.empty()) {
// std::cout << " All Tracks had the same charge: Need to send out a single track " << "\n";
for (iMap1 = scTrkAssocMap.begin(); iMap1 != scTrkAssocMap.end(); ++iMap1) {
thePair.clear();
thePair.push_back(iMap1->first);
allPairs.push_back(thePair);
allPairSCAss[thePair] = iMap1->second;
}
}
} else if ((scTrkAssocMap.size() == 2)) {
iMap1 = scTrkAssocMap.begin(); //get the first
iMap2 = iMap1;
iMap2++; //get the second
if ((iMap1->second) == (iMap2->second)) {
if ((iMap1->first).charge() * (iMap2->first).charge() < 0) {
// std::cout << " ConversionTrackPairFinder Case when (scTrkAssocMap.size() ==2) " << (iMap1->first).charge() << std::endl;
//std::cout << " Num of RecHits " << ((iMap1->first)).recHitsSize() << std::endl;
// std::cout << " inner pt " << sqrt(((iMap1->first)).track().innerMomentum().Perp2()) << std::endl;
//std::cout << " Ass SC " << (iMap1->second)->energy() << "\n";
// std::cout << " ConversionTrackPairFinder Case when (scTrkAssocMap.size() ==2) " << (iMap2->first).charge() << std::endl;
// std::cout << " Num of RecHits " << ((iMap2->first)).recHitsSize() << std::endl;
//std::cout << " inner pt " << sqrt(((iMap2->first)).track().innerMomentum().Perp2()) << std::endl;
//std::cout << " Ass SC " << (iMap2->second)->energy() << "\n";
thePair.clear();
thePair.push_back(iMap1->first);
thePair.push_back(iMap2->first);
allPairs.push_back(thePair);
allPairSCAss[thePair] = iMap1->second;
} else {
//std::cout << " ConversionTrackPairFinder oneLeg case when 2 tracks with same sign Pick up the longest one" << std::endl;
if (((iMap1->first)).recHitsSize() > ((iMap2->first)).recHitsSize()) {
thePair.clear();
thePair.push_back(iMap1->first);
allPairs.push_back(thePair);
allPairSCAss[thePair] = iMap1->second;
} else {
thePair.clear();
thePair.push_back(iMap2->first);
allPairs.push_back(thePair);
allPairSCAss[thePair] = iMap2->second;
}
}
}
} else if (scTrkAssocMap.size() == 1) { /// Only one track in input to the finder
// std::cout << " ConversionTrackPairFinder oneLeg case when 1 track only " << std::endl;
oneLeg = true;
} else {
noTrack = true;
}
if (oneLeg) {
thePair.clear();
// std::cout << " ConversionTrackPairFinder oneLeg case charge " << std::endl;
iMap1 = scTrkAssocMap.begin();
//std::cout << " ConversionTrackPairFinder oneLeg case charge " << (iMap1->first).charge() << " Num of RecHits " << ((iMap1->first)).recHitsSize() << " inner pt " << sqrt(((iMap1->first)).track().innerMomentum().Perp2()) << " Ass SC " << (iMap1->second)->energy() << "\n";
thePair.push_back(iMap1->first);
allPairs.push_back(thePair);
allPairSCAss[thePair] = iMap1->second;
// std::cout << " WARNING ConversionTrackPairFinder::tracks The candidate has just one leg. Need to find another way to evaltuate the vertex !!! " << "\n";
}
if (noTrack) {
// std::cout << " WARNING ConversionTrackPairFinder::tracks case noTrack " << "\n";
thePair.clear();
allPairSCAss.clear();
}
/// all cases above failed and some track-SC association is still missing
for (iMap1 = scTrkAssocMap.begin(); iMap1 != scTrkAssocMap.end(); ++iMap1) {
int nFound = 0;
for (std::map<std::vector<reco::TransientTrack>, reco::CaloClusterPtr>::const_iterator iPair = allPairSCAss.begin();
iPair != allPairSCAss.end();
++iPair) {
if ((iMap1->second) == (iPair->second))
nFound++;
}
if (nFound == 0) {
// std::cout << " nFound zero case " << std::endl;
int iList = 0;
for (iAux = auxMap.begin(); iAux != auxMap.end(); ++iAux) {
if ((iMap1->first) == (iAux->second) && iList == 0) {
thePair.clear();
thePair.push_back(iAux->second);
allPairSCAss[thePair] = iMap1->second;
}
iList++;
}
}
}
// order the tracks in the pair in order of decreasing pt
for (std::map<std::vector<reco::TransientTrack>, reco::CaloClusterPtr>::const_iterator iPair = allPairSCAss.begin();
iPair != allPairSCAss.end();
++iPair) {
thePair.clear();
if ((iPair->first).size() == 2) {
if (sqrt((iPair->first)[0].track().innerMomentum().perp2()) >
sqrt((iPair->first)[1].track().innerMomentum().perp2())) {
thePair.push_back((iPair->first)[0]);
thePair.push_back((iPair->first)[1]);
} else {
thePair.push_back((iPair->first)[1]);
thePair.push_back((iPair->first)[0]);
}
} else {
thePair.push_back((iPair->first)[0]);
}
allPairOrdInPtSCAss[thePair] = iPair->second;
}
// std::cout << " ConversionTrackPairFinder FINAL allPairOrdInPtSCAss size " << allPairOrdInPtSCAss.size() << "\n";
// for ( std::map<std::vector<reco::TransientTrack>, reco::CaloClusterPtr>::const_iterator iPair= allPairOrdInPtSCAss.begin(); iPair!= allPairOrdInPtSCAss.end(); ++iPair ) {
// std::cout << " ConversionTrackPairFindder FINAL allPairOrdInPtSCAss " << (iPair->first).size() << " SC Energy " << (iPair->second)->energy() << " eta " << (iPair->second)->eta() << " phi " << (iPair->second)->phi() << "\n";
// std::cout << " ConversionTrackPairFindder FINAL allPairOrdInPtSCAss (iPair->first).size() " << (iPair->first).size() << std::endl;
// for ( std::vector<reco::TransientTrack>::const_iterator iTk=(iPair->first).begin(); iTk!= (iPair->first).end(); ++iTk) {
// std::cout << " ConversionTrackPair ordered track pt " << sqrt(iTk->track().innerMomentum().perp2()) << std::endl;
// }
//}
return allPairOrdInPtSCAss;
}
| 6,598 |
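The returned map is keyed by the track pair itself, so consuming it is plain iteration; the sketch below is not CMSSW code and assumes the usual framework headers for reco::TransientTrack and reco::CaloClusterPtr are available. Per the function above, each key holds one or two transient tracks ordered by decreasing inner pt, and the mapped value is the supercluster they were associated to.
#include <cmath>
#include <iostream>
#include <map>
#include <vector>
void inspectConversionPairs(
    const std::map<std::vector<reco::TransientTrack>, reco::CaloClusterPtr, CompareTwoTracksVectors>& pairs) {
  for (const auto& entry : pairs) {
    const std::vector<reco::TransientTrack>& tracks = entry.first;
    const reco::CaloClusterPtr& sc = entry.second;
    std::cout << "candidate with " << tracks.size() << " track(s), SC energy " << sc->energy() << std::endl;
    for (const auto& trk : tracks)
      std::cout << "  track pt " << std::sqrt(trk.track().innerMomentum().perp2()) << std::endl;
  }
}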
935 | <gh_stars>100-1000
#include <thor-internal/coroutine.hpp>
#include <thor-internal/fiber.hpp>
#include <thor-internal/main.hpp>
#include <thor-internal/memory-view.hpp>
#include <thor-internal/physical.hpp>
#include <thor-internal/timer.hpp>
namespace thor {
namespace {
constexpr bool logUsage = false;
constexpr bool logUncaching = false;
// The following flags are debugging options to debug the correctness of various components.
constexpr bool tortureUncaching = false;
constexpr bool disableUncaching = false;
}
// --------------------------------------------------------
// Reclaim implementation.
// --------------------------------------------------------
struct MemoryReclaimer {
void addPage(CachePage *page) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
assert(!(page->flags & CachePage::reclaimRegistered));
_lruList.push_back(page);
page->flags |= CachePage::reclaimRegistered;
_cachedSize += kPageSize;
}
void removePage(CachePage *page) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
assert(page->flags & CachePage::reclaimRegistered);
if(page->flags & CachePage::reclaimPosted) {
if(!(page->flags & CachePage::reclaimInflight)) {
auto it = page->bundle->_reclaimList.iterator_to(page);
page->bundle->_reclaimList.erase(it);
}
page->flags &= ~(CachePage::reclaimPosted | CachePage::reclaimInflight);
}else{
auto it = _lruList.iterator_to(page);
_lruList.erase(it);
_cachedSize -= kPageSize;
}
page->flags &= ~CachePage::reclaimRegistered;
}
void bumpPage(CachePage *page) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
assert(page->flags & CachePage::reclaimRegistered);
if(page->flags & CachePage::reclaimPosted) {
if(!(page->flags & CachePage::reclaimInflight)) {
auto it = page->bundle->_reclaimList.iterator_to(page);
page->bundle->_reclaimList.erase(it);
}
page->flags &= ~(CachePage::reclaimPosted | CachePage::reclaimInflight);
_cachedSize += kPageSize;
}else{
auto it = _lruList.iterator_to(page);
_lruList.erase(it);
}
_lruList.push_back(page);
}
auto awaitReclaim(CacheBundle *bundle, async::cancellation_token ct = {}) {
return async::sequence(
async::transform(
bundle->_reclaimEvent.async_wait(ct),
[] (auto) { }
),
// TODO: Use the reclaim fiber, not WorkQueue::generalQueue().
WorkQueue::generalQueue()->schedule()
);
}
CachePage *reclaimPage(CacheBundle *bundle) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
if(bundle->_reclaimList.empty())
return nullptr;
auto page = bundle->_reclaimList.pop_front();
assert(page->flags & CachePage::reclaimRegistered);
assert(page->flags & CachePage::reclaimPosted);
assert(!(page->flags & CachePage::reclaimInflight));
page->flags |= CachePage::reclaimInflight;
return page;
}
void runReclaimFiber() {
auto checkReclaim = [this] () -> bool {
if(disableUncaching)
return false;
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
if(_lruList.empty())
return false;
if(!tortureUncaching) {
auto pagesWatermark = physicalAllocator->numTotalPages() * 3 / 4;
auto usedPages = physicalAllocator->numUsedPages();
if(usedPages < pagesWatermark) {
return false;
}else{
if(logUncaching)
infoLogger() << "thor: Uncaching page. " << usedPages
<< " pages are in use (watermark: " << pagesWatermark << ")"
<< frg::endlog;
}
}
auto page = _lruList.pop_front();
assert(page->flags & CachePage::reclaimRegistered);
assert(!(page->flags & CachePage::reclaimPosted));
assert(!(page->flags & CachePage::reclaimInflight));
page->flags |= CachePage::reclaimPosted;
_cachedSize -= kPageSize;
page->bundle->_reclaimList.push_back(page);
page->bundle->_reclaimEvent.raise();
return true;
};
KernelFiber::run([=] {
while(true) {
if(logUncaching) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
infoLogger() << "thor: " << (_cachedSize / 1024)
<< " KiB of cached pages" << frg::endlog;
}
while(checkReclaim())
;
if(tortureUncaching) {
KernelFiber::asyncBlockCurrent(generalTimerEngine()->sleepFor(10'000'000));
}else{
KernelFiber::asyncBlockCurrent(generalTimerEngine()->sleepFor(1'000'000'000));
}
}
});
}
private:
frg::ticket_spinlock _mutex;
frg::intrusive_list<
CachePage,
frg::locate_member<
CachePage,
frg::default_list_hook<CachePage>,
&CachePage::listHook
>
> _lruList;
size_t _cachedSize = 0;
};
static frg::manual_box<MemoryReclaimer> globalReclaimer;
static initgraph::Task initReclaim{&globalInitEngine, "generic.init-reclaim",
initgraph::Requires{getFibersAvailableStage()},
[] {
globalReclaimer.initialize();
globalReclaimer->runReclaimFiber();
}
};
// --------------------------------------------------------
// MemoryView.
// --------------------------------------------------------
void MemoryView::resize(size_t newSize, async::any_receiver<void> receiver) {
(void)newSize;
(void)receiver;
panicLogger() << "MemoryView does not support resize!" << frg::endlog;
}
void MemoryView::fork(async::any_receiver<frg::tuple<Error, smarter::shared_ptr<MemoryView>>> receiver) {
receiver.set_value({Error::illegalObject, nullptr});
}
// In addition to what copyFrom() does, we also have to mark the memory as dirty.
coroutine<frg::expected<Error>> MemoryView::copyTo(uintptr_t offset,
const void *pointer, size_t size,
smarter::shared_ptr<WorkQueue> wq) {
struct Node {
MemoryView *view;
uintptr_t offset;
const void *pointer;
size_t size;
smarter::shared_ptr<WorkQueue> wq;
uintptr_t progress = 0;
PhysicalAddr physical;
};
co_await async::let([=] {
return Node{.view = this, .offset = offset, .pointer = pointer, .size = size, .wq = std::move(wq)};
}, [] (Node &nd) {
return async::sequence(
async::transform(nd.view->asyncLockRange(nd.offset, nd.size,
nd.wq), [] (Error e) {
// TODO: properly propagate the error.
assert(e == Error::success);
}),
async::repeat_while([&nd] { return nd.progress < nd.size; },
[&nd] {
auto fetchOffset = (nd.offset + nd.progress) & ~(kPageSize - 1);
return async::sequence(
async::transform(nd.view->fetchRange(fetchOffset, 0, nd.wq),
[&nd] (frg::expected<Error, PhysicalRange> resultOrError) {
assert(resultOrError);
auto range = resultOrError.value();
assert(range.get<0>() != PhysicalAddr(-1));
assert(range.get<1>() >= kPageSize);
nd.physical = range.get<0>();
}),
// Do heavy copying on the WQ.
// TODO: This could use wq->enter() but we want to keep stack depth low.
nd.wq->schedule(),
async::invocable([&nd] {
auto misalign = (nd.offset + nd.progress) & (kPageSize - 1);
size_t chunk = frg::min(kPageSize - misalign, nd.size - nd.progress);
PageAccessor accessor{nd.physical};
memcpy(reinterpret_cast<uint8_t *>(accessor.get()) + misalign,
reinterpret_cast<const uint8_t *>(nd.pointer) + nd.progress,
chunk);
nd.progress += chunk;
})
);
}
),
async::invocable([&nd] {
auto misalign = nd.offset & (kPageSize - 1);
nd.view->markDirty(nd.offset & ~(kPageSize - 1),
(nd.size + misalign + kPageSize - 1) & ~(kPageSize - 1));
nd.view->unlockRange(nd.offset, nd.size);
})
);
});
co_return {};
}
coroutine<frg::expected<Error>> MemoryView::copyFrom(uintptr_t offset,
void *pointer, size_t size,
smarter::shared_ptr<WorkQueue> wq) {
struct Node {
MemoryView *view;
uintptr_t offset;
void *pointer;
size_t size;
smarter::shared_ptr<WorkQueue> wq;
uintptr_t progress = 0;
PhysicalAddr physical;
};
co_await async::let([=] {
return Node{.view = this, .offset = offset, .pointer = pointer, .size = size, .wq = std::move(wq)};
}, [] (Node &nd) {
return async::sequence(
async::transform(nd.view->asyncLockRange(nd.offset, nd.size,
nd.wq), [] (Error e) {
// TODO: properly propagate the error.
assert(e == Error::success);
}),
async::repeat_while([&nd] { return nd.progress < nd.size; },
[&nd] {
auto fetchOffset = (nd.offset + nd.progress) & ~(kPageSize - 1);
return async::sequence(
async::transform(nd.view->fetchRange(fetchOffset, 0, nd.wq),
[&nd] (frg::expected<Error, PhysicalRange> resultOrError) {
assert(resultOrError);
auto range = resultOrError.value();
assert(range.get<0>() != PhysicalAddr(-1));
assert(range.get<1>() >= kPageSize);
nd.physical = range.get<0>();
}),
// Do heavy copying on the WQ.
// TODO: This could use wq->enter() but we want to keep stack depth low.
nd.wq->schedule(),
async::invocable([&nd] {
auto misalign = (nd.offset + nd.progress) & (kPageSize - 1);
size_t chunk = frg::min(kPageSize - misalign, nd.size - nd.progress);
PageAccessor accessor{nd.physical};
memcpy(reinterpret_cast<uint8_t *>(nd.pointer) + nd.progress,
reinterpret_cast<uint8_t *>(accessor.get()) + misalign, chunk);
nd.progress += chunk;
})
);
}
),
async::invocable([&nd] {
nd.view->unlockRange(nd.offset, nd.size);
})
);
});
co_return {};
}
bool MemoryView::asyncLockRange(uintptr_t offset, size_t size,
smarter::shared_ptr<WorkQueue>, LockRangeNode *node) {
node->result = lockRange(offset, size);
return true;
}
coroutine<frg::expected<Error>>
MemoryView::touchRange(uintptr_t offset, size_t size,
FetchFlags flags, smarter::shared_ptr<WorkQueue> wq) {
size_t progress = 0;
while(progress < size) {
FRG_CO_TRY(co_await fetchRange(offset + progress, flags, wq));
progress += kPageSize;
}
co_return {};
}
Error MemoryView::updateRange(ManageRequest, size_t, size_t) {
return Error::illegalObject;
}
void MemoryView::submitManage(ManageNode *) {
panicLogger() << "MemoryView does not support management!" << frg::endlog;
}
Error MemoryView::setIndirection(size_t, smarter::shared_ptr<MemoryView>,
uintptr_t, size_t) {
return Error::illegalObject;
}
// --------------------------------------------------------
// getZeroMemory()
// --------------------------------------------------------
namespace {
struct ZeroMemory final : MemoryView, GlobalFutexSpace {
ZeroMemory() = default;
ZeroMemory(const ZeroMemory &) = delete;
~ZeroMemory() = default;
ZeroMemory &operator= (const ZeroMemory &) = delete;
size_t getLength() override {
return size_t{1} << 46;
}
coroutine<frg::expected<Error>> copyFrom(uintptr_t, void *buffer, size_t size,
smarter::shared_ptr<WorkQueue> wq) override {
co_await wq->enter();
memset(buffer, 0, size);
co_return {};
}
frg::expected<Error, frg::tuple<smarter::shared_ptr<GlobalFutexSpace>, uintptr_t>>
resolveGlobalFutex(uintptr_t offset) override {
smarter::shared_ptr<GlobalFutexSpace> futexSpace{selfPtr.lock()};
return frg::make_tuple(std::move(futexSpace), offset);
}
Error lockRange(uintptr_t, size_t) override {
return Error::success;
}
void unlockRange(uintptr_t, size_t) override {
// Do nothing.
}
frg::tuple<PhysicalAddr, CachingMode> peekRange(uintptr_t) override {
assert(!"ZeroMemory::peekRange() should not be called");
__builtin_unreachable();
}
coroutine<frg::expected<Error, PhysicalRange>>
fetchRange(uintptr_t, FetchFlags, smarter::shared_ptr<WorkQueue>) override {
assert(!"ZeroMemory::fetchRange() should not be called");
__builtin_unreachable();
}
void markDirty(uintptr_t, size_t) override {
infoLogger() << "\e[31m" "thor: ZeroMemory::markDirty() called,"
"" "\e[39m" << frg::endlog;
}
coroutine<frg::expected<Error, PhysicalAddr>> takeGlobalFutex(uintptr_t,
smarter::shared_ptr<WorkQueue>) override {
// TODO: Futexes are always read-write. What should we do here?
// Add a "read-only" argument to takeGlobalFutex?
assert(!"ZeroMemory::takeGlobalFutex() should not be called");
__builtin_unreachable();
}
void retireGlobalFutex(uintptr_t) override {
// Do nothing.
}
public:
// Contract: set by the code that constructs this object.
smarter::borrowed_ptr<ZeroMemory> selfPtr;
};
}
smarter::shared_ptr<MemoryView> getZeroMemory() {
static frg::eternal<smarter::shared_ptr<ZeroMemory>> singleton = [] {
auto memory = smarter::allocate_shared<ZeroMemory>(*kernelAlloc);
memory->selfPtr = memory;
return memory;
}();
return singleton.get();
}
// --------------------------------------------------------
// ImmediateMemory
// --------------------------------------------------------
ImmediateMemory::ImmediateMemory(size_t length)
: _physicalPages{*kernelAlloc} {
auto numPages = (length + kPageSize - 1) >> kPageShift;
_physicalPages.resize(numPages);
for(size_t i = 0; i < numPages; ++i) {
auto physical = physicalAllocator->allocate(kPageSize, 64);
assert(physical != PhysicalAddr(-1) && "OOM when allocating ImmediateMemory");
PageAccessor accessor{physical};
memset(accessor.get(), 0, kPageSize);
_physicalPages[i] = physical;
}
}
ImmediateMemory::~ImmediateMemory() {
for(size_t i = 0; i < _physicalPages.size(); ++i)
physicalAllocator->free(_physicalPages[i], kPageSize);
}
void ImmediateMemory::resize(size_t newSize, async::any_receiver<void> receiver) {
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
size_t currentNumPages = _physicalPages.size();
size_t newNumPages = (newSize + kPageSize - 1) >> kPageShift;
assert(newNumPages >= currentNumPages);
_physicalPages.resize(newNumPages);
for(size_t i = currentNumPages; i < newNumPages; ++i) {
auto physical = physicalAllocator->allocate(kPageSize, 64);
assert(physical != PhysicalAddr(-1) && "OOM when allocating ImmediateMemory");
PageAccessor accessor{physical};
memset(accessor.get(), 0, kPageSize);
_physicalPages[i] = physical;
}
}
receiver.set_value();
}
frg::expected<Error, frg::tuple<smarter::shared_ptr<GlobalFutexSpace>, uintptr_t>>
ImmediateMemory::resolveGlobalFutex(uintptr_t offset) {
smarter::shared_ptr<GlobalFutexSpace> futexSpace{selfPtr.lock()};
return frg::make_tuple(std::move(futexSpace), offset);
}
Error ImmediateMemory::lockRange(uintptr_t, size_t) {
return Error::success;
}
void ImmediateMemory::unlockRange(uintptr_t, size_t) {
// Do nothing.
}
frg::tuple<PhysicalAddr, CachingMode> ImmediateMemory::peekRange(uintptr_t offset) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
auto index = offset >> kPageShift;
if(index >= _physicalPages.size())
return {PhysicalAddr(-1), CachingMode::null};
return {_physicalPages[index], CachingMode::null};
}
coroutine<frg::expected<Error, PhysicalRange>>
ImmediateMemory::fetchRange(uintptr_t offset, FetchFlags, smarter::shared_ptr<WorkQueue>) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
auto index = offset >> kPageShift;
auto disp = offset & (kPageSize - 1);
if(index >= _physicalPages.size())
co_return Error::fault;
co_return PhysicalRange{_physicalPages[index] + disp, kPageSize - disp, CachingMode::null};
}
void ImmediateMemory::markDirty(uintptr_t, size_t) {
// Do nothing for now.
}
size_t ImmediateMemory::getLength() {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
return _physicalPages.size() * kPageSize;
}
coroutine<frg::expected<Error, PhysicalAddr>> ImmediateMemory::takeGlobalFutex(uintptr_t offset,
smarter::shared_ptr<WorkQueue>) {
auto index = offset >> kPageShift;
if(index >= _physicalPages.size())
co_return Error::fault;
co_return _physicalPages[index];
}
void ImmediateMemory::retireGlobalFutex(uintptr_t) {
// Do nothing.
}
// --------------------------------------------------------
// HardwareMemory
// --------------------------------------------------------
HardwareMemory::HardwareMemory(PhysicalAddr base, size_t length, CachingMode cache_mode)
: _base{base}, _length{length}, _cacheMode{cache_mode} {
assert(!(base % kPageSize));
assert(!(length % kPageSize));
}
HardwareMemory::~HardwareMemory() {
// For now we do nothing when deallocating hardware memory.
}
frg::expected<Error, frg::tuple<smarter::shared_ptr<GlobalFutexSpace>, uintptr_t>>
HardwareMemory::resolveGlobalFutex(uintptr_t) {
return Error::illegalObject;
}
Error HardwareMemory::lockRange(uintptr_t, size_t) {
// Hardware memory is "always locked".
return Error::success;
}
void HardwareMemory::unlockRange(uintptr_t, size_t) {
// Hardware memory is "always locked".
}
frg::tuple<PhysicalAddr, CachingMode> HardwareMemory::peekRange(uintptr_t offset) {
assert(offset % kPageSize == 0);
return frg::tuple<PhysicalAddr, CachingMode>{_base + offset, _cacheMode};
}
coroutine<frg::expected<Error, PhysicalRange>>
HardwareMemory::fetchRange(uintptr_t offset, FetchFlags, smarter::shared_ptr<WorkQueue>) {
assert(offset % kPageSize == 0);
co_return PhysicalRange{_base + offset, _length - offset, _cacheMode};
}
void HardwareMemory::markDirty(uintptr_t, size_t) {
// We never evict memory, there is no need to track dirty pages.
}
size_t HardwareMemory::getLength() {
return _length;
}
// --------------------------------------------------------
// AllocatedMemory
// --------------------------------------------------------
AllocatedMemory::AllocatedMemory(size_t desiredLngth,
int addressBits, size_t desiredChunkSize, size_t chunkAlign)
: _physicalChunks{*kernelAlloc},
_addressBits{addressBits}, _chunkAlign{chunkAlign} {
static_assert(sizeof(unsigned long) == sizeof(uint64_t), "Fix use of __builtin_clzl");
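// Round the chunk size up to the next power of two; e.g. a desiredChunkSize of
// 0x3000 yields a _chunkSize of 0x4000.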
_chunkSize = size_t(1) << (64 - __builtin_clzl(desiredChunkSize - 1));
if(_chunkSize != desiredChunkSize)
infoLogger() << "\e[31mPhysical allocation of size " << (void *)desiredChunkSize
<< " rounded up to power of 2\e[39m" << frg::endlog;
size_t length = (desiredLngth + (_chunkSize - 1)) & ~(_chunkSize - 1);
if(length != desiredLngth)
infoLogger() << "\e[31mMemory length " << (void *)desiredLngth
<< " rounded up to chunk size " << (void *)_chunkSize
<< "\e[39m" << frg::endlog;
assert(_chunkSize % kPageSize == 0);
assert(_chunkAlign % kPageSize == 0);
assert(_chunkSize % _chunkAlign == 0);
_physicalChunks.resize(length / _chunkSize, PhysicalAddr(-1));
}
AllocatedMemory::~AllocatedMemory() {
// TODO: This destructor takes a lock. This is potentially unexpected.
// Rework this to only schedule the deallocation but not actually perform it?
if(logUsage)
infoLogger() << "thor: Releasing AllocatedMemory ("
<< (physicalAllocator->numUsedPages() * 4) << " KiB in use)" << frg::endlog;
for(size_t i = 0; i < _physicalChunks.size(); ++i) {
if(_physicalChunks[i] != PhysicalAddr(-1))
physicalAllocator->free(_physicalChunks[i], _chunkSize);
}
if(logUsage)
infoLogger() << "thor: ("
<< (physicalAllocator->numUsedPages() * 4) << " KiB in use)" << frg::endlog;
}
void AllocatedMemory::resize(size_t newSize, async::any_receiver<void> receiver) {
{
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
assert(!(newSize % _chunkSize));
size_t num_chunks = newSize / _chunkSize;
assert(num_chunks >= _physicalChunks.size());
_physicalChunks.resize(num_chunks, PhysicalAddr(-1));
}
receiver.set_value();
}
frg::expected<Error, frg::tuple<smarter::shared_ptr<GlobalFutexSpace>, uintptr_t>>
AllocatedMemory::resolveGlobalFutex(uintptr_t offset) {
smarter::shared_ptr<GlobalFutexSpace> futexSpace{selfPtr.lock()};
return frg::make_tuple(std::move(futexSpace), offset);
}
Error AllocatedMemory::lockRange(uintptr_t, size_t) {
// For now, we do not evict "anonymous" memory. TODO: Implement eviction here.
return Error::success;
}
void AllocatedMemory::unlockRange(uintptr_t, size_t) {
// For now, we do not evict "anonymous" memory. TODO: Implement eviction here.
}
frg::tuple<PhysicalAddr, CachingMode> AllocatedMemory::peekRange(uintptr_t offset) {
assert(offset % kPageSize == 0);
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
auto index = offset / _chunkSize;
auto disp = offset & (_chunkSize - 1);
assert(index < _physicalChunks.size());
if(_physicalChunks[index] == PhysicalAddr(-1))
return frg::tuple<PhysicalAddr, CachingMode>{PhysicalAddr(-1), CachingMode::null};
return frg::tuple<PhysicalAddr, CachingMode>{_physicalChunks[index] + disp,
CachingMode::null};
}
coroutine<frg::expected<Error, PhysicalRange>>
AllocatedMemory::fetchRange(uintptr_t offset, FetchFlags, smarter::shared_ptr<WorkQueue>) {
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
auto index = offset / _chunkSize;
auto disp = offset & (_chunkSize - 1);
assert(index < _physicalChunks.size());
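// Chunks are allocated lazily: the first fetch of an unbacked chunk allocates and
// zero-fills it (still under the lock) before the physical range is returned.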
if(_physicalChunks[index] == PhysicalAddr(-1)) {
auto physical = physicalAllocator->allocate(_chunkSize, _addressBits);
assert(physical != PhysicalAddr(-1) && "OOM");
assert(!(physical & (_chunkAlign - 1)));
for(size_t pg_progress = 0; pg_progress < _chunkSize; pg_progress += kPageSize) {
PageAccessor accessor{physical + pg_progress};
memset(accessor.get(), 0, kPageSize);
}
_physicalChunks[index] = physical;
}
assert(_physicalChunks[index] != PhysicalAddr(-1));
co_return PhysicalRange{_physicalChunks[index] + disp, _chunkSize - disp, CachingMode::null};
}
void AllocatedMemory::markDirty(uintptr_t, size_t) {
// Do nothing for now.
}
size_t AllocatedMemory::getLength() {
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
return _physicalChunks.size() * _chunkSize;
}
coroutine<frg::expected<Error, PhysicalAddr>> AllocatedMemory::takeGlobalFutex(uintptr_t offset,
smarter::shared_ptr<WorkQueue> wq) {
// TODO: This could be optimized further (by avoiding the coroutine call).
auto range = FRG_CO_TRY(co_await fetchRange(offset & ~(kPageSize - 1), 0, wq));
assert(range.get<0>() != PhysicalAddr(-1));
co_return range.get<0>();
}
void AllocatedMemory::retireGlobalFutex(uintptr_t) {
}
// --------------------------------------------------------
// ManagedSpace
// --------------------------------------------------------
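// Each page of a ManagedSpace carries a load state. The transitions implemented
// below are: kStateMissing -> kStateWantInitialization (first fetch)
// -> kStateInitialization (handed to the management queue) -> kStatePresent
// (updateRange(initialize)). Dirtying a present page moves it to
// kStateWantWriteback -> kStateWriteback (handed to the management queue) -> back
// to kStatePresent, with kStateAnotherWriteback marking pages that were re-dirtied
// while a writeback was in flight. Reclaim moves present, unlocked pages to
// kStateEvicting and finally back to kStateMissing once the physical page is freed.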
ManagedSpace::ManagedSpace(size_t length, bool readahead)
: pages{*kernelAlloc}, numPages{length >> kPageShift}, readahead{readahead} {
assert(!(length & (kPageSize - 1)));
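// Detached coroutine: repeatedly wait until the global reclaimer selects one of our
// pages, evict all mappings of that page and then return the physical page to the
// allocator (unless the eviction was cancelled in the meantime).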
[] (ManagedSpace *self, enable_detached_coroutine = {}) -> void {
while(true) {
// TODO: Cancel awaitReclaim() when the ManagedSpace is destructed.
co_await globalReclaimer->awaitReclaim(self);
CachePage *page;
ManagedPage *pit;
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&self->mutex);
page = globalReclaimer->reclaimPage(self);
if(!page)
continue;
size_t index = page->identity;
pit = self->pages.find(index);
assert(pit);
assert(pit->loadState == kStatePresent);
assert(!pit->lockCount);
pit->loadState = kStateEvicting;
globalReclaimer->removePage(&pit->cachePage);
}
co_await self->_evictQueue.evictRange(page->identity << kPageShift, kPageSize);
PhysicalAddr physical;
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&self->mutex);
if(pit->loadState != kStateEvicting)
continue;
assert(!pit->lockCount);
assert(pit->physical != PhysicalAddr(-1));
physical = pit->physical;
pit->loadState = kStateMissing;
pit->physical = PhysicalAddr(-1);
}
if(logUncaching)
infoLogger() << "\e[33mEvicting physical page\e[39m" << frg::endlog;
physicalAllocator->free(physical, kPageSize);
}
}(this);
}
ManagedSpace::~ManagedSpace() {
// TODO: Free all physical memory.
// TODO: We also have to remove all Loaded/Evicting pages from the reclaimer.
assert(!"Implement this");
}
// Note: Neither offset nor size are necessarily multiples of the page size.
Error ManagedSpace::lockPages(uintptr_t offset, size_t size) {
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex);
if((offset + size) / kPageSize > numPages)
return Error::bufferTooSmall;
for(size_t pg = 0; pg < size; pg += kPageSize) {
size_t index = (offset + pg) / kPageSize;
auto [pit, wasInserted] = pages.find_or_insert(index, this, index);
assert(pit);
pit->lockCount++;
if(pit->lockCount == 1) {
if(pit->loadState == kStatePresent) {
globalReclaimer->removePage(&pit->cachePage);
}else if(pit->loadState == kStateEvicting) {
// Stop the eviction to keep the page present.
pit->loadState = kStatePresent;
}
}
assert(pit->loadState != kStateEvicting);
}
return Error::success;
}
// Note: Neither offset nor size are necessarily multiples of the page size.
void ManagedSpace::unlockPages(uintptr_t offset, size_t size) {
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex);
assert((offset + size) / kPageSize <= numPages);
for(size_t pg = 0; pg < size; pg += kPageSize) {
size_t index = (offset + pg) / kPageSize;
auto pit = pages.find(index);
assert(pit);
assert(pit->lockCount > 0);
pit->lockCount--;
if(!pit->lockCount) {
if(pit->loadState == kStatePresent) {
globalReclaimer->addPage(&pit->cachePage);
}
}
assert(pit->loadState != kStateEvicting);
}
}
void ManagedSpace::submitManagement(ManageNode *node) {
ManageList pending;
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex);
_managementQueue.push_back(node);
_progressManagement(pending);
}
while(!pending.empty()) {
auto node = pending.pop_front();
node->complete();
}
}
void ManagedSpace::submitMonitor(MonitorNode *node) {
node->progress = 0;
MonitorList pending;
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex);
assert(node->offset % kPageSize == 0);
assert(node->length % kPageSize == 0);
assert((node->offset + node->length) / kPageSize <= numPages);
_monitorQueue.push_back(node);
_progressMonitors(pending);
}
while(!pending.empty()) {
auto node = pending.pop_front();
node->event.raise();
}
}
void ManagedSpace::_progressManagement(ManageList &pending) {
// For now, we prefer writeback to initialization.
// "Proper" priorization should probably be done in the userspace driver
// (we do not want to store per-page priorities here).
while(!_writebackList.empty() && !_managementQueue.empty()) {
auto page = _writebackList.front();
auto index = page->identity;
// Fuse the request with adjacent pages in the list.
ptrdiff_t count = 0;
while(!_writebackList.empty()) {
auto fuse_cache_page = _writebackList.front();
auto fuse_index = fuse_cache_page->identity;
auto fuse_managed_page = frg::container_of(fuse_cache_page, &ManagedPage::cachePage);
if(fuse_index != index + count)
break;
assert(fuse_managed_page->loadState == kStateWantWriteback);
fuse_managed_page->loadState = kStateWriteback;
count++;
_writebackList.pop_front();
}
assert(count);
auto node = _managementQueue.pop_front();
node->setup(Error::success, ManageRequest::writeback,
index << kPageShift, count << kPageShift);
pending.push_back(node);
}
while(!_initializationList.empty() && !_managementQueue.empty()) {
auto page = _initializationList.front();
auto index = page->identity;
// Fuse the request with adjacent pages in the list.
ptrdiff_t count = 0;
while(!_initializationList.empty()) {
auto fuse_cache_page = _initializationList.front();
auto fuse_index = fuse_cache_page->identity;
auto fuse_managed_page = frg::container_of(fuse_cache_page, &ManagedPage::cachePage);
if(fuse_index != index + count)
break;
assert(fuse_managed_page->loadState == kStateWantInitialization);
fuse_managed_page->loadState = kStateInitialization;
count++;
_initializationList.pop_front();
}
assert(count);
auto node = _managementQueue.pop_front();
node->setup(Error::success, ManageRequest::initialize,
index << kPageShift, count << kPageShift);
pending.push_back(node);
}
}
void ManagedSpace::_progressMonitors(MonitorList &pending) {
// TODO: Accelerate this by storing the monitors in a RB tree ordered by their progress.
auto progressNode = [&] (MonitorNode *node) -> bool {
while(node->progress < node->length) {
size_t index = (node->offset + node->progress) >> kPageShift;
auto pit = pages.find(index);
assert(pit);
if(pit->loadState == kStateMissing
|| pit->loadState == kStateWantInitialization
|| pit->loadState == kStateInitialization)
return false;
assert(pit->loadState == kStatePresent
|| pit->loadState == kStateWantWriteback
|| pit->loadState == kStateWriteback
|| pit->loadState == kStateAnotherWriteback
|| pit->loadState == kStateEvicting);
node->progress += kPageSize;
}
return true;
};
for(auto it = _monitorQueue.begin(); it != _monitorQueue.end(); ) {
auto it_copy = it;
auto node = *it++;
assert(node->type == ManageRequest::initialize);
if(progressNode(node)) {
_monitorQueue.erase(it_copy);
node->setup(Error::success);
pending.push_back(node);
}
}
}
// --------------------------------------------------------
// BackingMemory
// --------------------------------------------------------
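// BackingMemory is the pager-facing view of a ManagedSpace: the userspace driver
// pulls initialization/writeback requests via submitManage() and reports completion
// via updateRange(). Writes through this view do not mark pages dirty.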
void BackingMemory::resize(size_t newSize, async::any_receiver<void> receiver) {
assert(!(newSize & (kPageSize - 1)));
auto newPages = newSize >> kPageShift;
async::detach_with_allocator(*kernelAlloc, [] (BackingMemory *self, size_t newPages,
async::any_receiver<void> receiver) -> coroutine<void> {
size_t oldPages;
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&self->_managed->mutex);
oldPages = self->_managed->numPages;
self->_managed->numPages = newPages;
}
if(newPages > oldPages) {
// Do nothing for now.
}else if(newPages < oldPages) {
// TODO: also free the affected pages!
co_await self->_managed->_evictQueue.evictRange(newPages << kPageShift,
oldPages << kPageShift);
}
receiver.set_value();
}(this, newPages, std::move(receiver)));
}
frg::expected<Error, frg::tuple<smarter::shared_ptr<GlobalFutexSpace>, uintptr_t>>
BackingMemory::resolveGlobalFutex(uintptr_t) {
return Error::illegalObject;
}
Error BackingMemory::lockRange(uintptr_t offset, size_t size) {
return _managed->lockPages(offset, size);
}
void BackingMemory::unlockRange(uintptr_t offset, size_t size) {
_managed->unlockPages(offset, size);
}
frg::tuple<PhysicalAddr, CachingMode> BackingMemory::peekRange(uintptr_t offset) {
assert(!(offset % kPageSize));
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_managed->mutex);
auto index = offset / kPageSize;
assert(index < _managed->numPages);
auto pit = _managed->pages.find(index);
if(!pit)
return frg::tuple<PhysicalAddr, CachingMode>{PhysicalAddr(-1), CachingMode::null};
return frg::tuple<PhysicalAddr, CachingMode>{pit->physical, CachingMode::null};
}
coroutine<frg::expected<Error, PhysicalRange>>
BackingMemory::fetchRange(uintptr_t offset, FetchFlags, smarter::shared_ptr<WorkQueue>) {
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_managed->mutex);
auto index = offset >> kPageShift;
auto misalign = offset & (kPageSize - 1);
assert(index < _managed->numPages);
auto [pit, wasInserted] = _managed->pages.find_or_insert(index, _managed.get(), index);
assert(pit);
if(pit->physical == PhysicalAddr(-1)) {
PhysicalAddr physical = physicalAllocator->allocate(kPageSize);
assert(physical != PhysicalAddr(-1) && "OOM");
PageAccessor accessor{physical};
memset(accessor.get(), 0, kPageSize);
pit->physical = physical;
}
co_return PhysicalRange{pit->physical + misalign, kPageSize - misalign, CachingMode::null};
}
void BackingMemory::markDirty(uintptr_t, size_t) {
// Writes through the BackingMemory do not affect the dirty state!
}
size_t BackingMemory::getLength() {
// Size is constant so we do not need to lock.
return _managed->numPages << kPageShift;
}
void BackingMemory::submitManage(ManageNode *node) {
_managed->submitManagement(node);
}
Error BackingMemory::updateRange(ManageRequest type, size_t offset, size_t length) {
assert((offset % kPageSize) == 0);
assert((length % kPageSize) == 0);
MonitorList pending;
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_managed->mutex);
assert((offset + length) / kPageSize <= _managed->numPages);
/* assert(length == kPageSize);
auto inspect = (unsigned char *)physicalToVirtual(_managed->physicalPages[offset / kPageSize]);
auto log = infoLogger() << "dump";
for(size_t b = 0; b < kPageSize; b += 16) {
log << frg::hex_fmt(offset + b) << " ";
for(size_t i = 0; i < 16; i++)
log << " " << frg::hex_fmt(inspect[b + i]);
log << "\n";
}
log << frg::endlog;*/
if(type == ManageRequest::initialize) {
for(size_t pg = 0; pg < length; pg += kPageSize) {
size_t index = (offset + pg) / kPageSize;
auto pit = _managed->pages.find(index);
assert(pit);
assert(pit->loadState == ManagedSpace::kStateInitialization);
pit->loadState = ManagedSpace::kStatePresent;
if(!pit->lockCount)
globalReclaimer->addPage(&pit->cachePage);
}
}else{
for(size_t pg = 0; pg < length; pg += kPageSize) {
size_t index = (offset + pg) / kPageSize;
auto pit = _managed->pages.find(index);
assert(pit);
if(pit->loadState == ManagedSpace::kStateWriteback) {
pit->loadState = ManagedSpace::kStatePresent;
if(!pit->lockCount)
globalReclaimer->addPage(&pit->cachePage);
}else{
assert(pit->loadState == ManagedSpace::kStateAnotherWriteback);
pit->loadState = ManagedSpace::kStateWantWriteback;
_managed->_writebackList.push_back(&pit->cachePage);
}
}
}
_managed->_progressMonitors(pending);
}
while(!pending.empty()) {
auto node = pending.pop_front();
node->event.raise();
}
return Error::success;
}
// --------------------------------------------------------
// FrontalMemory
// --------------------------------------------------------
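// FrontalMemory is the consumer-facing view of the same ManagedSpace: fetchRange()
// queues an initialization request if necessary and waits for the pager to provide
// the page, while markDirty() schedules pages for writeback.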
frg::expected<Error, frg::tuple<smarter::shared_ptr<GlobalFutexSpace>, uintptr_t>>
FrontalMemory::resolveGlobalFutex(uintptr_t offset) {
smarter::shared_ptr<GlobalFutexSpace> futexSpace{selfPtr.lock()};
return frg::make_tuple(std::move(futexSpace), offset);
}
Error FrontalMemory::lockRange(uintptr_t offset, size_t size) {
return _managed->lockPages(offset, size);
}
void FrontalMemory::unlockRange(uintptr_t offset, size_t size) {
_managed->unlockPages(offset, size);
}
frg::tuple<PhysicalAddr, CachingMode> FrontalMemory::peekRange(uintptr_t offset) {
assert(!(offset % kPageSize));
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_managed->mutex);
auto index = offset / kPageSize;
assert(index < _managed->numPages);
auto pit = _managed->pages.find(index);
if(!pit)
return frg::tuple<PhysicalAddr, CachingMode>{PhysicalAddr(-1), CachingMode::null};
if(pit->loadState == ManagedSpace::kStatePresent
|| pit->loadState == ManagedSpace::kStateWantWriteback
|| pit->loadState == ManagedSpace::kStateWriteback
|| pit->loadState == ManagedSpace::kStateAnotherWriteback
|| pit->loadState == ManagedSpace::kStateEvicting) {
auto physical = pit->physical;
assert(physical != PhysicalAddr(-1));
if(pit->loadState == ManagedSpace::kStateEvicting) {
// Cancel eviction -- the page is still needed.
pit->loadState = ManagedSpace::kStatePresent;
globalReclaimer->addPage(&pit->cachePage);
}
return frg::tuple<PhysicalAddr, CachingMode>{physical, CachingMode::null};
}else{
assert(pit->loadState == ManagedSpace::kStateMissing
|| pit->loadState == ManagedSpace::kStateWantInitialization
|| pit->loadState == ManagedSpace::kStateInitialization);
return frg::tuple<PhysicalAddr, CachingMode>{PhysicalAddr(-1), CachingMode::null};
}
}
coroutine<frg::expected<Error, PhysicalRange>>
FrontalMemory::fetchRange(uintptr_t offset, FetchFlags flags, smarter::shared_ptr<WorkQueue>) {
auto index = offset >> kPageShift;
auto misalign = offset & (kPageSize - 1);
ManageList pendingManagement;
MonitorList pendingMonitors;
MonitorNode fetchMonitor;
{
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_managed->mutex);
assert(index < _managed->numPages);
// Try the fast-paths first.
auto [pit, wasInserted] = _managed->pages.find_or_insert(index, _managed.get(), index);
assert(pit);
if(pit->loadState == ManagedSpace::kStatePresent
|| pit->loadState == ManagedSpace::kStateWantWriteback
|| pit->loadState == ManagedSpace::kStateWriteback
|| pit->loadState == ManagedSpace::kStateAnotherWriteback
|| pit->loadState == ManagedSpace::kStateEvicting) {
auto physical = pit->physical;
assert(physical != PhysicalAddr(-1));
if(pit->loadState == ManagedSpace::kStatePresent) {
if(!pit->lockCount)
globalReclaimer->bumpPage(&pit->cachePage);
}else if(pit->loadState == ManagedSpace::kStateEvicting) {
// Cancel eviction -- the page is still needed.
pit->loadState = ManagedSpace::kStatePresent;
globalReclaimer->addPage(&pit->cachePage);
}
co_return PhysicalRange{physical + misalign, kPageSize - misalign, CachingMode::null};
}else{
assert(pit->loadState == ManagedSpace::kStateMissing
|| pit->loadState == ManagedSpace::kStateWantInitialization
|| pit->loadState == ManagedSpace::kStateInitialization);
}
if(flags & fetchDisallowBacking) {
infoLogger() << "\e[31m" "thor: Backing of page is disallowed" "\e[39m"
<< frg::endlog;
co_return Error::fault;
}
// We have to take the slow-path, i.e., perform the fetch asynchronously.
if(pit->loadState == ManagedSpace::kStateMissing) {
pit->loadState = ManagedSpace::kStateWantInitialization;
_managed->_initializationList.push_back(&pit->cachePage);
}
// Perform readahead.
if(_managed->readahead)
for(size_t i = 1; i < 4; ++i) {
if(!(index + i < _managed->numPages))
break;
auto [pit, wasInserted] = _managed->pages.find_or_insert(
index + i, _managed.get(), index + i);
assert(pit);
if(pit->loadState == ManagedSpace::kStateMissing) {
pit->loadState = ManagedSpace::kStateWantInitialization;
_managed->_initializationList.push_back(&pit->cachePage);
}
}
_managed->_progressManagement(pendingManagement);
fetchMonitor.setup(ManageRequest::initialize, offset, kPageSize);
fetchMonitor.progress = 0;
_managed->_monitorQueue.push_back(&fetchMonitor);
_managed->_progressMonitors(pendingMonitors);
}
while(!pendingManagement.empty()) {
auto node = pendingManagement.pop_front();
node->complete();
}
while(!pendingMonitors.empty()) {
auto node = pendingMonitors.pop_front();
node->event.raise();
}
co_await fetchMonitor.event.wait();
assert(fetchMonitor.error() == Error::success);
PhysicalAddr physical;
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_managed->mutex);
auto pit = _managed->pages.find(index);
assert(pit);
assert(pit->loadState == ManagedSpace::kStatePresent);
physical = pit->physical;
assert(physical != PhysicalAddr(-1));
}
co_return PhysicalRange{physical + misalign, kPageSize - misalign, CachingMode::null};
}
void FrontalMemory::markDirty(uintptr_t offset, size_t size) {
assert(!(offset % kPageSize));
assert(!(size % kPageSize));
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_managed->mutex);
// Put the pages into the dirty state.
for(size_t pg = 0; pg < size; pg += kPageSize) {
auto index = (offset + pg) >> kPageShift;
auto pit = _managed->pages.find(index);
assert(pit);
if(pit->loadState == ManagedSpace::kStatePresent) {
pit->loadState = ManagedSpace::kStateWantWriteback;
if(!pit->lockCount)
globalReclaimer->removePage(&pit->cachePage);
_managed->_writebackList.push_back(&pit->cachePage);
}else if(pit->loadState == ManagedSpace::kStateEvicting) {
pit->loadState = ManagedSpace::kStateWantWriteback;
assert(!pit->lockCount);
_managed->_writebackList.push_back(&pit->cachePage);
}else if(pit->loadState == ManagedSpace::kStateWriteback) {
pit->loadState = ManagedSpace::kStateAnotherWriteback;
}else{
assert(pit->loadState == ManagedSpace::kStateWantWriteback
|| pit->loadState == ManagedSpace::kStateAnotherWriteback);
}
}
}
// We cannot call management callbacks with locks held, but markDirty() may be called
// with external locks held; do it on a WorkQueue.
_managed->_deferredManagement.invoke();
}
size_t FrontalMemory::getLength() {
// Size is constant so we do not need to lock.
return _managed->numPages << kPageShift;
}
coroutine<frg::expected<Error, PhysicalAddr>> FrontalMemory::takeGlobalFutex(uintptr_t offset,
smarter::shared_ptr<WorkQueue> wq) {
// For now, we pick the trivial implementation here.
auto lockError = co_await MemoryView::asyncLockRange(offset & ~(kPageSize - 1), kPageSize, wq);
if(lockError != Error::success)
co_return Error::fault;
auto range = FRG_CO_TRY(co_await fetchRange(offset & ~(kPageSize - 1), 0, wq));
assert(range.get<0>() != PhysicalAddr(-1));
co_return range.get<0>();
}
void FrontalMemory::retireGlobalFutex(uintptr_t offset) {
unlockRange(offset & ~(kPageSize - 1), kPageSize);
}
// --------------------------------------------------------
// IndirectMemory
// --------------------------------------------------------
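// IndirectMemory multiplexes several other MemoryViews: bits 32 and up of an offset
// select the slot, the low 32 bits address into that slot, so each slot spans a
// 4 GiB window (see getLength()).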
IndirectMemory::IndirectMemory(size_t numSlots)
: indirections_{*kernelAlloc} {
indirections_.resize(numSlots);
}
IndirectMemory::~IndirectMemory() {
// For now we do nothing when deallocating indirect memory.
}
frg::expected<Error, frg::tuple<smarter::shared_ptr<GlobalFutexSpace>, uintptr_t>>
IndirectMemory::resolveGlobalFutex(uintptr_t offset) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex_);
auto slot = offset >> 32;
auto inSlotOffset = offset & ((uintptr_t(1) << 32) - 1);
if(slot >= indirections_.size())
return Error::fault;
if(!indirections_[slot])
return Error::fault;
return indirections_[slot]->memory->resolveGlobalFutex(inSlotOffset);
}
Error IndirectMemory::lockRange(uintptr_t offset, size_t size) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex_);
auto slot = offset >> 32;
auto inSlotOffset = offset & ((uintptr_t(1) << 32) - 1);
if(slot >= indirections_.size())
return Error::fault;
if(!indirections_[slot])
return Error::fault;
if(inSlotOffset + size > indirections_[slot]->size)
return Error::fault;
return indirections_[slot]->memory->lockRange(indirections_[slot]->offset
+ inSlotOffset, size);
}
void IndirectMemory::unlockRange(uintptr_t offset, size_t size) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex_);
auto slot = offset >> 32;
auto inSlotOffset = offset & ((uintptr_t(1) << 32) - 1);
assert(slot < indirections_.size()); // TODO: Return Error::fault.
assert(indirections_[slot]); // TODO: Return Error::fault.
assert(inSlotOffset + size <= indirections_[slot]->size); // TODO: Return Error::fault.
return indirections_[slot]->memory->unlockRange(indirections_[slot]->offset
+ inSlotOffset, size);
}
frg::tuple<PhysicalAddr, CachingMode> IndirectMemory::peekRange(uintptr_t offset) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex_);
auto slot = offset >> 32;
auto inSlotOffset = offset & ((uintptr_t(1) << 32) - 1);
assert(slot < indirections_.size()); // TODO: Return Error::fault.
assert(indirections_[slot]); // TODO: Return Error::fault.
return indirections_[slot]->memory->peekRange(indirections_[slot]->offset
+ inSlotOffset);
}
coroutine<frg::expected<Error, PhysicalRange>>
IndirectMemory::fetchRange(uintptr_t offset, FetchFlags flags, smarter::shared_ptr<WorkQueue> wq) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex_);
auto slot = offset >> 32;
auto inSlotOffset = offset & ((uintptr_t(1) << 32) - 1);
assert(slot < indirections_.size()); // TODO: Return Error::fault.
assert(indirections_[slot]); // TODO: Return Error::fault.
return indirections_[slot]->memory->fetchRange(indirections_[slot]->offset
+ inSlotOffset, flags, std::move(wq));
}
void IndirectMemory::markDirty(uintptr_t offset, size_t size) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex_);
auto slot = offset >> 32;
auto inSlotOffset = offset & ((uintptr_t(1) << 32) - 1);
assert(slot < indirections_.size()); // TODO: Return Error::fault.
assert(indirections_[slot]); // TODO: Return Error::fault.
assert(inSlotOffset + size <= indirections_[slot]->size); // TODO: Return Error::fault.
indirections_[slot]->memory->markDirty(indirections_[slot]->offset
+ inSlotOffset, size);
}
size_t IndirectMemory::getLength() {
return indirections_.size() << 32;
}
Error IndirectMemory::setIndirection(size_t slot, smarter::shared_ptr<MemoryView> memory,
uintptr_t offset, size_t size) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&mutex_);
if(slot >= indirections_.size())
return Error::outOfBounds;
auto indirection = smarter::allocate_shared<IndirectionSlot>(*kernelAlloc,
this, slot, memory, offset, size);
// TODO: start a coroutine to observe evictions.
memory->addObserver(&indirection->observer);
indirections_[slot] = std::move(indirection);
return Error::success;
}
// --------------------------------------------------------
// CopyOnWriteMemory
// --------------------------------------------------------
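// Pages that this view has copied (or is currently copying) live in _ownedPages.
// Pages that are not owned here are looked up by walking the CowChain via
// _superChain; if no chain holds a copy, the data is read from the underlying _view.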
CopyOnWriteMemory::CopyOnWriteMemory(smarter::shared_ptr<MemoryView> view,
uintptr_t offset, size_t length,
smarter::shared_ptr<CowChain> chain)
: MemoryView{&_evictQueue}, _view{std::move(view)},
_viewOffset{offset}, _length{length}, _copyChain{std::move(chain)},
_ownedPages{*kernelAlloc} {
assert(length);
assert(!(offset & (kPageSize - 1)));
assert(!(length & (kPageSize - 1)));
}
CopyOnWriteMemory::~CopyOnWriteMemory() {
for(auto it = _ownedPages.begin(); it != _ownedPages.end(); ++it) {
assert(it->state == CowState::hasCopy);
assert(it->physical != PhysicalAddr(-1));
physicalAllocator->free(it->physical, kPageSize);
}
}
size_t CopyOnWriteMemory::getLength() {
return _length;
}
frg::expected<Error, frg::tuple<smarter::shared_ptr<GlobalFutexSpace>, uintptr_t>>
CopyOnWriteMemory::resolveGlobalFutex(uintptr_t offset) {
smarter::shared_ptr<GlobalFutexSpace> futexSpace{selfPtr.lock()};
return frg::make_tuple(std::move(futexSpace), offset);
}
void CopyOnWriteMemory::fork(async::any_receiver<frg::tuple<Error, smarter::shared_ptr<MemoryView>>> receiver) {
// Note that locked pages require special attention during CoW: as we cannot
// replace them by copies, we have to copy them eagerly.
// Therefore, they are special-cased below.
smarter::shared_ptr<CopyOnWriteMemory> forked;
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
// Create a new CowChain for both the original and the forked mapping.
// To correctly handle locked pages, we move only non-locked pages from
// the original mapping to the new chain.
auto newChain = smarter::allocate_shared<CowChain>(*kernelAlloc, _copyChain);
// Update the original mapping
_copyChain = newChain;
// Create a new mapping in the forked space.
forked = smarter::allocate_shared<CopyOnWriteMemory>(*kernelAlloc,
_view, _viewOffset, _length, newChain);
forked->selfPtr = forked;
// Finally, inspect all copied pages owned by the original mapping.
for(size_t pg = 0; pg < _length; pg += kPageSize) {
auto osIt = _ownedPages.find(pg >> kPageShift);
if(!osIt)
continue;
assert(osIt->state == CowState::hasCopy);
// The page is locked. We *need* to keep it in the old address space.
if(osIt->lockCount /*|| disableCow */) {
// Allocate a new physical page for a copy.
auto copyPhysical = physicalAllocator->allocate(kPageSize);
assert(copyPhysical != PhysicalAddr(-1) && "OOM");
// As the page is locked anyway, we can just copy it synchronously.
PageAccessor lockedAccessor{osIt->physical};
PageAccessor copyAccessor{copyPhysical};
memcpy(copyAccessor.get(), lockedAccessor.get(), kPageSize);
// Update the chains.
auto fsIt = forked->_ownedPages.insert(pg >> kPageShift);
fsIt->state = CowState::hasCopy;
fsIt->physical = copyPhysical;
}else{
auto physical = osIt->physical;
assert(physical != PhysicalAddr(-1));
// Update the chains.
auto pageOffset = _viewOffset + pg;
auto newIt = newChain->_pages.insert(pageOffset >> kPageShift,
PhysicalAddr(-1));
_ownedPages.erase(pg >> kPageShift);
newIt->store(physical, std::memory_order_relaxed);
}
}
}
async::detach_with_allocator(*kernelAlloc,
[] (CopyOnWriteMemory *self, smarter::shared_ptr<CopyOnWriteMemory> forked,
async::any_receiver<frg::tuple<Error, smarter::shared_ptr<MemoryView>>> receiver)
-> coroutine<void> {
co_await self->_evictQueue.evictRange(0, self->_length);
receiver.set_value({Error::success, std::move(forked)});
}(this, std::move(forked), receiver));
}
Error CopyOnWriteMemory::lockRange(uintptr_t, size_t) {
panicLogger() << "CopyOnWriteMemory does not support synchronous lockRange()"
<< frg::endlog;
__builtin_unreachable();
}
bool CopyOnWriteMemory::asyncLockRange(uintptr_t offset, size_t size,
smarter::shared_ptr<WorkQueue> wq, LockRangeNode *node) {
// For now, it is enough to populate the range, as pages can only be evicted from
// the root of the CoW chain, but copies are never evicted.
async::detach_with_allocator(*kernelAlloc, [] (CopyOnWriteMemory *self, uintptr_t overallOffset, size_t size,
smarter::shared_ptr<WorkQueue> wq, LockRangeNode *node) -> coroutine<void> {
size_t progress = 0;
while(progress < size) {
auto offset = overallOffset + progress;
smarter::shared_ptr<CowChain> chain;
smarter::shared_ptr<MemoryView> view;
uintptr_t viewOffset;
CowPage *cowIt;
bool waitForCopy = false;
{
// If the page is present in our private chain, we just return it.
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&self->_mutex);
cowIt = self->_ownedPages.find(offset >> kPageShift);
if(cowIt) {
if(cowIt->state == CowState::hasCopy) {
assert(cowIt->physical != PhysicalAddr(-1));
cowIt->lockCount++;
progress += kPageSize;
continue;
}else{
assert(cowIt->state == CowState::inProgress);
waitForCopy = true;
}
}else{
chain = self->_copyChain;
view = self->_view;
viewOffset = self->_viewOffset;
// Otherwise we need to copy from the chain or from the root view.
cowIt = self->_ownedPages.insert(offset >> kPageShift);
cowIt->state = CowState::inProgress;
}
}
if(waitForCopy) {
bool stillWaiting;
do {
stillWaiting = co_await self->_copyEvent.async_wait_if([&] () -> bool {
// TODO: this could be faster if cowIt->state was atomic.
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&self->_mutex);
if(cowIt->state == CowState::inProgress)
return true;
assert(cowIt->state == CowState::hasCopy);
return false;
});
co_await wq->schedule();
} while(stillWaiting);
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&self->_mutex);
assert(cowIt->state == CowState::hasCopy);
cowIt->lockCount++;
}
progress += kPageSize;
continue;
}
PhysicalAddr physical = physicalAllocator->allocate(kPageSize);
assert(physical != PhysicalAddr(-1) && "OOM");
PageAccessor accessor{physical};
// Try to copy from a descendant CoW chain.
auto pageOffset = viewOffset + offset;
while(chain) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&chain->_mutex);
if(auto it = chain->_pages.find(pageOffset >> kPageShift); it) {
// We can just copy synchronously here -- the descendant is not evicted.
auto srcPhysical = it->load(std::memory_order_relaxed);
assert(srcPhysical != PhysicalAddr(-1));
auto srcAccessor = PageAccessor{srcPhysical};
memcpy(accessor.get(), srcAccessor.get(), kPageSize);
break;
}
chain = chain->_superChain;
}
// Copy from the root view.
if(!chain) {
// TODO: Handle errors here -- we need to drop the lock again.
auto copyOutcome = co_await view->copyFrom(pageOffset & ~(kPageSize - 1),
accessor.get(), kPageSize, wq);
assert(copyOutcome);
}
// To make CoW unobservable, we first need to evict the page here.
// TODO: enable read-only eviction.
co_await self->_evictQueue.evictRange(offset & ~(kPageSize - 1), kPageSize);
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&self->_mutex);
assert(cowIt->state == CowState::inProgress);
cowIt->state = CowState::hasCopy;
cowIt->physical = physical;
cowIt->lockCount++;
}
self->_copyEvent.raise();
progress += kPageSize;
}
node->result = Error::success;
node->resume();
}(this, offset, size, std::move(wq), node));
return false;
}
void CopyOnWriteMemory::unlockRange(uintptr_t offset, size_t size) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
for(size_t pg = 0; pg < size; pg += kPageSize) {
auto it = _ownedPages.find((offset + pg) >> kPageShift);
assert(it);
assert(it->state == CowState::hasCopy);
assert(it->lockCount > 0);
it->lockCount--;
}
}
frg::tuple<PhysicalAddr, CachingMode> CopyOnWriteMemory::peekRange(uintptr_t offset) {
auto irq_lock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
if(auto it = _ownedPages.find(offset >> kPageShift); it) {
assert(it->state == CowState::hasCopy);
return frg::tuple<PhysicalAddr, CachingMode>{it->physical, CachingMode::null};
}
return frg::tuple<PhysicalAddr, CachingMode>{PhysicalAddr(-1), CachingMode::null};
}
coroutine<frg::expected<Error, PhysicalRange>>
CopyOnWriteMemory::fetchRange(uintptr_t offset, FetchFlags, smarter::shared_ptr<WorkQueue> wq) {
smarter::shared_ptr<CowChain> chain;
smarter::shared_ptr<MemoryView> view;
uintptr_t viewOffset;
CowPage *cowIt;
bool waitForCopy = false;
{
// If the page is present in our private chain, we just return it.
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
cowIt = _ownedPages.find(offset >> kPageShift);
if(cowIt) {
if(cowIt->state == CowState::hasCopy) {
assert(cowIt->physical != PhysicalAddr(-1));
co_return PhysicalRange{cowIt->physical, kPageSize, CachingMode::null};
}else{
assert(cowIt->state == CowState::inProgress);
waitForCopy = true;
}
}else{
chain = _copyChain;
view = _view;
viewOffset = _viewOffset;
// Otherwise we need to copy from the chain or from the root view.
cowIt = _ownedPages.insert(offset >> kPageShift);
cowIt->state = CowState::inProgress;
}
}
if(waitForCopy) {
bool stillWaiting;
do {
stillWaiting = co_await _copyEvent.async_wait_if([&] () -> bool {
// TODO: this could be faster if cowIt->state was atomic.
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
if(cowIt->state == CowState::inProgress)
return true;
assert(cowIt->state == CowState::hasCopy);
return false;
});
co_await wq->schedule();
} while(stillWaiting);
co_return PhysicalRange{cowIt->physical, kPageSize, CachingMode::null};
}
PhysicalAddr physical = physicalAllocator->allocate(kPageSize);
assert(physical != PhysicalAddr(-1) && "OOM");
PageAccessor accessor{physical};
// Try to copy from a descendant CoW chain.
auto pageOffset = viewOffset + offset;
while(chain) {
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&chain->_mutex);
if(auto it = chain->_pages.find(pageOffset >> kPageShift); it) {
// We can just copy synchronously here -- the descendant is not evicted.
auto srcPhysical = it->load(std::memory_order_relaxed);
assert(srcPhysical != PhysicalAddr(-1));
auto srcAccessor = PageAccessor{srcPhysical};
memcpy(accessor.get(), srcAccessor.get(), kPageSize);
break;
}
chain = chain->_superChain;
}
// Copy from the root view.
if(!chain) {
FRG_CO_TRY(co_await view->copyFrom(pageOffset & ~(kPageSize - 1),
accessor.get(), kPageSize, wq));
}
// To make CoW unobservable, we first need to evict the page here.
// TODO: enable read-only eviction.
co_await _evictQueue.evictRange(offset, kPageSize);
{
auto irqLock = frg::guard(&irqMutex());
auto lock = frg::guard(&_mutex);
assert(cowIt->state == CowState::inProgress);
cowIt->state = CowState::hasCopy;
cowIt->physical = physical;
}
_copyEvent.raise();
co_return PhysicalRange{cowIt->physical, kPageSize, CachingMode::null};
}
void CopyOnWriteMemory::markDirty(uintptr_t, size_t) {
// We do not need to track dirty pages.
}
coroutine<frg::expected<Error, PhysicalAddr>> CopyOnWriteMemory::takeGlobalFutex(uintptr_t offset,
smarter::shared_ptr<WorkQueue> wq) {
// For now, we pick the trivial implementation here.
auto lockError = co_await MemoryView::asyncLockRange(offset & ~(kPageSize - 1), kPageSize, wq);
if(lockError != Error::success)
co_return Error::fault;
auto range = FRG_CO_TRY(co_await fetchRange(offset & ~(kPageSize - 1), 0, wq));
assert(range.get<0>() != PhysicalAddr(-1));
co_return range.get<0>();
}
void CopyOnWriteMemory::retireGlobalFutex(uintptr_t offset) {
unlockRange(offset & ~(kPageSize - 1), kPageSize);
}
// --------------------------------------------------------------------------------------
namespace {
frg::eternal<FutexRealm> globalFutexRealm;
}
FutexRealm *getGlobalFutexRealm() {
return &globalFutexRealm.get();
}
} // namespace thor
| 21,524 |
435 | {
"copyright_text": "Standard YouTube License",
"description": "En CartoDB usamos torque (https://github.com/cartodb/torque) para la generaci\u00f3n de visualizaciones en el navegador de millones de puntos. Para que sea posible es necesario hacer un procesado de los datos en el servidor. En esta charla se tratar\u00e1 como usamos python para:\n\n* Hacer an\u00e1lisis estad\u00edstico de los datos para buscar la mejor forma de codificar los datos\n* La generaci\u00f3n de los tiles desde los datos en crudo con millones de datos\n\nDel toolset de python usamos tornado para servidor datos de torque en tiempo real y numpy + pyplot para el an\u00e1lisis de datos.",
"duration": 1848,
"language": "spa",
"recorded": "2015-04-06",
"related_urls": [
"https://github.com/cartodb/torque"
],
"speakers": [
"<NAME>"
],
"tags": [
"cartodb",
"torque",
"gis"
],
"thumbnail_url": "https://i.ytimg.com/vi/pXR98TAI5Ms/maxresdefault.jpg",
"title": "Torque: Python y analisis de datos",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=pXR98TAI5Ms"
}
]
}
| 462 |
772 | {
"danceFeedbackNeedNewDancer": "Musíš vytvoriť tanečníka.",
"danceFeedbackNoDancers": "Nemáš žiadnych tanečníkov.",
"danceFeedbackNoBackground": "Potrebuješ pridať efekt v pozadí.",
"danceFeedbackTooManyDancers": "Buď opatrný! Ak vložíš blok `vytvor` dovnútra bloku `každé 2 takty`, tak vytvoriš veľa tanečníkov.",
"danceFeedbackUseSetSize": "Použi blok `nastav backup_dancer2 veľkosť na...`, aby si toho tanečníka zmenšil.",
"danceFeedbackUseSetTint": "Použi blok `nastav sfarbenie` na zmenu farby tanečníka.",
"danceFeedbackUseStartMapping": "Vyskúšaj pridať blok <xml><block type=\"Dancelab_startMapping\"><title name=\"SPRITE\">right_pineapple</title><title name=\"PROPERTY\">\"scale\"</title><title name=\"RANGE\">\"bass\"</title></block></xml> do tvojho programu.",
"danceFeedbackStartNewMove": "Tvoj tanečník po štvrtom takte neurobil nový pohyb.",
"danceFeedbackNeedDifferentDance": "Musíš použiť iný tanec.",
"danceFeedbackNeedEveryTwoMeasures": "Uisti sa, že používaš blok \"každé dva takty\"",
"danceFeedbackNeedMakeANewDancer": "Použi blok `vytvor`, aby si vytvoril druhého tanečníka.",
"danceFeedbackKeyEvent": "Nezabudni ku bloku <xml></block><block type=\"Dancelab_whenKey\"><title name=\"KEY\">\"hore\"</title></block></xml> pridať nejaký príkaz a v priebehu tanca túto klávesu aj stlačiť.",
"danceFeedbackDidntPress": "Nezabudni počas tanca stláčať šípky.",
"danceFeedbackPressedKey": "Stlačil si klávesu, ale tvoj tanečník nereagoval.",
"danceFeedbackNeedTwoDancers": "Musíš vytvoriť dvoch tanečníkov.",
"danceFeedbackOnlyOneDancerMoved": "Iba jeden z tvojich tanečníkov sa pohol.",
"danceFeedbackNeedLead": "Musíš tiež vytvoriť jedného vedúceho tanečníka pomocou bloku <xml><block type=\"Dancelab_makeAnonymousDanceSprite\"><title name=\"COSTUME\">???</title><title name=\"LOCATION\">'{'x: 200, y: 200'}'</title></block></xml>.",
"danceFeedbackNeedBackup": "Musíš vytvoriť skupinu podporných tanečníkov pomocou bloku <xml><block type=\"Dancelab_makeNewDanceSpriteGroup\"><title name=\"N\">10</title><title name=\"COSTUME\">\"UNICORN\"</title><title name=\"LAYOUT\">\"circle\"</title></block></xml>.",
"danceFeedbackSetSize": "Keď začne hrať hudba, musíš zmeniť veľkosť aspoň jedného tanečníka.",
"measure": "Takt:"
}
| 1,009 |
394 | <reponame>jjochen/DTFoundation
//
// ZipNodeViewController.m
// Zippo
//
// Created by <NAME> on 4/12/13.
// Copyright (c) 2013 Cocoanetics. All rights reserved
//
#import <Foundation/Foundation.h>
#import "DTZipArchiveNode.h"
#import "DTZipArchive.h"
#import <QuickLook/QLPreviewItem.h>
@interface ZipNodeViewController : UITableViewController
@property (nonatomic, strong) DTZipArchiveNode *node;
@property (nonatomic, strong) DTZipArchive *zipArchive;
@end | 165 |
358 | /*
* Copyright 2018 The Ripple Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MYSQL_RIPPLE_MYSQL_SLAVE_SESSION_H
#define MYSQL_RIPPLE_MYSQL_SLAVE_SESSION_H
#include <string>
#include <unordered_map>
#include "absl/container/flat_hash_map.h"
#include "absl/time/time.h"
#include "binlog.h"
#include "binlog_reader.h"
#include "executor.h" // RunnableInterface
#include "mysql_protocol.h"
#include "mysql_server_connection.h"
#include "resultset.h"
#include "session.h"
namespace mysql_ripple {
namespace mysql {
// A class representing a slave connecting to ripple
class SlaveSession : public Session, public RunnableInterface {
public:
// Interfaces used.
class RippledInterface {
public:
virtual ~RippledInterface() {}
virtual BinlogPosition GetBinlogPosition() const = 0;
virtual const Uuid &GetUuid() const = 0;
virtual std::string GetServerName() const = 0;
virtual const file::Factory &GetFileFactory() const = 0;
virtual bool AllocServerId(uint32_t server_id, absl::Duration timeout) = 0;
virtual void FreeServerId(uint32_t server_id) = 0;
virtual void Shutdown() = 0;
virtual bool StartMasterSession(std::string *msg, bool idempotent) = 0;
virtual bool StopMasterSession(std::string *msg, bool idempotent) = 0;
virtual bool FlushLogs(std::string *new_file) = 0;
virtual bool PurgeLogs(std::string *oldest_file) = 0;
};
class FactoryInterface {
public:
virtual ~FactoryInterface() {}
virtual void EndSession(SlaveSession *) = 0;
};
SlaveSession(RippledInterface *rippled, mysql::ServerConnection *connection,
BinlogReader::BinlogInterface *binlog,
FactoryInterface *factory);
virtual ~SlaveSession();
// RunnableInterface
void Run() override;
void Stop() override;
void Unref() override;
// Attach a connected (but not authenticated) connection to this session.
bool Authenticate();
bool HandleQuery(const char *query);
bool HandleSetQuery(const char *query);
bool HandleRegisterSlave(Connection::Packet p);
bool HandleBinlogDump(Connection::Packet p);
bool HandleBinlogDumpGtid(Connection::Packet p);
void HandleShutdown();
bool HandleStartSlave(std::string *msg);
bool HandleStopSlave(std::string *msg);
// on success return name of new file in new_file.
bool HandleFlushLogs(std::string *new_file);
// on success return name of oldest kept file in oldest_file.
bool HandlePurgeLogs(std::string *oldest_file);
BinlogPosition GetBinlogPosition() const {
return rippled_->GetBinlogPosition();
}
BinlogPosition GetSlaveBinlogPosition() const {
return binlog_reader_.GetBinlogPosition();
}
const Uuid& GetRippledUuid() const {
return rippled_->GetUuid();
}
std::string GetRippledServerName() const {
return rippled_->GetServerName();
}
std::string GetHost() const { return connection_->GetHost(); }
uint16_t GetPort() const { return connection_->GetPort(); }
uint32_t GetServerId() const { return server_id_; }
std::string GetServerName() const { return server_name_; }
void SetConnectionStatusMetrics();
private:
bool SendHeartbeat();
// Encapsulate the event with a LogEventHeader and send it
// using protocol->SendEvent().
// LogEventHeader is constructed so that it has server_id = rippled_id
// and timestamp = 0.
bool SendArtificialEvent(const EventBase* ev, const FilePosition *pos);
// Read from binlog_reader_ and send events to slave.
bool SendEvents();
RippledInterface *rippled_;
BinlogReader binlog_reader_;
mysql::ServerConnection* connection_;
std::unique_ptr<mysql::Protocol> protocol_;
FactoryInterface *factory_;
// session variables.
absl::flat_hash_map<std::string, std::string> variables_;
uint32_t server_id_;
absl::Duration heartbeat_period_;
std::string server_name_;
SlaveSession(SlaveSession&&) = delete;
SlaveSession(const SlaveSession&) = delete;
SlaveSession& operator=(SlaveSession&&) = delete;
SlaveSession& operator=(const SlaveSession&) = delete;
friend int fun_SHOW_BINLOG_EVENTS(resultset::QueryContext *context);
};
} // namespace mysql
} // namespace mysql_ripple
#endif // MYSQL_RIPPLE_MYSQL_SLAVE_SESSION_H
| 1,528 |
2,635 | <filename>library/src/main/java/com/cjj/sva/anim/controller/JJCircleToBarController.java<gh_stars>1000+
package com.cjj.sva.anim.controller;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.RectF;
import com.cjj.sva.anim.JJBaseController;
/**
* This is a magical class: the SearchView morphs into a search bar. There is too much going on here to document it all.
* <p>
* Created by cjj on 2016/4/3.
*/
public class JJCircleToBarController extends JJBaseController {
private String mColor = "#E91E63";
private float cx, cy, cr;
private float sign = 0.707f;
private float mCircleBig = 10;
private RectF mRectF, mRectF2;
private float mCirCleDis = 200;
private Paint mFontPaint;
public JJCircleToBarController() {
mRectF = new RectF();
mRectF2 = new RectF();
mFontPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
mFontPaint.setStrokeWidth(1);
mFontPaint.setColor(Color.WHITE);
mFontPaint.setStyle(Paint.Style.FILL);
mFontPaint.setTextSize(40);
}
@Override
public void draw(Canvas canvas, Paint paint) {
canvas.drawColor(Color.parseColor(mColor));
switch (mState) {
case STATE_ANIM_NONE:
drawNormalView(paint, canvas);
break;
case STATE_ANIM_START:
drawStartAnimView(paint, canvas);
break;
case STATE_ANIM_STOP:
drawStopAnimView(paint, canvas);
break;
}
}
private void drawStopAnimView(Paint paint, Canvas canvas) {
drawNormalView(paint, canvas);
}
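// The start animation is driven by mPro in [0, 1]: retract the handle (<= 0.1),
// grow the circle (0.1-0.2), slide it to the right (0.2-0.3), stretch it into a
// bar (0.3-0.5), type the placeholder text into the bar (0.5-0.6), then collapse
// back into a circle for the final frames (> 0.6).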
private void drawStartAnimView(Paint paint, Canvas canvas) {
canvas.save();
if (mPro <= 0.1f) {
canvas.drawLine(cx + cr * sign, cy + cr * sign, cx + cr * sign + cr * sign *
(1 - mPro * 10), cy + cr * sign + cr * sign * (1 - mPro * 10), paint);
canvas.drawCircle(cx, cy, cr, paint);
} else if (mPro > 0.1f && mPro <= 0.2) {
canvas.drawCircle(cx, cy, cr + (mPro - 0.1f) * mCircleBig * 10, paint);
} else if (mPro > 0.2 && mPro <= 0.3) {
mRectF.left = cx - cr - mCircleBig + mCirCleDis * (mPro - 0.2f) * 10;
mRectF.right = cx + cr + mCircleBig + mCirCleDis * (mPro - 0.2f) * 10;
canvas.drawArc(mRectF, 0, 360, false, paint);
} else if (mPro > 0.3 && mPro <= 0.4) {
mRectF2.left = cx - cr - mCircleBig + mCirCleDis * (1 - (mPro - 0.3f) * 10);
mRectF2.right = cx + cr + mCircleBig + mCirCleDis * (1 - (mPro - 0.3f) * 10);
canvas.drawArc(mRectF, 90, -180, false, paint);
canvas.drawLine(mRectF2.left + cr + mCircleBig, mRectF.top, mRectF.right - cr - mCircleBig, mRectF.top, paint);
canvas.drawLine(mRectF2.left + cr + mCircleBig, mRectF.bottom, mRectF.right - cr - mCircleBig, mRectF.bottom, paint);
canvas.drawArc(mRectF2, 90, 180, false, paint);
} else if (mPro > 0.4 && mPro <= 0.5) {
mRectF2.left = cx - cr - mCircleBig - mCirCleDis * (mPro - 0.4f) * 10;
mRectF2.right = cx + cr + mCircleBig - mCirCleDis * (mPro - 0.4f) * 10;
canvas.drawArc(mRectF, 90, -180, false, paint);
canvas.drawLine(mRectF2.left + cr + mCircleBig, mRectF.top, mRectF.right - cr - mCircleBig, mRectF.top, paint);
canvas.drawLine(mRectF2.left + cr + mCircleBig, mRectF.bottom, mRectF.right - cr - mCircleBig, mRectF.bottom, paint);
canvas.drawArc(mRectF2, 90, 180, false, paint);
} else if (mPro > 0.5 && mPro <= 0.6) {
canvas.drawArc(mRectF, 90, -180, false, paint);
canvas.drawLine(mRectF2.left + cr + mCircleBig, mRectF.top, mRectF.right - cr - mCircleBig, mRectF.top, paint);
canvas.drawLine(mRectF2.left + cr + mCircleBig, mRectF.bottom, mRectF.right - cr - mCircleBig, mRectF.bottom, paint);
canvas.drawArc(mRectF2, 90, 180, false, paint);
if (mPro > 0.5f && mPro <= 0.52f) {
canvas.drawText("J", cx - mCirCleDis, cy + cr / 2, mFontPaint);
} else if (mPro > 0.52 && mPro <= 0.53f) {
canvas.drawText("JJ", cx - mCirCleDis, cy + cr / 2, mFontPaint);
} else if (mPro > 0.53 && mPro <= 0.54f) {
canvas.drawText("JJ Search", cx - mCirCleDis, cy + cr / 2, mFontPaint);
} else if (mPro > 0.54 && mPro <= 0.55f) {
canvas.drawText("JJ Search Anim", cx - mCirCleDis, cy + cr / 2, mFontPaint);
} else {
canvas.drawText("JJ Search Animations", cx - mCirCleDis, cy + cr / 2, mFontPaint);
}
} else if (mPro > 0.6 && mPro <= 0.7) {
canvas.drawCircle(cx, cy, cr + mCircleBig, paint);
canvas.drawLine(cx - cr / 2 + 4, cy + cr / 2, cx - cr / 2 + 4 - cr / 2, cy - cr / 2 + 8, paint);
canvas.drawLine(cx - cr / 2 + 4, cy + cr / 2, (cx + cr - 4), (cy - cr / 2), paint);
} else {
canvas.drawCircle(cx, cy, cr + mCircleBig, paint);
canvas.drawText("BUG", cx - cr / 2 - 8, cy + cr / 2, mFontPaint);
// Young coder, the finishing touches are left to you.
}
canvas.restore();
}
private void drawNormalView(Paint paint, Canvas canvas) {
cr = getWidth() / 15;
cx = getWidth() / 2;
cy = getHeight() / 2;
mRectF.top = cy - cr - mCircleBig;
mRectF.bottom = cy + cr + mCircleBig;
mRectF2.top = cy - cr - mCircleBig;
mRectF2.bottom = cy + cr + mCircleBig;
paint.reset();
paint.setAntiAlias(true);
paint.setStrokeCap(Paint.Cap.ROUND);
canvas.save();
paint.setColor(Color.WHITE);
paint.setStrokeWidth(4);
paint.setStyle(Paint.Style.STROKE);
canvas.drawCircle(cx, cy, cr, paint);
canvas.drawLine(cx + cr * sign, cy + cr * sign, cx + cr * 2 * sign,
cy + cr * 2 * sign, paint);
canvas.restore();
}
@Override
public void startAnim() {
if (mState == STATE_ANIM_START) return;
mState = STATE_ANIM_START;
startSearchViewAnim(0, 1, 3000);
}
@Override
public void resetAnim() {
if (mState == STATE_ANIM_STOP) return;
mState = STATE_ANIM_STOP;
startSearchViewAnim();
}
}
| 3,172 |
411 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.giraph.conf;
import org.apache.giraph.graph.GraphType;
import org.apache.hadoop.conf.Configuration;
/**
* Enum Configuration option per user graph type (IVEMM)
*
* @param <T> Enum class
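*
* <p>Illustrative usage (the option name and MyEnum are hypothetical):
* <pre>
* PerGraphTypeEnumConfOption&lt;MyEnum&gt; OPT = PerGraphTypeEnumConfOption.create(
* "giraph.myFeature", MyEnum.class, MyEnum.DEFAULT, "per graph type selector");
* OPT.set(conf, GraphType.VERTEX_VALUE, MyEnum.SPECIAL);
* </pre>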
*/
public class PerGraphTypeEnumConfOption<T extends Enum<T>> {
/** option for vertex id */
private final EnumConfOption<T> vertexId;
/** option for vertex value */
private final EnumConfOption<T> vertexValue;
/** option for edge value */
private final EnumConfOption<T> edgeValue;
/** option for outgoing message */
private final EnumConfOption<T> outgoingMessage;
/**
* Constructor
*
* @param keyPrefix Configuration key prefix
* @param klass Enum class
* @param defaultValue default value
* @param description description of the option
*/
public PerGraphTypeEnumConfOption(String keyPrefix, Class<T> klass,
T defaultValue, String description) {
vertexId = EnumConfOption.create(keyPrefix + ".vertex.id", klass,
defaultValue, description);
vertexValue = EnumConfOption.create(keyPrefix + ".vertex.value", klass,
defaultValue, description);
edgeValue = EnumConfOption.create(keyPrefix + ".edge.value",
klass, defaultValue, description);
outgoingMessage = EnumConfOption.create(keyPrefix + ".outgoing.message",
klass, defaultValue, description);
}
/**
* Create new EnumGraphTypeConfOption
*
* @param keyPrefix String configuration key prefix
* @param klass enum class
* @param defaultValue default enum value
* @param description description of the option
* @param <X> enum type
* @return EnumConfOption
*/
public static <X extends Enum<X>> PerGraphTypeEnumConfOption<X>
create(String keyPrefix, Class<X> klass, X defaultValue, String description) {
return new PerGraphTypeEnumConfOption<X>(keyPrefix, klass,
defaultValue, description);
}
/**
* Get option for given GraphType
*
* @param graphType GraphType
* @return EnumConfOption for given graph type
*/
public EnumConfOption<T> get(GraphType graphType) {
switch (graphType) {
case VERTEX_ID:
return vertexId;
case VERTEX_VALUE:
return vertexValue;
case EDGE_VALUE:
return edgeValue;
case OUTGOING_MESSAGE_VALUE:
return outgoingMessage;
default:
throw new IllegalArgumentException(
"Don't know how to handle GraphType " + graphType);
}
}
/**
* Set value for given GraphType
*
* @param conf Configuration
* @param graphType GraphType
* @param language Language
*/
public void set(Configuration conf, GraphType graphType, T language) {
get(graphType).set(conf, language);
}
public EnumConfOption<T> getEdgeValue() {
return edgeValue;
}
public EnumConfOption<T> getOutgoingMessage() {
return outgoingMessage;
}
public EnumConfOption<T> getVertexId() {
return vertexId;
}
public EnumConfOption<T> getVertexValue() {
return vertexValue;
}
}
| 1,214 |
1,442 | <filename>delta/data/task/kws_cls_task.py
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' kws task '''
# data format see: docs/data/kws.md
import struct
import numpy as np
import delta.compat as tf
from delta import utils
from delta.utils.register import registers
from delta.data.task.base_speech_task import SpeechTask
from delta.data.utils.htk_reader_lib import HtkReaderIO
@registers.task.register
class KwsClsTask(SpeechTask):
''' kws task '''
#pylint: disable=too-many-instance-attributes
def __init__(self, config, mode):
super().__init__(config, mode)
self.epoch = 0
self.num = 0
self.reader = HtkReaderIO()
self.window_len = config['data']['task']['audio']['window_len']
self.window_shift = config['data']['task']['audio']['window_shift']
self.cmvn_path = config['data']['task']['audio']['cmvn_path']
self.left_context = config['data']['task']['audio']['left_context']
self.right_context = config['data']['task']['audio']['right_context']
self.delta_order = config['data']['task']['audio']['delta_order']
self.delta_wind = config['data']['task']['audio']['delta_wind']
self.splice_frame = config['data']['task']['audio']['splice_frame']
feat_dim = config['data']['task']['audio']['feat_dim']
if self.splice_frame:
feat_dim = config['data']['task']['audio']['feat_dim'] * (
self.left_context + 1 + self.right_context)
self.final_feat_dim = feat_dim * (self.delta_order + 1)
if mode == utils.TRAIN:
self.lines = open(config['data']['train']['paths']).readlines()
else:
self.lines = open(config['data']['eval']['paths']).readlines()
def generate_feat(self, paths):
''' generate feature'''
def generate_cmvn(self, paths):
''' generate cmvn '''
#pylint: disable=too-many-locals
def generate_data(self):
'''
train.list file:
/path/to/10w.42.feat
/path/to/10w.42.label
./10w.42.desc
/path/to/train.7.feat
/path/to/train.7.label
./train.7.desc
'''
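# For illustration only (hypothetical paths): self.lines is expected to repeat
# the (feat, label, desc) triple shown above, e.g.
#   ['/path/to/10w.42.feat', '/path/to/10w.42.label', './10w.42.desc', ...]
# so the loop below consumes three lines per utterance.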
for i in range(0, len(self.lines), 3):
fp_feat = open(self.lines[i].strip(), 'rb')
buff = open(self.lines[i + 1].strip(), 'rb').read()
# label is 0 ~ 8,
# one label per frame
label_arr = struct.unpack('%di' % (len(buff) / 4), buff) # 570485
#desc_lines = open(self.lines[i + 2].strip()).readlines()[1:]
# read file header, frame_bytes is 160 Bytes, 40 dimensions
num_frames, _, frame_bytes, _ = struct.unpack('!%di%dh' % (2, 2),
fp_feat.read(12))
del num_frames
buff = fp_feat.read() # file body
fp_feat.close()
# ! means converting Big-Endian to Little-Endian
feat_all = struct.unpack('!%df' % (len(buff) / 4), buff)
feat_matrix = np.array(feat_all).reshape(
(-1, int(frame_bytes / 4))) # (570485, 40) (frame_num, feat_dim)
#num, bad = 0, 0
length = feat_matrix.shape[0] - self.window_len # 281508
for j in range(0, length, self.window_shift):
label_t = np.unique(label_arr[j:j + self.window_len])
if -1 in label_t:
# reduce the ratio of negative samples
continue
if len(label_t) > 2 and len(label_t) < 8:
continue
feat = feat_matrix[j:j + self.window_len]
_, feat = self.reader.add_delta(feat, self.delta_order, self.delta_wind)
# cmvn is 120 lines, each line has mean and variance
_, feat = self.reader.normalization_feat_by_mean_variance(
feat, self.cmvn_path)
if self.splice_frame:
_, feat = self.reader.splice_frames(feat, self.left_context,
self.right_context)
if set(label_t).issuperset(range(0, 8)):
# including keyword
label = 1
else:
label = 0
yield feat, label
def feature_spec(self):
''' data meta'''
output_shapes = (tf.TensorShape([self.window_len,
self.final_feat_dim]), tf.TensorShape([]))
output_types = (tf.float32, tf.int32)
return output_shapes, output_types
def preprocess_batch(self, batch):
''' preprocess of data'''
return batch
#pylint: disable=arguments-differ
def dataset(self, mode, batch_size, epoch):
''' make tf dataset'''
shapes, types = self.feature_spec()
ds = tf.data.Dataset.from_generator( #pylint: disable=invalid-name
generator=lambda: self.generate_data(), #pylint: disable=unnecessary-lambda
output_types=types,
output_shapes=shapes,
)
if mode == utils.TRAIN:
ds = ds.apply( #pylint: disable=invalid-name
tf.data.experimental.shuffle_and_repeat(
buffer_size=batch_size, count=epoch, seed=None))
def make_sample(feat, label):
return {"inputs": feat, "labels": label}, label
return ds.apply(
tf.data.experimental.map_and_batch(
make_sample, batch_size,
drop_remainder=False)).prefetch(tf.data.experimental.AUTOTUNE)
| 2,442 |
396 | #
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for the Metad protocol
"""
import logging
import random
import time
from ganeti import constants
from ganeti import errors
import ganeti.rpc.client as cl
from ganeti.rpc.transport import Transport
from ganeti.rpc.errors import TimeoutError
# If the metadata daemon is disabled, there is no stub generated for it.
# So import the module and define the client class only if enabled, otherwise
# just generate an empty placeholder class.
if constants.ENABLE_METAD:
import ganeti.rpc.stub.metad as stub
class Client(cl.AbstractStubClient, stub.ClientRpcStub):
"""High-level Metad client implementation.
This uses a backing Transport-like class on top of which it
implements data serialization/deserialization.
"""
def __init__(self, timeouts=None, transport=Transport):
"""Constructor for the Client class.
Arguments are the same as for L{AbstractClient}.
"""
cl.AbstractStubClient.__init__(self, timeouts, transport)
stub.ClientRpcStub.__init__(self)
retries = 12
for try_no in range(0, retries):
try:
self._InitTransport()
return
except TimeoutError:
logging.debug("Timout trying to connect to MetaD")
if try_no == retries - 1:
raise
logging.debug("Will retry")
time.sleep(try_no * 10 + 10 * random.random())
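# Note on the retry loop above: the sleep backs off by roughly 10 seconds per
# attempt with up to 10 seconds of jitter (0-10s after the first failure,
# 10-20s after the second, ...), for at most `retries` connection attempts.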
def _InitTransport(self):
"""(Re)initialize the transport if needed.
"""
if self.transport is None:
self.transport = self.transport_class(self._GetAddress(),
timeouts=self.timeouts,
allow_non_master=True)
else:
class Client(object):
"""An empty client representation that just throws an exception.
"""
def __init__(self, _timeouts=None, _transport=None):
raise errors.ProgrammerError("The metadata daemon is disabled, yet"
" the client has been called")
| 1,187 |
323 | #ifndef _ROS_baxter_core_msgs_EndpointState_h
#define _ROS_baxter_core_msgs_EndpointState_h
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include "ros/msg.h"
#include "std_msgs/Header.h"
#include "geometry_msgs/Pose.h"
#include "geometry_msgs/Twist.h"
#include "geometry_msgs/Wrench.h"
namespace baxter_core_msgs
{
class EndpointState : public ros::Msg
{
public:
typedef std_msgs::Header _header_type;
_header_type header;
typedef geometry_msgs::Pose _pose_type;
_pose_type pose;
typedef geometry_msgs::Twist _twist_type;
_twist_type twist;
typedef geometry_msgs::Wrench _wrench_type;
_wrench_type wrench;
EndpointState():
header(),
pose(),
twist(),
wrench()
{
}
virtual int serialize(unsigned char *outbuffer) const
{
int offset = 0;
offset += this->header.serialize(outbuffer + offset);
offset += this->pose.serialize(outbuffer + offset);
offset += this->twist.serialize(outbuffer + offset);
offset += this->wrench.serialize(outbuffer + offset);
return offset;
}
virtual int deserialize(unsigned char *inbuffer)
{
int offset = 0;
offset += this->header.deserialize(inbuffer + offset);
offset += this->pose.deserialize(inbuffer + offset);
offset += this->twist.deserialize(inbuffer + offset);
offset += this->wrench.deserialize(inbuffer + offset);
return offset;
}
const char * getType(){ return "baxter_core_msgs/EndpointState"; };
const char * getMD5(){ return "44bea01d596ff699fa1447bec34167ac"; };
};
}
#endif
| 675 |
6,958 | //
// CPUOneHot.hpp
// MNN
//
// Created by MNN on 2019/11/29.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef CPUOneHot_hpp
#define CPUOneHot_hpp
#include "core/Execution.hpp"
namespace MNN {
class CPUOneHot : public Execution{
public:
CPUOneHot(Backend* b, int axis):Execution(b), mAxis(axis){}
virtual ~CPUOneHot() = default;
virtual ErrorCode onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
private:
int mAxis;
};
} // namespace MNN
#endif /* CPUOneHot_hpp */
| 213 |
190,993 | /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Helpers for loading the persistent representation of a SavedModelV2.
// Please note that this is depended on by code that does not make use of
// the full runtime and its dependencies should be restricted.
#ifndef TENSORFLOW_CC_SAVED_MODEL_BUNDLE_V2_H_
#define TENSORFLOW_CC_SAVED_MODEL_BUNDLE_V2_H_
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/graph_debug_info.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
/// Represents a version 2 SavedModel that is loaded from storage (but not yet
/// loaded into an executable in-memory representation).
class SavedModelV2Bundle {
public:
using RestoreObjectsCallback =
std::function<Status(int, const TrackableObjectGraph::TrackableObject&)>;
/// Loads persistent representations for a SavedModelV2 from the specified
/// export directory.
static Status Load(const std::string& export_dir, SavedModelV2Bundle* bundle);
/// MetaGraphDef from the loaded SavedModel.
MetaGraphDef& meta_graph_def() { return meta_graph_def_; }
/// SavedObjectGraph from the MetaGraphDef.
const SavedObjectGraph& saved_object_graph() {
return meta_graph_def().object_graph_def();
}
/// TrackableObjectGraph loaded from the variable_reader() checkpoint.
TrackableObjectGraph& trackable_object_graph() {
return trackable_object_graph_;
}
/// BundleReader for accessing the variables bundle.
BundleReader* variable_reader() { return variable_reader_.get(); }
/// The GraphDebugInfo (or nullptr if none).
GraphDebugInfo* debug_info() { return debug_info_.get(); }
/// Restores objects, invoking the callback with the node id in the
/// saved_object_graph() and the corresponding TrackableObject from the
/// trackable_object_graph(). The callback may use the variable_reader() but
/// must not modify the underlying saved_object_graph().
Status VisitObjectsToRestore(RestoreObjectsCallback callback);
private:
Status RecurseObjectsToRestore(
const SavedObject* saved_object, int saved_object_node_id,
const TrackableObjectGraph::TrackableObject* trackable_object,
std::string object_name,
absl::flat_hash_set<int>* seen_trackable_node_ids,
RestoreObjectsCallback callback);
MetaGraphDef meta_graph_def_;
TrackableObjectGraph trackable_object_graph_;
std::unique_ptr<BundleReader> variable_reader_;
std::unique_ptr<GraphDebugInfo> debug_info_;
};
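// A minimal usage sketch based only on the declarations above (the export
// path is hypothetical):
//
//   SavedModelV2Bundle bundle;
//   Status status = SavedModelV2Bundle::Load("/tmp/my_saved_model", &bundle);
//   if (status.ok()) {
//     status = bundle.VisitObjectsToRestore(
//         [](int node_id,
//            const TrackableObjectGraph::TrackableObject& object) {
//           // Restore the variables of this object here, then report success.
//           return Status::OK();
//         });
//   }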
} // namespace tensorflow
#endif // TENSORFLOW_CC_SAVED_MODEL_BUNDLE_V2_H_
| 1,047 |
4,625 | // Copyright 2021 JanusGraph Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.janusgraph.diskstorage.cql.function.slice;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import io.vavr.concurrent.Future;
import org.janusgraph.diskstorage.BackendException;
import org.janusgraph.diskstorage.EntryList;
import org.janusgraph.diskstorage.PermanentBackendException;
import org.janusgraph.diskstorage.cql.CQLColValGetter;
import org.janusgraph.diskstorage.cql.CQLKeyColumnValueStore;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
public class CQLExecutorServiceSliceFunction extends AbstractCQLSliceFunction{
private final CQLColValGetter getter;
private final ExecutorService executorService;
public CQLExecutorServiceSliceFunction(CqlSession session, PreparedStatement getSlice,
CQLColValGetter getter, ExecutorService executorService) {
super(session, getSlice);
this.getter = getter;
this.executorService = executorService;
}
@Override
protected EntryList getSlice(CompletableFuture<AsyncResultSet> completableFutureSlice) throws BackendException {
final Future<EntryList> result = Future.fromJavaFuture(
this.executorService,
completableFutureSlice
).map(resultSet -> fromResultSet(resultSet, this.getter));
interruptibleWait(result);
return result.getValue().get().getOrElseThrow(CQLKeyColumnValueStore.EXCEPTION_MAPPER);
}
/**
* VAVR Future.await will throw InterruptedException wrapped in a FatalException. If the Thread was in Object.wait, the interrupted
* flag will be cleared as a side effect and needs to be reset. This method checks that the underlying cause of the FatalException is
* InterruptedException and resets the interrupted flag.
*
* @param result the future to wait on
* @throws PermanentBackendException if the thread was interrupted while waiting for the future result
*/
private void interruptibleWait(final Future<?> result) throws PermanentBackendException {
try {
result.await();
} catch (Exception e) {
if (e.getCause() instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
throw new PermanentBackendException(e);
}
}
}
| 1,036 |
1,555 | <gh_stars>1000+
int main () {
int i = 23;
unsigned char c = 3;
return (unsigned char) (~i + ~c) != 228;
}
| 46 |
7,883 | // Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma
// de Barcelona (UAB).
//
// This work is licensed under the terms of the MIT license.
// For a copy, see <https://opensource.org/licenses/MIT>.
#include "Carla.h"
#include "Carla/Sensor/SemanticSegmentationCamera.h"
#include "Carla/Sensor/PixelReader.h"
FActorDefinition ASemanticSegmentationCamera::GetSensorDefinition()
{
return UActorBlueprintFunctionLibrary::MakeCameraDefinition(TEXT("semantic_segmentation"));
}
ASemanticSegmentationCamera::ASemanticSegmentationCamera(
const FObjectInitializer &ObjectInitializer)
: Super(ObjectInitializer)
{
AddPostProcessingMaterial(
TEXT("Material'/Carla/PostProcessingMaterials/PhysicLensDistortion.PhysicLensDistortion'"));
AddPostProcessingMaterial(
TEXT("Material'/Carla/PostProcessingMaterials/GTMaterial.GTMaterial'"));
}
void ASemanticSegmentationCamera::PostPhysTick(UWorld *World, ELevelTick TickType, float DeltaSeconds)
{
TRACE_CPUPROFILER_EVENT_SCOPE(ASemanticSegmentationCamera::PostPhysTick);
FPixelReader::SendPixelsInRenderThread(*this);
}
| 351 |
636 | import time
from indy import did, crypto, wallet
import json
import logging
from indy import pool
from src.utils import run_coroutine, PROTOCOL_VERSION
logger = logging.getLogger(__name__)
async def demo():
logger.info("Crypto sample -> started")
signer = {
'wallet_config': json.dumps({'id': 'signer_wallet'}),
'wallet_credentials': json.dumps({'key': 'signer_wallet_key'})
}
verifier = {
'wallet_config': json.dumps({"id": "verifier_wallet"}),
'wallet_credentials': json.dumps({"key": "verifier_wallet_key"})
}
# Set protocol version 2 to work with Indy Node 1.4
await pool.set_protocol_version(PROTOCOL_VERSION)
# 1. Create Wallet and Get Wallet Handle
await wallet.create_wallet(signer['wallet_config'], signer['wallet_credentials'])
signer['wallet'] = await wallet.open_wallet(signer['wallet_config'], signer['wallet_credentials'])
await wallet.create_wallet(verifier['wallet_config'], verifier['wallet_credentials'])
verifier['wallet'] = await wallet.open_wallet(verifier['wallet_config'], verifier['wallet_credentials'])
# 2. Signer Create DID
(signer['did'], signer['verkey']) = await did.create_and_store_my_did(signer['wallet'], "{}")
# 3. Verifier Create DID
(verifier['did'], verifier['verkey']) = await did.create_and_store_my_did(verifier['wallet'], "{}")
signer['verifier_did'] = verifier['did']
signer['verifier_verkey'] = verifier['verkey']
verifier['signer_did'] = signer['did']
verifier['signer_verkey'] = signer['verkey']
# 4. Signer auth crypt message
message = json.dumps({
"reqId": 1495034346617224651,
"identifier": "GJ1SzoWzavQYfNL9XkaJdrQejfztN4XqdsiV4ct3LXKL",
"operation": {
"type": "1",
"dest": "<KEY>"
}
})
signer['encrypted_message'] = \
await crypto.auth_crypt(signer['wallet'], signer['verkey'], signer['verifier_verkey'], message.encode('utf-8'))
verifier['encrypted_message'] = signer['encrypted_message']
# 5. Verifier decrypt message
verkey, decrypted_message = \
await crypto.auth_decrypt(verifier['wallet'], verifier['verkey'], verifier['encrypted_message'])
assert verifier['signer_verkey'] == verkey
assert message == decrypted_message.decode("utf-8")
# 6. Close and delete Signer wallet
await wallet.close_wallet(signer['wallet'])
await wallet.delete_wallet(signer['wallet_config'], signer['wallet_credentials'])
# 7. Close and delete Verifier wallet
await wallet.close_wallet(verifier['wallet'])
await wallet.delete_wallet(verifier['wallet_config'], verifier['wallet_credentials'])
logger.info("Crypto sample -> completed")
if __name__ == '__main__':
run_coroutine(demo)
time.sleep(1) # FIXME waiting for libindy thread complete
| 1,098 |
453 | <reponame>kanishkan/tce
#ifdef __SPE__
#include <_ansi.h>
#include <limits.h>
#include <errno.h>
#include <stdlib.h>
#include <reent.h>
#include "vfieeefp.h"
/*
* Convert a string to a fixed-point 32-bit value.
*
* Ignores `locale' stuff.
*/
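/*
 * Illustrative behaviour, inferred from the code below rather than from the
 * original documentation:
 * "0.5" -> 0x80000000 (the value is the fraction scaled by 2^32)
 * "1.0" or larger -> ULONG_MAX with errno = ERANGE (saturation)
 * negative input -> 0 with errno = ERANGE
 * NaN -> 0 with errno = EDOM
 */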
__uint32_t
_DEFUN (_strtoufix32_r, (rptr, nptr, endptr),
struct _reent *rptr _AND
_CONST char *nptr _AND
char **endptr)
{
union double_union dbl;
int exp, negexp;
__uint32_t tmp, tmp2, result = 0;
dbl.d = _strtod_r (rptr, nptr, endptr);
/* treat NAN as domain error, +/- infinity as saturation */
if (!finite(dbl.d))
{
if (isnan (dbl.d))
{
rptr->_errno = EDOM;
return 0;
}
rptr->_errno = ERANGE;
if (word0(dbl) & Sign_bit)
return 0;
return ULONG_MAX;
}
/* check for normal saturation */
if (dbl.d >= 1.0)
{
rptr->_errno = ERANGE;
return ULONG_MAX;
}
else if (dbl.d < 0)
{
rptr->_errno = ERANGE;
return 0;
}
/* otherwise we have normal positive number in range */
/* strip off exponent */
exp = ((word0(dbl) & Exp_mask) >> Exp_shift) - Bias;
negexp = -exp;
if (negexp > 32)
return 0;
word0(dbl) &= ~(Exp_mask | Sign_bit);
/* add in implicit normalized bit */
word0(dbl) |= Exp_msk1;
/* shift so result is contained left-justified in word */
tmp = word0(dbl) << Ebits;
tmp |= ((unsigned long)word1(dbl) >> (32 - Ebits));
/* perform rounding */
if (negexp > 1)
{
tmp2 = tmp + (1 << (negexp - 2));
result = (tmp2 >> (negexp - 1));
/* if rounding causes carry, add carry bit in */
if (tmp2 < tmp)
result += 1 << (32 - negexp);
}
else
{
result = tmp + ((word1(dbl) & (1 << (32 - Ebits - 1))) != 0);
/* if rounding causes carry, then saturation has occurred */
if (result < tmp)
{
rptr->_errno = ERANGE;
return ULONG_MAX;
}
}
return result;
}
#ifndef _REENT_ONLY
__uint32_t
_DEFUN (strtoufix32, (s, ptr, base),
_CONST char *s _AND
char **ptr)
{
return _strtoufix32_r (_REENT, s, ptr);
}
#endif
#endif /* __SPE__ */
| 939 |
590 | /*
* This source file is part of MyGUI. For the latest info, see http://mygui.info/
* Distributed under the MIT License
* (See accompanying file COPYING.MIT or copy at http://opensource.org/licenses/MIT)
*/
#include "MyGUI_Precompiled.h"
#include "MyGUI_Gui.h"
#include "MyGUI_Widget.h"
#include "MyGUI_InputManager.h"
#include "MyGUI_SubWidgetManager.h"
#include "MyGUI_LogManager.h"
#include "MyGUI_SkinManager.h"
#include "MyGUI_WidgetManager.h"
#include "MyGUI_LayerManager.h"
#include "MyGUI_FontManager.h"
#include "MyGUI_ControllerManager.h"
#include "MyGUI_PointerManager.h"
#include "MyGUI_ClipboardManager.h"
#include "MyGUI_LayoutManager.h"
#include "MyGUI_PluginManager.h"
#include "MyGUI_DynLibManager.h"
#include "MyGUI_LanguageManager.h"
#include "MyGUI_ResourceManager.h"
#include "MyGUI_RenderManager.h"
#include "MyGUI_FactoryManager.h"
#include "MyGUI_ToolTipManager.h"
#include "MyGUI_TextureUtility.h"
namespace MyGUI
{
MYGUI_SINGLETON_DEFINITION(Gui);
Gui::Gui() :
mInputManager(nullptr),
mSubWidgetManager(nullptr),
mLayerManager(nullptr),
mSkinManager(nullptr),
mWidgetManager(nullptr),
mFontManager(nullptr),
mControllerManager(nullptr),
mPointerManager(nullptr),
mClipboardManager(nullptr),
mLayoutManager(nullptr),
mDynLibManager(nullptr),
mPluginManager(nullptr),
mLanguageManager(nullptr),
mResourceManager(nullptr),
mFactoryManager(nullptr),
mToolTipManager(nullptr),
mIsInitialise(false),
mSingletonHolder(this)
{
}
void Gui::initialise(const std::string& _core)
{
MYGUI_ASSERT(!mIsInitialise, getClassTypeName() << " initialised twice");
MYGUI_LOG(Info, "* Initialise: " << getClassTypeName());
MYGUI_LOG(Info, "* MyGUI version "
<< MYGUI_VERSION_MAJOR << "."
<< MYGUI_VERSION_MINOR << "."
<< MYGUI_VERSION_PATCH);
// create and initialise the singletons
mResourceManager = new ResourceManager();
mLayerManager = new LayerManager();
mWidgetManager = new WidgetManager();
mInputManager = new InputManager();
mSubWidgetManager = new SubWidgetManager();
mSkinManager = new SkinManager();
mFontManager = new FontManager();
mControllerManager = new ControllerManager();
mPointerManager = new PointerManager();
mClipboardManager = new ClipboardManager();
mLayoutManager = new LayoutManager();
mDynLibManager = new DynLibManager();
mPluginManager = new PluginManager();
mLanguageManager = new LanguageManager();
mFactoryManager = new FactoryManager();
mToolTipManager = new ToolTipManager();
mResourceManager->initialise();
mLayerManager->initialise();
mWidgetManager->initialise();
mInputManager->initialise();
mSubWidgetManager->initialise();
mSkinManager->initialise();
mFontManager->initialise();
mControllerManager->initialise();
mPointerManager->initialise();
mClipboardManager->initialise();
mLayoutManager->initialise();
mDynLibManager->initialise();
mPluginManager->initialise();
mLanguageManager->initialise();
mFactoryManager->initialise();
mToolTipManager->initialise();
WidgetManager::getInstance().registerUnlinker(this);
// load the default settings if needed
if (!_core.empty())
mResourceManager->load(_core);
BackwardCompatibility::initialise();
MYGUI_LOG(Info, getClassTypeName() << " successfully initialized");
mIsInitialise = true;
}
#ifndef MYGUI_DONT_USE_OBSOLETE
void Gui::initialise(const std::string& _core, const std::string& _logFileName)
{
initialise(_core);
}
#endif // MYGUI_DONT_USE_OBSOLETE
void Gui::shutdown()
{
MYGUI_ASSERT(mIsInitialise, getClassTypeName() << " is not initialised");
MYGUI_LOG(Info, "* Shutdown: " << getClassTypeName());
BackwardCompatibility::shutdown();
_destroyAllChildWidget();
// shut down and delete the singletons
mPointerManager->shutdown();
mInputManager->shutdown();
mSkinManager->shutdown();
mSubWidgetManager->shutdown();
mLayerManager->shutdown();
mFontManager->shutdown();
mControllerManager->shutdown();
mClipboardManager->shutdown();
mLayoutManager->shutdown();
mPluginManager->shutdown();
mDynLibManager->shutdown();
mLanguageManager->shutdown();
mResourceManager->shutdown();
mFactoryManager->shutdown();
mToolTipManager->shutdown();
WidgetManager::getInstance().unregisterUnlinker(this);
mWidgetManager->shutdown();
delete mPointerManager;
delete mWidgetManager;
delete mInputManager;
delete mSkinManager;
delete mSubWidgetManager;
delete mLayerManager;
delete mFontManager;
delete mControllerManager;
delete mClipboardManager;
delete mLayoutManager;
delete mDynLibManager;
delete mPluginManager;
delete mLanguageManager;
delete mResourceManager;
delete mFactoryManager;
delete mToolTipManager;
// reset the cache
texture_utility::getTextureSize("", false);
MYGUI_LOG(Info, getClassTypeName() << " successfully shutdown");
mIsInitialise = false;
}
Widget* Gui::baseCreateWidget(WidgetStyle _style, const std::string& _type, const std::string& _skin, const IntCoord& _coord, Align _align, const std::string& _layer, const std::string& _name)
{
Widget* widget = WidgetManager::getInstance().createWidget(_style, _type, _skin, _coord, /*_align, */nullptr, nullptr, _name);
mWidgetChild.push_back(widget);
widget->setAlign(_align);
// attach the widget to the layer
if (!_layer.empty())
LayerManager::getInstance().attachToLayerNode(_layer, widget);
return widget;
}
Widget* Gui::findWidgetT(const std::string& _name, bool _throw)
{
for (VectorWidgetPtr::iterator iter = mWidgetChild.begin(); iter != mWidgetChild.end(); ++iter)
{
Widget* widget = (*iter)->findWidget(_name);
if (widget != nullptr) return widget;
}
MYGUI_ASSERT(!_throw, "Widget '" << _name << "' not found");
return nullptr;
}
// deletes the given widget
void Gui::_destroyChildWidget(Widget* _widget)
{
MYGUI_ASSERT(nullptr != _widget, "invalid widget pointer");
VectorWidgetPtr::iterator iter = std::find(mWidgetChild.begin(), mWidgetChild.end(), _widget);
if (iter != mWidgetChild.end())
{
// keep the pointer
MyGUI::Widget* widget = *iter;
// remove it from the list
mWidgetChild.erase(iter);
// unlink it from all unlinkers
mWidgetManager->unlinkFromUnlinkers(_widget);
// the actual deletion
WidgetManager::getInstance()._deleteWidget(widget);
}
else
{
MYGUI_EXCEPT("Widget '" << _widget->getName() << "' not found");
}
}
// deletes all children
void Gui::_destroyAllChildWidget()
{
while (!mWidgetChild.empty())
{
// remove it from our list first, otherwise nested deletion destroys everything
Widget* widget = mWidgetChild.back();
mWidgetChild.pop_back();
// unlink it from all unlinkers
mWidgetManager->unlinkFromUnlinkers(widget);
// and delete it ourselves, since it is no longer in the list
WidgetManager::getInstance()._deleteWidget(widget);
}
}
void Gui::destroyWidget(Widget* _widget)
{
Widget* parent = _widget->getParent();
if (parent != nullptr)
parent->_destroyChildWidget(_widget);
else
_destroyChildWidget(_widget);
}
void Gui::destroyWidgets(const VectorWidgetPtr& _widgets)
{
for (VectorWidgetPtr::const_iterator iter = _widgets.begin(); iter != _widgets.end(); ++iter)
destroyWidget(*iter);
}
void Gui::destroyWidgets(EnumeratorWidgetPtr& _widgets)
{
VectorWidgetPtr widgets;
while (_widgets.next())
widgets.push_back(_widgets.current());
destroyWidgets(widgets);
}
void Gui::_unlinkWidget(Widget* _widget)
{
eventFrameStart.clear(_widget);
}
void Gui::_linkChildWidget(Widget* _widget)
{
VectorWidgetPtr::iterator iter = std::find(mWidgetChild.begin(), mWidgetChild.end(), _widget);
MYGUI_ASSERT(iter == mWidgetChild.end(), "widget already exist");
mWidgetChild.push_back(_widget);
}
void Gui::_unlinkChildWidget(Widget* _widget)
{
VectorWidgetPtr::iterator iter = std::remove(mWidgetChild.begin(), mWidgetChild.end(), _widget);
MYGUI_ASSERT(iter != mWidgetChild.end(), "widget not found");
mWidgetChild.erase(iter);
}
Widget* Gui::createWidgetT(const std::string& _type, const std::string& _skin, const IntCoord& _coord, Align _align, const std::string& _layer, const std::string& _name)
{
return baseCreateWidget(WidgetStyle::Overlapped, _type, _skin, _coord, _align, _layer, _name);
}
/** See Gui::createWidgetT */
Widget* Gui::createWidgetT(const std::string& _type, const std::string& _skin, int _left, int _top, int _width, int _height, Align _align, const std::string& _layer, const std::string& _name)
{
return createWidgetT(_type, _skin, IntCoord(_left, _top, _width, _height), _align, _layer, _name);
}
/** Create widget using coordinates relative to parent widget. see Gui::createWidgetT */
Widget* Gui::createWidgetRealT(const std::string& _type, const std::string& _skin, const FloatCoord& _coord, Align _align, const std::string& _layer, const std::string& _name)
{
IntSize size = RenderManager::getInstance().getViewSize();
return createWidgetT(_type, _skin, IntCoord((int)(_coord.left * size.width), (int)(_coord.top * size.height), (int)(_coord.width * size.width), (int)(_coord.height * size.height)), _align, _layer, _name);
}
/** Create widget using coordinates relative to parent. see Gui::createWidgetT */
Widget* Gui::createWidgetRealT(const std::string& _type, const std::string& _skin, float _left, float _top, float _width, float _height, Align _align, const std::string& _layer, const std::string& _name)
{
IntSize size = RenderManager::getInstance().getViewSize();
return createWidgetT(_type, _skin, IntCoord((int)(_left * size.width), (int)(_top * size.height), (int)(_width * size.width), (int)(_height * size.height)), _align, _layer, _name);
}
Widget* Gui::findWidgetT(const std::string& _name, const std::string& _prefix, bool _throw)
{
return findWidgetT(_prefix + _name, _throw);
}
void Gui::destroyChildWidget(Widget* _widget)
{
_destroyChildWidget(_widget);
}
void Gui::destroyAllChildWidget()
{
_destroyAllChildWidget();
}
EnumeratorWidgetPtr Gui::getEnumerator() const
{
return EnumeratorWidgetPtr(mWidgetChild);
}
void Gui::frameEvent(float _time)
{
eventFrameStart(_time);
}
} // namespace MyGUI
| 3,884 |
573 | /**
* Defines the `ZydisInstructionCategory` enum.
*/
typedef enum ZydisInstructionCategory_
{
ZYDIS_CATEGORY_INVALID,
ZYDIS_CATEGORY_ADOX_ADCX,
ZYDIS_CATEGORY_AES,
ZYDIS_CATEGORY_AMD3DNOW,
ZYDIS_CATEGORY_AMX_TILE,
ZYDIS_CATEGORY_AVX,
ZYDIS_CATEGORY_AVX2,
ZYDIS_CATEGORY_AVX2GATHER,
ZYDIS_CATEGORY_AVX512,
ZYDIS_CATEGORY_AVX512_4FMAPS,
ZYDIS_CATEGORY_AVX512_4VNNIW,
ZYDIS_CATEGORY_AVX512_BITALG,
ZYDIS_CATEGORY_AVX512_VBMI,
ZYDIS_CATEGORY_AVX512_VP2INTERSECT,
ZYDIS_CATEGORY_BINARY,
ZYDIS_CATEGORY_BITBYTE,
ZYDIS_CATEGORY_BLEND,
ZYDIS_CATEGORY_BMI1,
ZYDIS_CATEGORY_BMI2,
ZYDIS_CATEGORY_BROADCAST,
ZYDIS_CATEGORY_CALL,
ZYDIS_CATEGORY_CET,
ZYDIS_CATEGORY_CLDEMOTE,
ZYDIS_CATEGORY_CLFLUSHOPT,
ZYDIS_CATEGORY_CLWB,
ZYDIS_CATEGORY_CLZERO,
ZYDIS_CATEGORY_CMOV,
ZYDIS_CATEGORY_COMPRESS,
ZYDIS_CATEGORY_COND_BR,
ZYDIS_CATEGORY_CONFLICT,
ZYDIS_CATEGORY_CONVERT,
ZYDIS_CATEGORY_DATAXFER,
ZYDIS_CATEGORY_DECIMAL,
ZYDIS_CATEGORY_ENQCMD,
ZYDIS_CATEGORY_EXPAND,
ZYDIS_CATEGORY_FCMOV,
ZYDIS_CATEGORY_FLAGOP,
ZYDIS_CATEGORY_FMA4,
ZYDIS_CATEGORY_GATHER,
ZYDIS_CATEGORY_GFNI,
ZYDIS_CATEGORY_IFMA,
ZYDIS_CATEGORY_INTERRUPT,
ZYDIS_CATEGORY_IO,
ZYDIS_CATEGORY_IOSTRINGOP,
ZYDIS_CATEGORY_KMASK,
ZYDIS_CATEGORY_KNC,
ZYDIS_CATEGORY_KNCMASK,
ZYDIS_CATEGORY_KNCSCALAR,
ZYDIS_CATEGORY_LOGICAL,
ZYDIS_CATEGORY_LOGICAL_FP,
ZYDIS_CATEGORY_LZCNT,
ZYDIS_CATEGORY_MISC,
ZYDIS_CATEGORY_MMX,
ZYDIS_CATEGORY_MOVDIR,
ZYDIS_CATEGORY_MPX,
ZYDIS_CATEGORY_NOP,
ZYDIS_CATEGORY_PADLOCK,
ZYDIS_CATEGORY_PCLMULQDQ,
ZYDIS_CATEGORY_PCONFIG,
ZYDIS_CATEGORY_PKU,
ZYDIS_CATEGORY_POP,
ZYDIS_CATEGORY_PREFETCH,
ZYDIS_CATEGORY_PREFETCHWT1,
ZYDIS_CATEGORY_PT,
ZYDIS_CATEGORY_PUSH,
ZYDIS_CATEGORY_RDPID,
ZYDIS_CATEGORY_RDPRU,
ZYDIS_CATEGORY_RDRAND,
ZYDIS_CATEGORY_RDSEED,
ZYDIS_CATEGORY_RDWRFSGS,
ZYDIS_CATEGORY_RET,
ZYDIS_CATEGORY_ROTATE,
ZYDIS_CATEGORY_SCATTER,
ZYDIS_CATEGORY_SEGOP,
ZYDIS_CATEGORY_SEMAPHORE,
ZYDIS_CATEGORY_SERIALIZE,
ZYDIS_CATEGORY_SETCC,
ZYDIS_CATEGORY_SGX,
ZYDIS_CATEGORY_SHA,
ZYDIS_CATEGORY_SHIFT,
ZYDIS_CATEGORY_SMAP,
ZYDIS_CATEGORY_SSE,
ZYDIS_CATEGORY_STRINGOP,
ZYDIS_CATEGORY_STTNI,
ZYDIS_CATEGORY_SYSCALL,
ZYDIS_CATEGORY_SYSRET,
ZYDIS_CATEGORY_SYSTEM,
ZYDIS_CATEGORY_TBM,
ZYDIS_CATEGORY_TSX_LDTRK,
ZYDIS_CATEGORY_UFMA,
ZYDIS_CATEGORY_UNCOND_BR,
ZYDIS_CATEGORY_VAES,
ZYDIS_CATEGORY_VBMI2,
ZYDIS_CATEGORY_VFMA,
ZYDIS_CATEGORY_VPCLMULQDQ,
ZYDIS_CATEGORY_VTX,
ZYDIS_CATEGORY_WAITPKG,
ZYDIS_CATEGORY_WIDENOP,
ZYDIS_CATEGORY_X87_ALU,
ZYDIS_CATEGORY_XOP,
ZYDIS_CATEGORY_XSAVE,
ZYDIS_CATEGORY_XSAVEOPT,
/**
* Maximum value of this enum.
*/
ZYDIS_CATEGORY_MAX_VALUE = ZYDIS_CATEGORY_XSAVEOPT,
/**
* The minimum number of bits required to represent all values of this enum.
*/
ZYDIS_CATEGORY_REQUIRED_BITS = ZYAN_BITS_TO_REPRESENT(ZYDIS_CATEGORY_MAX_VALUE)
} ZydisInstructionCategory;
| 1,932 |
665 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.testdomain.interact;
import java.util.function.Supplier;
import org.apache.isis.commons.internal.base._Either;
import org.apache.isis.core.metamodel.interactions.managed.ActionInteraction;
import org.apache.isis.core.metamodel.interactions.managed.InteractionVeto;
import org.apache.isis.core.metamodel.interactions.managed.ParameterNegotiationModel;
import org.apache.isis.core.metamodel.spec.ManagedObject;
import lombok.Getter;
public class SimulatedUiSubmit extends HasActionValidation {
// might require a weak reference when actually implementing
private Supplier<_Either<ManagedObject, InteractionVeto>> doSubmit;
@Getter private _Either<ManagedObject, InteractionVeto> result;
public void bind(final ActionInteraction interaction, final ParameterNegotiationModel pendingArgs) {
super.bind(pendingArgs);
doSubmit = ()->interaction.invokeWith(pendingArgs);
}
public void simulateSubmit() {
result = doSubmit.get();
}
}
| 558 |
681 | <reponame>Siddhesh-Ghadi/airline
package io.airlift.airline;
public interface CommandFactory<T>
{
T createInstance(Class<?> type);
}
| 52 |
14,425 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.PathIsDirectoryException;
import org.apache.hadoop.io.IOUtils;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
/**
* Show the first 1KB of the file.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
class Head extends FsCommand {
public static void registerCommands(CommandFactory factory) {
factory.addClass(Head.class, "-head");
}
public static final String NAME = "head";
public static final String USAGE = "<file>";
public static final String DESCRIPTION =
"Show the first 1KB of the file.\n";
private long endingOffset = 1024;
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(1, 1);
cf.parse(args);
}
@Override
protected List<PathData> expandArgument(String arg) throws IOException {
List<PathData> items = new LinkedList<PathData>();
items.add(new PathData(arg, getConf()));
return items;
}
@Override
protected void processPath(PathData item) throws IOException {
if (item.stat.isDirectory()) {
throw new PathIsDirectoryException(item.toString());
}
dumpToOffset(item);
}
private void dumpToOffset(PathData item) throws IOException {
FSDataInputStream in = item.fs.open(item.path);
try {
IOUtils.copyBytes(in, System.out, endingOffset, false);
} finally {
in.close();
}
}
}
| 744 |
1,700 | // Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "../../../external/catch.hpp"
#include "../common/algorithms/parallel_filter.h"
#include <map>
#include <vector>
using namespace embree;
namespace parallel_filter_unit_test {
TEST_CASE("Test parallel_filter", "[parallel_filter]")
{
bool passed = true;
auto pred = [&](uint32_t v) { return (v & 0x3) == 0; };
for (size_t N = 10; N < 1000000; N = size_t(2.1 * N))
{
size_t N0 = rand() % N;
/* initialize array with random numbers */
std::vector<uint32_t> src(N);
std::map<uint32_t, int> m;
for (size_t i = 0; i < N; i++)
src[i] = rand();
/* count elements up */
for (size_t i = N0; i < N; i++)
if (pred(src[i]))
m[src[i]] = 0;
for (size_t i = N0; i < N; i++)
if (pred(src[i]))
m[src[i]]++;
/* filter array */
//size_t M = sequential_filter(src.data(),N0,N,pred);
size_t M = parallel_filter(src.data(), N0, N, size_t(1024), pred);
/* check if filtered data is correct */
for (size_t i = N0; i < M; i++)
{
passed &= pred(src[i]);
m[src[i]]--;
}
for (size_t i = N0; i < M; i++)
passed &= (m[src[i]] == 0);
}
REQUIRE(passed);
}
}
| 567 |
348 | {"nom":"Cadaujac","circ":"9ème circonscription","dpt":"Gironde","inscrits":4331,"abs":2751,"votants":1580,"blancs":169,"nuls":89,"exp":1322,"res":[{"nuance":"MDM","nom":"<NAME>","voix":717},{"nuance":"SOC","nom":"<NAME>","voix":605}]} | 97 |
317 | package test.swig;
import java.util.Random;
import java.util.Date;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
public class swigtest {
static {
System.loadLibrary("swigtest");
}
public static String FILE__() {
return Thread.currentThread().getStackTrace()[2].getFileName();
}
public static int LINE__() {
return Thread.currentThread().getStackTrace()[2].getLineNumber();
}
public static String LG__() {
DateFormat dateFormat = new SimpleDateFormat("HH:mm:ss");
return "[" + dateFormat.format(new Date()) + "] " + FILE__() + ":";
}
public static void main(String argv[]) {
test.int_input_test(42);
if (test.int_return_test() == -35)
System.out.println(LG__() + LINE__() + ": " + "int_return_test passed");
else
System.out.println(LG__() + LINE__() + ": " + "int_return_test failed");
Random randnum = new Random();
randnum.setSeed(42);
int n = 10;
int[] int_test = new int[n];
for (int i = 0; i < n; i++) {
int_test[i] = randnum.nextInt() % 1000;
}
test.int_arr_input_test(int_test, n);
test.float_input_test(42.42f);
if (Math.abs(test.float_return_test() - 42.42) < 1e-5)
System.out.println(LG__() + LINE__() + ": " + "float_return_test passed");
else
System.out.println(LG__() + LINE__() + ": " + "float_return_test failed");
float[] test_arr = {0.894f, 0.223f, 0.009f, 0.343f, 0.826f,
0.601f, 0.201f, 0.76f, 0.65f, 0.545f};
float[] arr = test.float_arr_return_test();
for (int i = 0; i < 10; i++) {
if (Math.abs(test_arr[i] - arr[i]) > 1e-5)
System.out.println(LG__() + LINE__() + ": " + "float_arr_return_test failed");
}
System.out.println(LG__() + LINE__() + ": " + "float_arr_return_test passed");
float[] float_test = new float[n];
for (int i = 0; i < n; i++) {
float_test[i] = randnum.nextFloat() % 1000;
}
test.float_arr_input_test(float_test, n);
test.string_input_test("string_input_test");
if (test.string_return_test().equals("string_return_test"))
System.out.println(LG__() + LINE__() + ": " + "string_return_test passed");
else
System.out.println(LG__() + LINE__() + ": " + "string_return_test failed");
}
}
| 978 |
1,244 | <reponame>afeng11/tomato-arm
/*
version 20080912
<NAME>
Public domain.
*/
#include <stdint.h>
#include <stdlib.h>
#include "crypto_core_hsalsa20.h"
#include "private/common.h"
#define ROUNDS 20
#define U32C(v) (v##U)
static uint32_t rotate(uint32_t u,int c)
{
return (u << c) | (u >> (32 - c));
}
int crypto_core_hsalsa20(
unsigned char *out,
const unsigned char *in,
const unsigned char *k,
const unsigned char *c
)
{
uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
int i;
if (c == NULL) {
x0 = U32C(0x61707865);
x5 = U32C(0x3320646e);
x10 = U32C(0x79622d32);
x15 = U32C(0x6b206574);
} else {
x0 = LOAD32_LE(c + 0);
x5 = LOAD32_LE(c + 4);
x10 = LOAD32_LE(c + 8);
x15 = LOAD32_LE(c + 12);
}
x1 = LOAD32_LE(k + 0);
x2 = LOAD32_LE(k + 4);
x3 = LOAD32_LE(k + 8);
x4 = LOAD32_LE(k + 12);
x11 = LOAD32_LE(k + 16);
x12 = LOAD32_LE(k + 20);
x13 = LOAD32_LE(k + 24);
x14 = LOAD32_LE(k + 28);
x6 = LOAD32_LE(in + 0);
x7 = LOAD32_LE(in + 4);
x8 = LOAD32_LE(in + 8);
x9 = LOAD32_LE(in + 12);
for (i = ROUNDS;i > 0;i -= 2) {
x4 ^= rotate( x0+x12, 7);
x8 ^= rotate( x4+ x0, 9);
x12 ^= rotate( x8+ x4,13);
x0 ^= rotate(x12+ x8,18);
x9 ^= rotate( x5+ x1, 7);
x13 ^= rotate( x9+ x5, 9);
x1 ^= rotate(x13+ x9,13);
x5 ^= rotate( x1+x13,18);
x14 ^= rotate(x10+ x6, 7);
x2 ^= rotate(x14+x10, 9);
x6 ^= rotate( x2+x14,13);
x10 ^= rotate( x6+ x2,18);
x3 ^= rotate(x15+x11, 7);
x7 ^= rotate( x3+x15, 9);
x11 ^= rotate( x7+ x3,13);
x15 ^= rotate(x11+ x7,18);
x1 ^= rotate( x0+ x3, 7);
x2 ^= rotate( x1+ x0, 9);
x3 ^= rotate( x2+ x1,13);
x0 ^= rotate( x3+ x2,18);
x6 ^= rotate( x5+ x4, 7);
x7 ^= rotate( x6+ x5, 9);
x4 ^= rotate( x7+ x6,13);
x5 ^= rotate( x4+ x7,18);
x11 ^= rotate(x10+ x9, 7);
x8 ^= rotate(x11+x10, 9);
x9 ^= rotate( x8+x11,13);
x10 ^= rotate( x9+ x8,18);
x12 ^= rotate(x15+x14, 7);
x13 ^= rotate(x12+x15, 9);
x14 ^= rotate(x13+x12,13);
x15 ^= rotate(x14+x13,18);
}
STORE32_LE(out + 0,x0);
STORE32_LE(out + 4,x5);
STORE32_LE(out + 8,x10);
STORE32_LE(out + 12,x15);
STORE32_LE(out + 16,x6);
STORE32_LE(out + 20,x7);
STORE32_LE(out + 24,x8);
STORE32_LE(out + 28,x9);
return 0;
}
| 1,337 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.php.zend2;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import javax.swing.event.ChangeListener;
import org.netbeans.api.annotations.common.CheckForNull;
import org.netbeans.modules.php.api.phpmodule.PhpModule;
import org.netbeans.modules.php.api.util.FileUtils;
import org.netbeans.modules.php.spi.phpmodule.ImportantFilesImplementation;
import org.openide.filesystems.FileChangeAdapter;
import org.openide.filesystems.FileEvent;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileRenameEvent;
import org.openide.filesystems.FileUtil;
import org.openide.util.ChangeSupport;
public final class ConfigurationFiles extends FileChangeAdapter implements ImportantFilesImplementation {
private static final String CONFIG_DIRECTORY = "config"; // NOI18N
private final PhpModule phpModule;
private final ChangeSupport changeSupport = new ChangeSupport(this);
// @GuardedBy("this")
private FileObject sourceDirectory = null;
ConfigurationFiles(PhpModule phpModule) {
assert phpModule != null;
this.phpModule = phpModule;
}
@Override
public Collection<FileInfo> getFiles() {
FileObject sourceDir = getSourceDirectory();
if (sourceDir == null) {
// broken project
return Collections.emptyList();
}
List<FileInfo> files = new ArrayList<>();
FileObject configDir = sourceDir.getFileObject(CONFIG_DIRECTORY);
if (configDir != null
&& configDir.isFolder()
&& configDir.isValid()) {
Enumeration<? extends FileObject> children = configDir.getChildren(true);
while (children.hasMoreElements()) {
FileObject child = children.nextElement();
if (child.isData()
&& child.isValid()
&& FileUtils.isPhpFile(child)) {
files.add(new FileInfo(child));
}
}
Collections.sort(files, FileInfo.COMPARATOR);
}
return files;
}
@Override
public void addChangeListener(ChangeListener listener) {
changeSupport.addChangeListener(listener);
}
@Override
public void removeChangeListener(ChangeListener listener) {
changeSupport.removeChangeListener(listener);
}
private void fireChange() {
changeSupport.fireChange();
}
@CheckForNull
private synchronized FileObject getSourceDirectory() {
if (sourceDirectory == null) {
sourceDirectory = phpModule.getSourceDirectory();
if (sourceDirectory != null) {
File sources = FileUtil.toFile(sourceDirectory);
addListener(new File(sources, CONFIG_DIRECTORY));
}
}
return sourceDirectory;
}
private void addListener(File path) {
try {
FileUtil.addRecursiveListener(this, path);
} catch (IllegalArgumentException ex) {
// noop, already listening...
assert false : path;
}
}
//~ FS
@Override
public void fileRenamed(FileRenameEvent fe) {
fireChange();
}
@Override
public void fileDeleted(FileEvent fe) {
fireChange();
}
@Override
public void fileDataCreated(FileEvent fe) {
fireChange();
}
@Override
public void fileFolderCreated(FileEvent fe) {
fireChange();
}
}
| 1,641 |
1,127 | <filename>docs/template_plugin/tests/functional/op_reference/hswish.cpp
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include "openvino/op/hswish.hpp"
#include "base_reference_test.hpp"
using namespace reference_tests;
using namespace ov;
using namespace InferenceEngine;
namespace {
struct HSwishParams {
template <class IT>
HSwishParams(const ov::PartialShape& shape, const ov::element::Type& iType, const std::vector<IT>& iValues, const std::vector<IT>& oValues)
: pshape(shape),
inType(iType),
outType(iType),
inputData(CreateTensor(iType, iValues)),
refData(CreateTensor(iType, oValues)) {}
ov::PartialShape pshape;
ov::element::Type inType;
ov::element::Type outType;
ov::Tensor inputData;
ov::Tensor refData;
};
class ReferenceHSwishLayerTest : public testing::TestWithParam<HSwishParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params.pshape, params.inType, params.outType);
inputData = {params.inputData};
refOutData = {params.refData};
}
static std::string getTestCaseName(const testing::TestParamInfo<HSwishParams>& obj) {
auto param = obj.param;
std::ostringstream result;
result << "shape=" << param.pshape << "_";
result << "iType=" << param.inType << "_";
result << "oType=" << param.outType;
return result.str();
}
private:
static std::shared_ptr<Model> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
const element::Type& HSwishected_output_type) {
const auto in = std::make_shared<op::v0::Parameter>(input_type, input_shape);
const auto HSwish = std::make_shared<op::v4::HSwish>(in);
return std::make_shared<ov::Model>(NodeVector {HSwish}, ParameterVector {in});
}
};
TEST_P(ReferenceHSwishLayerTest, CompareWithRefs) {
Exec();
}
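// For reference: the expected outputs below follow the usual definition
// HSwish(x) = x * min(max(x + 3, 0), 6) / 6, e.g. HSwish(1) = 1 * 4 / 6
// ~= 0.66666667 and HSwish(-0.5) = -0.5 * 2.5 / 6 ~= -0.20833333, matching
// the first test vector.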
template <element::Type_t IN_ET>
std::vector<HSwishParams> generateHSwishFloatParams() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<HSwishParams> hSwishParams {
HSwishParams(ov::PartialShape {2, 3},
IN_ET,
std::vector<T>{1.f, 8.f, -8.f, 17.f, -0.5f, -1.f},
std::vector<T>{0.66666667f, 8.f, 0.f, 17.f, -0.20833333f, -0.33333333f}),
HSwishParams(ov::PartialShape {2, 2, 1, 2},
IN_ET,
std::vector<T>{0.1f, 0.6f, 20.f, -7.f, -5.3f, 3.5f, -9.f, 11.f},
std::vector<T>{0.05166667f, 0.36f, 20.f, 0.f, 0.f, 3.5f, 0.f, 11.f})
};
return hSwishParams;
}
std::vector<HSwishParams> generateHSwishCombinedParams() {
const std::vector<std::vector<HSwishParams>> hSwishTypeParams {
generateHSwishFloatParams<element::Type_t::f32>(),
generateHSwishFloatParams<element::Type_t::f16>()
};
std::vector<HSwishParams> combinedParams;
for (const auto& params : hSwishTypeParams) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
}
return combinedParams;
}
INSTANTIATE_TEST_SUITE_P(smoke_HSwish_With_Hardcoded_Refs, ReferenceHSwishLayerTest,
testing::ValuesIn(generateHSwishCombinedParams()), ReferenceHSwishLayerTest::getTestCaseName);
} // namespace
| 1,554 |
515 | import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.core.window import Window
from random import randint, choice
import kivent_core
from kivent_core.gameworld import GameWorld
from kivent_core.systems.position_systems import PositionSystem2D
from kivent_core.systems.renderers import Renderer
from kivent_core.managers.resource_managers import texture_manager
from kivy.properties import StringProperty
from os.path import dirname, join, abspath
texture_manager.load_atlas(join(dirname(dirname(abspath(__file__))), 'assets',
'background_objects.atlas'))
class TestGame(Widget):
def on_kv_post(self, *args):
self.gameworld.init_gameworld(
['renderer', 'position'],
callback=self.init_game)
def init_game(self):
self.setup_states()
self.set_state()
self.load_models()
self.draw_some_stuff()
def load_models(self):
model_manager = self.gameworld.model_manager
model_manager.load_textured_rectangle('vertex_format_4f', 7., 7.,
'star1', 'star1-4')
model_manager.load_textured_rectangle('vertex_format_4f', 10., 10.,
'star1', 'star1-4-2')
def draw_some_stuff(self):
init_entity = self.gameworld.init_entity
for x in range(2000):
pos = randint(0, Window.width), randint(0, Window.height)
model_key = choice(['star1-4', 'star1-4-2'])
create_dict = {
'position': pos,
'renderer': {'texture': 'star1',
'model_key': model_key},
}
ent = init_entity(create_dict, ['position', 'renderer'])
#If you do not set Renderer.force_update to True, call update_trigger
#self.ids.renderer.update_trigger()
def setup_states(self):
self.gameworld.add_state(state_name='main',
systems_added=['renderer'],
systems_removed=[], systems_paused=[],
systems_unpaused=['renderer'],
screenmanager_screen='main')
def set_state(self):
self.gameworld.state = 'main'
class DebugPanel(Widget):
fps = StringProperty(None)
def __init__(self, **kwargs):
super(DebugPanel, self).__init__(**kwargs)
Clock.schedule_once(self.update_fps)
def update_fps(self,dt):
self.fps = str(int(Clock.get_fps()))
Clock.schedule_once(self.update_fps, .05)
class YourAppNameApp(App):
def build(self):
Window.clearcolor = (0, 0, 0, 1.)
if __name__ == '__main__':
YourAppNameApp().run()
| 1,141 |
363 | <reponame>elizabethking2/UnrealGDK
// Copyright (c) Improbable Worlds Ltd, All Rights Reserved
#pragma once
#include "HAL/PlatformProcess.h"
#include "Interfaces/IPluginManager.h"
/**
* This class ensures that the C API worker library is loaded before it is needed by code.
* This is only required when a platform uses PublicDelayLoadDLLs in SpatialGDK.Build.cs.
*/
class FSpatialGDKLoader
{
public:
FSpatialGDKLoader()
{
#if PLATFORM_WINDOWS
FString Path = IPluginManager::Get().FindPlugin(TEXT("SpatialGDK"))->GetBaseDir() / TEXT("Binaries/ThirdParty/Improbable");
#if PLATFORM_64BITS
Path = Path / TEXT("Win64");
#else
Path = Path / TEXT("Win32");
#endif // PLATFORM_64BITS
FString WorkerFilePath = Path / TEXT("improbable_worker.dll");
WorkerLibraryHandle = FPlatformProcess::GetDllHandle(*WorkerFilePath);
if (WorkerLibraryHandle == nullptr)
{
UE_LOG(LogTemp, Fatal, TEXT("Failed to load %s. Have you run `UnrealGDK/Setup.bat`?"), *WorkerFilePath);
}
#if TRACE_LIB_ACTIVE
FString TraceFilePath = Path / TEXT("trace_dynamic.dll");
TraceLibraryHandle = FPlatformProcess::GetDllHandle(*TraceFilePath);
if (TraceLibraryHandle == nullptr)
{
UE_LOG(LogTemp, Fatal, TEXT("Failed to load %s. Have you run `UnrealGDK/SetupIncTraceLibs.bat`?"), *TraceFilePath);
}
#endif // TRACE_LIB_ACTIVE
#elif PLATFORM_PS4
WorkerLibraryHandle = FPlatformProcess::GetDllHandle(TEXT("libworker.prx"));
#endif
}
~FSpatialGDKLoader()
{
if (WorkerLibraryHandle != nullptr)
{
FPlatformProcess::FreeDllHandle(WorkerLibraryHandle);
WorkerLibraryHandle = nullptr;
}
if (TraceLibraryHandle != nullptr)
{
FPlatformProcess::FreeDllHandle(TraceLibraryHandle);
TraceLibraryHandle = nullptr;
}
}
FSpatialGDKLoader(const FSpatialGDKLoader& rhs) = delete;
FSpatialGDKLoader& operator=(const FSpatialGDKLoader& rhs) = delete;
private:
void* WorkerLibraryHandle = nullptr;
void* TraceLibraryHandle = nullptr;
};
| 714 |
466 | package mu.nu.nullpo.game.subsystem.mode;
import mu.nu.nullpo.game.event.EventReceiver;
import mu.nu.nullpo.game.play.GameEngine;
import mu.nu.nullpo.game.play.GameManager;
import mu.nu.nullpo.util.GeneralUtil;
/**
* NET-VS-LINE RACE mode
*/
public class NetVSLineRaceMode extends NetDummyVSMode {
/** Number of lines required to win */
private int goalLines; // TODO: Add option to change this
/*
* Mode name
*/
@Override
public String getName() {
return "NET-VS-LINE RACE";
}
/*
* Mode init
*/
@Override
public void modeInit(GameManager manager) {
super.modeInit(manager);
goalLines = 40;
}
/*
* Player init
*/
@Override
protected void netPlayerInit(GameEngine engine, int playerID) {
super.netPlayerInit(engine, playerID);
engine.meterColor = GameEngine.METER_COLOR_GREEN;
}
/**
* Apply room settings, but ignore non-speed settings
*/
@Override
protected void netvsApplyRoomSettings(GameEngine engine) {
if(netCurrentRoomInfo != null) {
engine.speed.gravity = netCurrentRoomInfo.gravity;
engine.speed.denominator = netCurrentRoomInfo.denominator;
engine.speed.are = netCurrentRoomInfo.are;
engine.speed.areLine = netCurrentRoomInfo.areLine;
engine.speed.lineDelay = netCurrentRoomInfo.lineDelay;
engine.speed.lockDelay = netCurrentRoomInfo.lockDelay;
engine.speed.das = netCurrentRoomInfo.das;
}
}
/*
* Called at game start
*/
@Override
public void startGame(GameEngine engine, int playerID) {
super.startGame(engine, playerID);
engine.meterColor = GameEngine.METER_COLOR_GREEN;
engine.meterValue = owner.receiver.getMeterMax(engine);
}
/**
* Get player's place
* @param engine GameEngine
* @param playerID Player ID
* @return Player's place
*/
private int getNowPlayerPlace(GameEngine engine, int playerID) {
if(!netvsPlayerExist[playerID] || netvsPlayerDead[playerID]) return -1;
int place = 0;
int myLines = Math.min(engine.statistics.lines, goalLines);
for(int i = 0; i < getPlayers(); i++) {
if((i != playerID) && netvsPlayerExist[i] && !netvsPlayerDead[i]) {
int enemyLines = Math.min(owner.engine[i].statistics.lines, goalLines);
if(myLines < enemyLines) {
place++;
} else if((myLines == enemyLines) && (engine.statistics.pps < owner.engine[i].statistics.pps)) {
place++;
} else if((myLines == enemyLines) && (engine.statistics.pps == owner.engine[i].statistics.pps) &&
(engine.statistics.lpm < owner.engine[i].statistics.lpm))
{
place++;
}
}
}
return place;
}
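/*
 * Illustrative example (not from the original source): with a 40-line goal,
 * a player with 35 cleared lines ranks ahead of one with 30; if both are at
 * 35, the player with the higher pieces-per-second (pps) ranks higher, and
 * lines-per-minute (lpm) breaks any remaining tie.
 */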
/**
* Update progress meter
* @param engine GameEngine
*/
private void updateMeter(GameEngine engine) {
if(goalLines > 0) {
int remainLines = goalLines - engine.statistics.lines;
engine.meterValue = (remainLines * owner.receiver.getMeterMax(engine)) / goalLines;
engine.meterColor = GameEngine.METER_COLOR_GREEN;
if(remainLines <= 30) engine.meterColor = GameEngine.METER_COLOR_YELLOW;
if(remainLines <= 20) engine.meterColor = GameEngine.METER_COLOR_ORANGE;
if(remainLines <= 10) engine.meterColor = GameEngine.METER_COLOR_RED;
}
}
/*
* Calculate Score
*/
@Override
public void calcScore(GameEngine engine, int playerID, int lines) {
// Meter
updateMeter(engine);
// All clear
if((lines >= 1) && (engine.field.isEmpty())) {
engine.playSE("bravo");
}
// Game Completed
if((engine.statistics.lines >= goalLines) && (playerID == 0)) {
if(netvsIsPractice) {
engine.stat = GameEngine.Status.EXCELLENT;
engine.resetStatc();
} else {
// Send game end message
int[] places = new int[NETVS_MAX_PLAYERS];
int[] uidArray = new int[NETVS_MAX_PLAYERS];
for(int i = 0; i < getPlayers(); i++) {
places[i] = getNowPlayerPlace(owner.engine[i], i);
uidArray[i] = -1;
}
for(int i = 0; i < getPlayers(); i++) {
if((places[i] >= 0) && (places[i] < NETVS_MAX_PLAYERS)) {
uidArray[places[i]] = netvsPlayerUID[i];
}
}
String strMsg = "racewin";
for(int i = 0; i < getPlayers(); i++) {
if(uidArray[i] != -1) strMsg += "\t" + uidArray[i];
}
strMsg += "\n";
netLobby.netPlayerClient.send(strMsg);
// Wait until everyone dies
engine.stat = GameEngine.Status.NOTHING;
engine.resetStatc();
}
}
}
/*
* Drawing processing at the end of every frame
*/
@Override
public void renderLast(GameEngine engine, int playerID) {
super.renderLast(engine, playerID);
int x = owner.receiver.getFieldDisplayPositionX(engine, playerID);
int y = owner.receiver.getFieldDisplayPositionY(engine, playerID);
int fontColor = EventReceiver.COLOR_WHITE;
if(netvsPlayerExist[playerID] && engine.isVisible) {
if( ((netvsIsGameActive) || ((netvsIsPractice) && (playerID == 0))) && (engine.stat != GameEngine.Status.RESULT) ) {
// Lines left
int remainLines = Math.max(0, goalLines - engine.statistics.lines);
fontColor = EventReceiver.COLOR_WHITE;
if((remainLines <= 30) && (remainLines > 0)) fontColor = EventReceiver.COLOR_YELLOW;
if((remainLines <= 20) && (remainLines > 0)) fontColor = EventReceiver.COLOR_ORANGE;
if((remainLines <= 10) && (remainLines > 0)) fontColor = EventReceiver.COLOR_RED;
String strLines = String.valueOf(remainLines);
if(engine.displaysize != -1) {
if(strLines.length() == 1) {
owner.receiver.drawMenuFont(engine, playerID, 4, 21, strLines, fontColor, 2.0f);
} else if(strLines.length() == 2) {
owner.receiver.drawMenuFont(engine, playerID, 3, 21, strLines, fontColor, 2.0f);
} else if(strLines.length() == 3) {
owner.receiver.drawMenuFont(engine, playerID, 2, 21, strLines, fontColor, 2.0f);
}
} else {
if(strLines.length() == 1) {
owner.receiver.drawDirectFont(engine, playerID, x + 4 + 32, y + 168, strLines, fontColor, 1.0f);
} else if(strLines.length() == 2) {
owner.receiver.drawDirectFont(engine, playerID, x + 4 + 24, y + 168, strLines, fontColor, 1.0f);
} else if(strLines.length() == 3) {
owner.receiver.drawDirectFont(engine, playerID, x + 4 + 16, y + 168, strLines, fontColor, 1.0f);
}
}
}
if((netvsIsGameActive) && (engine.stat != GameEngine.Status.RESULT)) {
// Place
int place = getNowPlayerPlace(engine, playerID);
if(netvsPlayerDead[playerID]) place = netvsPlayerPlace[playerID];
if(engine.displaysize != -1) {
if(place == 0) {
owner.receiver.drawMenuFont(engine, playerID, -2, 22, "1ST", EventReceiver.COLOR_ORANGE);
} else if(place == 1) {
owner.receiver.drawMenuFont(engine, playerID, -2, 22, "2ND", EventReceiver.COLOR_WHITE);
} else if(place == 2) {
owner.receiver.drawMenuFont(engine, playerID, -2, 22, "3RD", EventReceiver.COLOR_RED);
} else if(place == 3) {
owner.receiver.drawMenuFont(engine, playerID, -2, 22, "4TH", EventReceiver.COLOR_GREEN);
} else if(place == 4) {
owner.receiver.drawMenuFont(engine, playerID, -2, 22, "5TH", EventReceiver.COLOR_BLUE);
} else if(place == 5) {
owner.receiver.drawMenuFont(engine, playerID, -2, 22, "6TH", EventReceiver.COLOR_PURPLE);
}
} else {
if(place == 0) {
owner.receiver.drawDirectFont(engine, playerID, x, y + 168, "1ST", EventReceiver.COLOR_ORANGE, 0.5f);
} else if(place == 1) {
owner.receiver.drawDirectFont(engine, playerID, x, y + 168, "2ND", EventReceiver.COLOR_WHITE, 0.5f);
} else if(place == 2) {
owner.receiver.drawDirectFont(engine, playerID, x, y + 168, "3RD", EventReceiver.COLOR_RED, 0.5f);
} else if(place == 3) {
owner.receiver.drawDirectFont(engine, playerID, x, y + 168, "4TH", EventReceiver.COLOR_GREEN, 0.5f);
} else if(place == 4) {
owner.receiver.drawDirectFont(engine, playerID, x, y + 168, "5TH", EventReceiver.COLOR_BLUE, 0.5f);
} else if(place == 5) {
owner.receiver.drawDirectFont(engine, playerID, x, y + 168, "6TH", EventReceiver.COLOR_PURPLE, 0.5f);
}
}
}
// Games count
else if(!netvsIsPractice || (playerID != 0)) {
String strTemp = netvsPlayerWinCount[playerID] + "/" + netvsPlayerPlayCount[playerID];
if(engine.displaysize != -1) {
int y2 = 21;
if(engine.stat == GameEngine.Status.RESULT) y2 = 22;
owner.receiver.drawMenuFont(engine, playerID, 0, y2, strTemp, EventReceiver.COLOR_WHITE);
} else {
owner.receiver.drawDirectFont(engine, playerID, x + 4, y + 168, strTemp, EventReceiver.COLOR_WHITE, 0.5f);
}
}
}
}
/*
* Render results screen
*/
@Override
public void renderResult(GameEngine engine, int playerID) {
super.renderResult(engine, playerID);
float scale = 1.0f;
if(engine.displaysize == -1) scale = 0.5f;
drawResultScale(engine, playerID, owner.receiver, 2, EventReceiver.COLOR_ORANGE, scale,
"LINE", String.format("%10d", engine.statistics.lines),
"PIECE", String.format("%10d", engine.statistics.totalPieceLocked),
"LINE/MIN", String.format("%10g", engine.statistics.lpm),
"PIECE/SEC", String.format("%10g", engine.statistics.pps),
"TIME", String.format("%10s", GeneralUtil.getTime(engine.statistics.time)));
}
/*
* Send stats
*/
@Override
protected void netSendStats(GameEngine engine) {
if((engine.playerID == 0) && !netvsIsPractice && !netvsIsWatch()) {
String strMsg = "game\tstats\t" + engine.statistics.lines + "\t" + engine.statistics.pps + "\t" + engine.statistics.lpm + "\n";
netLobby.netPlayerClient.send(strMsg);
}
}
/*
* Receive stats
*/
@Override
protected void netRecvStats(GameEngine engine, String[] message) {
if(message.length > 4) engine.statistics.lines = Integer.parseInt(message[4]);
if(message.length > 5) engine.statistics.pps = Float.parseFloat(message[5]);
if(message.length > 6) engine.statistics.lpm = Float.parseFloat(message[6]);
updateMeter(engine);
}
/*
* Send end-of-game stats
*/
@Override
protected void netSendEndGameStats(GameEngine engine) {
int playerID = engine.playerID;
String msg = "gstat\t";
msg += netvsPlayerPlace[playerID] + "\t";
msg += 0 + "\t" + 0 + "\t" + 0 + "\t";
msg += engine.statistics.lines + "\t" + engine.statistics.lpm + "\t";
msg += engine.statistics.totalPieceLocked + "\t" + engine.statistics.pps + "\t";
msg += netvsPlayTimer + "\t" + 0 + "\t" + netvsPlayerWinCount[playerID] + "\t" + netvsPlayerPlayCount[playerID];
msg += "\n";
netLobby.netPlayerClient.send(msg);
}
/*
* Receive end-of-game stats
*/
@Override
protected void netvsRecvEndGameStats(String[] message) {
int seatID = Integer.parseInt(message[2]);
int playerID = netvsGetPlayerIDbySeatID(seatID);
if((playerID != 0) || (netvsIsWatch())) {
GameEngine engine = owner.engine[playerID];
engine.statistics.lines = Integer.parseInt(message[8]);
engine.statistics.lpm = Float.parseFloat(message[9]);
engine.statistics.totalPieceLocked = Integer.parseInt(message[10]);
engine.statistics.pps = Float.parseFloat(message[11]);
engine.statistics.time = Integer.parseInt(message[12]);
netvsPlayerResultReceived[playerID] = true;
}
}
}
| 4,795 |
848 | <gh_stars>100-1000
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.util.tf_export import tf_export
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0]]]
def _compute_task_and_cores_to_replicas(core_assignment, topology):
"""Computes a nested dict which maps task and logical core to replicas."""
task_and_cores_to_replicas = {}
for replica in xrange(core_assignment.shape[0]):
for logical_core in xrange(core_assignment.shape[1]):
coordinates = core_assignment[replica, logical_core, :]
task_id = topology.task_ordinal_at_coordinates(coordinates)
if task_id not in task_and_cores_to_replicas:
task_and_cores_to_replicas[task_id] = {}
if logical_core not in task_and_cores_to_replicas[task_id]:
task_and_cores_to_replicas[task_id][logical_core] = set()
task_and_cores_to_replicas[task_id][logical_core].add(replica)
task_to_sorted_replica_id = {}
for task, core_to_replicas in task_and_cores_to_replicas.items():
core_to_sorted_replicas = {}
for core, replicas in core_to_replicas.items():
core_to_sorted_replicas[core] = sorted(replicas)
task_to_sorted_replica_id[task] = core_to_sorted_replicas
return task_to_sorted_replica_id
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
"""Mapping from logical cores in a computation to the physical TPU topology.
Prefer to use the `DeviceAssignment.build()` helper to construct a
`DeviceAssignment`; it is easier, though less flexible, than constructing a
`DeviceAssignment` directly.
"""
def __init__(self, topology, core_assignment):
"""Constructs a `DeviceAssignment` object.
Args:
topology: A `Topology` object that describes the physical TPU topology.
core_assignment: A logical to physical core mapping, represented as a
rank 3 numpy array. See the description of the `core_assignment`
property for more details.
Raises:
ValueError: If `topology` is not `Topology` object.
ValueError: If `core_assignment` is not a rank 3 numpy array.
"""
if not isinstance(topology, Topology):
raise ValueError("topology must be a Topology object, got {}".format(
type(topology)))
core_assignment = np.asarray(core_assignment, dtype=np.int32)
self._topology = topology
if core_assignment.ndim != 3:
raise ValueError("core_assignment must be a rank 3 numpy array, "
"got shape {}".format(core_assignment.shape))
self._num_replicas = core_assignment.shape[0]
self._num_cores_per_replica = core_assignment.shape[1]
if core_assignment.shape[-1] != topology.mesh_rank:
raise ValueError(
"minor dimension of core_assignment must have size equal to topology "
"rank ({}), got shape {}".format(topology.mesh_rank,
core_assignment.shape))
self._core_assignment = core_assignment
self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
self._core_assignment, topology)
@property
def topology(self):
"""A `Topology` that describes the TPU topology."""
return self._topology
@property
def num_cores_per_replica(self):
"""The number of cores per replica."""
return self._num_cores_per_replica
@property
def num_replicas(self):
"""The number of replicas of the computation."""
return self._num_replicas
@property
def core_assignment(self):
"""The logical to physical core mapping.
Returns:
An integer numpy array of rank 3, with shape
`[num_replicas, num_cores_per_replica, topology_rank]`. Maps
(replica, logical core) pairs to physical topology coordinates.
"""
return self._core_assignment
def coordinates(self, replica, logical_core):
"""Returns the physical topology coordinates of a logical core."""
return tuple(self.core_assignment[replica, logical_core, :])
def lookup_replicas(self, task_id, logical_core):
"""Lookup replica ids by task number and logical core.
Args:
task_id: TensorFlow task number.
logical_core: An integer, identifying a logical core.
Returns:
A sorted list of the replicas that are attached to that task and
logical_core.
Raises:
ValueError: If no replica exists in the task which contains the logical
core.
"""
try:
return self._task_and_cores_to_replicas[task_id][logical_core]
except KeyError:
raise ValueError(
"Cannot find any replica in task {} that contains logical_core {}".
format(task_id, logical_core))
def tpu_ordinal(self, replica=0, logical_core=0):
"""Returns the ordinal of the TPU device assigned to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.tpu_device_ordinal_at_coordinates(coordinates)
def host_device(self, replica=0, logical_core=0, job=None):
"""Returns the CPU device attached to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)
def tpu_device(self, replica=0, logical_core=0, job=None):
"""Returns the name of the TPU device assigned to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)
@staticmethod
def build(topology,
computation_shape=None,
computation_stride=None,
num_replicas=1):
return device_assignment(topology, computation_shape, computation_stride,
num_replicas)
def _ring_2d(height, width):
"""Ring-order of a height x width mesh.
For example, in a 4x4 mesh, this returns the following order.
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Args:
height: An integer represents the height.
width: An integer represents the width.
Returns:
A list of [y, x] pairs with ring order.
"""
if height == 1:
return [(0, i) for i in range(width)]
if width == 1:
return [(i, 0) for i in range(height)]
if height % 2 != 0:
logging.warning("Odd dimension")
return [(i % height, i // height) for i in range(width * height)]
ret = [(0, 0)]
for i in range(height // 2):
for j in range(1, width):
ret.append((2 * i, j))
for j in range(width - 1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(height - 1, 0, -1):
ret.append((i, 0))
return ret
def device_assignment(topology,
computation_shape=None,
computation_stride=None,
num_replicas=1):
"""Computes a device_assignment of a computation across a TPU topology.
Attempts to choose a compact grid of cores for locality.
Returns a `DeviceAssignment` that describes the cores in the topology assigned
to each core of each replica.
`computation_shape` and `computation_stride` values should be powers of 2 for
optimal packing.
Args:
topology: A `Topology` object that describes the TPU cluster topology.
To obtain a TPU topology, evaluate the `Tensor` returned by
`initialize_system` using `Session.run`. Either a serialized
`TopologyProto` or a `Topology` object may be passed. Note: you must
evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor` here.
computation_shape: A rank 1 int32 numpy array with size equal to the
topology rank, describing the shape of the computation's block of cores.
If None, the `computation_shape` is `[1] * topology_rank`.
computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
describing the inter-core spacing of the `computation_shape` cores in the
TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
num_replicas: The number of computation replicas to run. The replicas will
be packed into the free spaces of the topology.
Returns:
A DeviceAssignment object, which describes the mapping between the logical
cores in each computation replica and the physical cores in the TPU
topology.
Raises:
ValueError: If `topology` is not a valid `Topology` object.
ValueError: If `computation_shape` or `computation_stride` are not 1D int32
numpy arrays with shape [3] where all values are positive.
ValueError: If computation's replicas cannot fit into the TPU topology.
"""
# Deserialize the Topology proto, if it is a string.
if isinstance(topology, bytes):
topology = Topology(serialized=topology)
if not isinstance(topology, Topology):
raise ValueError("`topology` is not a Topology object; got {}".format(
type(topology)))
topology_rank = len(topology.mesh_shape)
mesh_shape = topology.mesh_shape
if computation_shape is None:
computation_shape = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_shape = np.asarray(computation_shape, dtype=np.int32)
if computation_stride is None:
computation_stride = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_stride = np.asarray(computation_stride, dtype=np.int32)
if computation_shape.shape != (topology_rank,):
raise ValueError("computation_shape must have shape [{}]; got {}".format(
topology_rank, computation_shape.shape))
if computation_stride.shape != (topology_rank,):
raise ValueError("computation_stride must have shape [{}]; got {}".format(
topology_rank, computation_stride.shape))
if any(computation_shape < 1):
raise ValueError(
"computation_shape must be positive; got computation_shape={}".format(
computation_shape))
if any(computation_stride < 1):
raise ValueError(
"computation_stride must be positive; got computation_stride={}".format(
computation_stride))
# Computes the physical size of one computation instance.
computation_footprint = computation_shape * computation_stride
if any(computation_footprint > mesh_shape):
raise ValueError(
"computation footprint {} does not fit in TPU topology shape {}".format(
computation_footprint, mesh_shape))
# Computes how many copies of the computation footprint fit in the mesh.
block_counts = mesh_shape // computation_footprint
replica_counts = block_counts * computation_stride
max_replicas = np.prod(replica_counts)
if num_replicas > max_replicas:
raise ValueError(
"requested {} replicas but only {} replicas with shape {} and "
"computation_stride {} fit in a TPU mesh of shape {}".format(
num_replicas, max_replicas, computation_shape, computation_stride,
mesh_shape))
def ceil_of_ratio(n, m):
return (n + m - 1) // m
replica_shape = [0] * topology_rank
if num_replicas > 0:
remaining_replicas = num_replicas
remaining_dims = topology_rank
# Choose dimensions as close to an equal cube as possible, in order of
# increasing dimension size. By visiting dimensions in increasing size, we
# assign the most constrained dimension first, so we won't make infeasible
# choices.
#
# As a secondary sort order, visit the dimensions in reverse order. This
# means we try to use both cores on the same chip in preference to two cores
# on different chips.
for x, ni in sorted(((x, -i) for (i, x) in enumerate(replica_counts))):
i = -ni
target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
replica_shape[i] = min(target_size, x)
remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
remaining_dims -= 1
assert remaining_replicas == 1 and remaining_dims == 0
# Assigns an offset to each replica such that no two replicas overlap.
replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
# TODO(ylc): Revisit here when topology_rank > 3.
enable_2d_tiling = (
topology_rank == 3 and
computation_shape[-1] == 2 # Only handle 2D case.
and np.prod(computation_stride) == 1 # Ensure no stride.
and num_replicas == max_replicas) # Full replication.
logging.info("enable_2d_tiling: {}".format(enable_2d_tiling))
if enable_2d_tiling:
assignment = []
inner_ring = _ring_2d(computation_shape[0], computation_shape[1])
outer_ring = _ring_2d(replica_shape[0], replica_shape[1])
for replica in xrange(num_replicas):
outer_x, outer_y = outer_ring[replica]
per_replica_assignment = []
for index in xrange(np.prod(computation_shape)):
inner_x, inner_y = inner_ring[index // 2]
px = outer_x * computation_shape[0] + inner_x
py = outer_y * computation_shape[1] + inner_y
pz = index % 2
per_replica_assignment.append([px, py, pz])
assignment.append(per_replica_assignment)
else:
for replica in xrange(num_replicas):
# Chooses a replica number in each axis.
t = replica
pos = []
for dim in replica_shape[::-1]:
pos.append(t % dim)
t //= dim
replica_pos = np.array(pos[::-1], dtype=np.int32)
# Determines where that replica starts in each axis.
outer = replica_pos // computation_stride
inner = replica_pos % computation_stride
replica_offsets[replica, :] = outer * computation_footprint + inner
# Computes a logical core -> physical core mapping for each replica.
indices = [
np.arange(0, computation_shape[i] * computation_stride[i],
computation_stride[i]) for i in xrange(topology_rank)
]
indices = np.concatenate(
[i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
axis=-1)
indices = indices.reshape((-1, topology_rank))
assignment = indices + replica_offsets[:, np.newaxis, :]
return DeviceAssignment(topology, core_assignment=assignment)
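# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# TF1-style `Session` named `sess` and a rank-3 TPU mesh; `sess` and the
# replica count below are placeholders, not values taken from this file.
#
# from tensorflow.python.tpu import tpu as tpu_ops
#
# serialized_topology = sess.run(tpu_ops.initialize_system())
# topology = Topology(serialized=serialized_topology)
#
# # One replica per chip, using both cores of each chip together.
# assignment = device_assignment(
#     topology,
#     computation_shape=[1, 1, 2],   # must have length equal to the mesh rank
#     computation_stride=[1, 1, 1],
#     num_replicas=4)                # e.g. four chips on a small slice
#
# assignment.tpu_device(replica=0, logical_core=0)   # TPU device name
# assignment.host_device(replica=0, logical_core=0)  # CPU host for that core
# ---------------------------------------------------------------------------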
| 5,469 |
429 | #!/usr/bin/python
'''
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from ior_test_base import IorTestBase
class EcodCellSize(IorTestBase):
# pylint: disable=too-many-ancestors
# pylint: disable=too-few-public-methods
"""EC IOR class to run tests with different cell size.
Test Class Description: To validate Erasure code object with different cell
sizes.
:avocado: recursive
"""
def test_ec_cell_size(self):
"""Jira ID: DAOS-7311.
Test Description:
Test Erasure code object with IOR with different cell sizes.
Use Case:
Create a medium-sized pool and run IOR with the supported EC object
classes, using container cell size properties from 64K to 1M.
:avocado: tags=all,full_regression
:avocado: tags=hw,large,ib2
:avocado: tags=ec,ec_ior,ior
:avocado: tags=ec_cell_size
"""
obj_class = self.params.get("dfs_oclass", '/run/ior/objectclass/*')
for oclass in obj_class:
self.ior_cmd.dfs_oclass.update(oclass)
self.run_ior_with_pool()
| 510 |
521 | <filename>include/retdec/utils/value.h
/**
* @file include/retdec/utils/value.h
* @brief Values and other derived class representation.
* @copyright (c) 2017 Avast Software, licensed under the MIT license
*/
#ifndef RETDEC_UTILS_VALUE_H
#define RETDEC_UTILS_VALUE_H
#include <cassert>
#include <ostream>
namespace retdec {
namespace utils {
/**
* This class encapsulates a value of any type and records whether the value
* has been defined. Any attempt to read an undefined value triggers an
* assertion.
*
* Example usage #1:
* @code{.cpp}
* Maybe<int> val;
* val.isDefined(); // false
* val.isUndefined(); // true
* val = 10;
* val.isDefined(); // true
* val.isUndefined(); // false
* int x = val + 20;
* @endcode
*
* Example usage #2:
* @code{.cpp}
* Maybe<int> val(10);
* val.isDefined(); // true
* val.isUndefined(); // false
* @endcode
*/
template <class T>
class Maybe
{
public:
Maybe() {}
Maybe(const T& value) : _defined(true), _value(value) {}
Maybe(T&& value) : _defined(true), _value(std::move(value)) {}
Maybe(const Maybe<T>&) = default;
Maybe(Maybe<T>&&) = default;
Maybe& operator=(Maybe<T> rhs)
{
std::swap(_defined, rhs._defined);
std::swap(_value, rhs._value);
return *this;
}
operator T() const { return getValue(); }
const T& getValue() const { assert(isDefined()); return _value; }
bool isUndefined() const { return !isDefined(); }
bool isDefined() const { return _defined; }
void setUndefined() { _defined = false; _value = T{}; }
friend std::ostream& operator<< (std::ostream &out, const Maybe<T> &v)
{
if (v.isDefined())
return out << v.getValue();
else
return out << "UNDEFINED";
}
private:
bool _defined = false;
T _value{};
};
/**
* Two Maybe objects are equal if they are both undefined or both defined
* with the same value.
*/
template <typename T>
bool operator==(const Maybe<T>& v1, const Maybe<T>& v2)
{
if (v1.isUndefined() && v2.isUndefined())
{
return true;
}
if (v1.isDefined() && v2.isDefined())
{
return v1.getValue() == v2.getValue();
}
else
{
return false;
}
}
template <typename T>
bool operator!=(const Maybe<T>& v1, const Maybe<T>& v2)
{
return !(v1 == v2);
}
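// Illustrative example (not part of the original header):
//
//   Maybe<int> a;                  // undefined
//   Maybe<int> b;                  // undefined
//   Maybe<int> c(42);
//
//   assert(a == b);                // both undefined
//   assert(a != c);                // defined vs. undefined
//   assert(c == Maybe<int>(42));   // both defined with the same value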
} // namespace utils
} // namespace retdec
#endif
| 866 |
21,382 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.ray.streaming.state.keystate.state.impl;
import io.ray.streaming.state.backend.KeyStateBackend;
import io.ray.streaming.state.backend.impl.MemoryStateBackend;
import io.ray.streaming.state.keystate.KeyGroup;
import io.ray.streaming.state.keystate.desc.ListStateDescriptor;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
public class ListStateImplTest {
ListStateImpl<Integer> listState;
KeyStateBackend keyStateBackend;
@BeforeClass
public void setUp() throws Exception {
keyStateBackend =
new KeyStateBackend(1, new KeyGroup(1, 2), new MemoryStateBackend(new HashMap<>()));
ListStateDescriptor<Integer> descriptor =
ListStateDescriptor.build("ListStateImplTest", Integer.class);
descriptor.setTableName("table");
listState = (ListStateImpl<Integer>) keyStateBackend.getListState(descriptor);
}
@Test
public void testAddGet() throws Exception {
keyStateBackend.setContext(1L, 1);
List<Integer> list = listState.get();
Assert.assertEquals(list.size(), 0);
listState.add(1);
listState.add(2);
Assert.assertEquals(listState.get(), Arrays.asList(1, 2));
listState.add(3);
Assert.assertEquals(listState.get(), Arrays.asList(1, 2, 3));
list = listState.get();
list.set(1, -1);
listState.add(4);
Assert.assertEquals(listState.get(), Arrays.asList(1, -1, 3, 4));
keyStateBackend.setCurrentKey(2);
listState.add(5);
listState.add(6);
Assert.assertEquals(listState.get(), Arrays.asList(5, 6));
}
@Test(dependsOnMethods = {"testAddGet"})
public void testUpdate() throws Exception {
Assert.assertEquals(listState.get(), Arrays.asList(5, 6));
listState.update(Arrays.asList(7, 8, 9));
List<Integer> list = listState.get();
Assert.assertEquals(list, Arrays.asList(7, 8, 9));
list.set(1, 10);
listState.update(list);
Assert.assertEquals(list, Arrays.asList(7, 10, 9));
listState.update(null);
Assert.assertEquals(listState.get().size(), 0);
}
}
| 1,008 |
6,159 | package com.juns.wechat.common;
import java.util.Comparator;
import com.juns.wechat.bean.User;
public class PinyinComparator implements Comparator {
@Override
public int compare(Object arg0, Object arg1) {
// Sort by name, comparing the first letter of each user name's pinyin
User user0 = (User) arg0;
User user1 = (User) arg1;
String catalog0 = "";
String catalog1 = "";
if (user0 != null && user0.getUserName() != null
&& user0.getUserName().length() > 1)
catalog0 = PingYinUtil.converterToFirstSpell(user0.getUserName())
.substring(0, 1);
if (user1 != null && user1.getUserName() != null
&& user1.getUserName().length() > 1)
catalog1 = PingYinUtil.converterToFirstSpell(user1.getUserName())
.substring(0, 1);
int flag = catalog0.compareTo(catalog1);
return flag;
}
}
| 354 |
728 | <filename>bundles/sirix-saxon/src/test/java/org/sirix/saxon/wrapper/TestNodeWrapper.java
/**
* Copyright (c) 2011, University of Konstanz, Distributed Systems Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the University of Konstanz nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sirix.saxon.wrapper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import javax.xml.stream.XMLEventReader;
import net.sf.saxon.Configuration;
import net.sf.saxon.om.Axis;
import net.sf.saxon.om.NodeInfo;
import net.sf.saxon.pattern.NameTest;
import net.sf.saxon.s9api.Processor;
import net.sf.saxon.trans.XPathException;
import net.sf.saxon.tree.iter.AxisIterator;
import net.sf.saxon.tree.iter.NamespaceIterator.NamespaceNodeImpl;
import net.sf.saxon.type.Type;
import net.sf.saxon.value.UntypedAtomicValue;
import net.sf.saxon.value.Value;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.sirix.Holder;
import org.sirix.TestHelper;
import org.sirix.access.Databases;
import org.sirix.access.conf.DatabaseConfiguration;
import org.sirix.access.conf.ResourceConfiguration;
import org.sirix.access.conf.SessionConfiguration;
import org.sirix.api.Database;
import org.sirix.api.NodeWriteTrx;
import org.sirix.api.Session;
import org.sirix.exception.SirixException;
import org.sirix.service.xml.shredder.Insert;
import org.sirix.service.xml.shredder.XMLShredder;
/**
* Test implemented methods in NodeWrapper.
*
* @author <NAME>, University of Konstanz
*
*/
public class TestNodeWrapper {
private static final DatabaseConfiguration DB_CONFIG = new DatabaseConfiguration(
TestHelper.PATHS.PATH1.getFile());
private Database mDatabase;
/** sirix session on sirix test document. */
private Holder mHolder;
/** Document node. */
private NodeWrapper node;
@Before
public void beforeMethod() throws SirixException {
Databases.truncateDatabase(DB_CONFIG);
Databases.createDatabase(DB_CONFIG);
TestHelper.createTestDocument();
mHolder = Holder.generateRtx();
final Processor proc = new Processor(false);
final Configuration config = proc.getUnderlyingConfiguration();
node = new DocumentWrapper(mHolder.getSession(), config).getNodeWrapper();
}
@After
public void afterMethod() throws SirixException {
mHolder.close();
}
@Test
public void testAtomize() throws Exception {
final Value value = node.atomize();
assertEquals(true, value instanceof UntypedAtomicValue);
assertEquals("oops1foooops2baroops3", value.getStringValue());
}
@Test
public void testCompareOrder() throws XPathException, SirixException {
final Processor proc = new Processor(false);
final Configuration config = proc.getUnderlyingConfiguration();
final Session session = generateSession();
final NodeWriteTrx trx = session.beginNodeWriteTrx();
trx.commit();
trx.close();
// Not the same document.
NodeInfo node = new DocumentWrapper(session, config);
NodeInfo other = new NodeWrapper(new DocumentWrapper(mHolder.getSession(),
config), 3);
try {
node.compareOrder(other);
fail();
} catch (final IllegalStateException e) {
}
// Before.
node = new DocumentWrapper(mHolder.getSession(), config);
other = new NodeWrapper(new DocumentWrapper(mHolder.getSession(), config),
3);
assertEquals(-1, node.compareOrder(other));
// After.
node = new NodeWrapper(new DocumentWrapper(mHolder.getSession(), config), 3);
other = new NodeWrapper(new DocumentWrapper(mHolder.getSession(), config),
0);
assertEquals(1, node.compareOrder(other));
// Same.
node = new NodeWrapper(new DocumentWrapper(mHolder.getSession(), config), 3);
other = new NodeWrapper(new DocumentWrapper(mHolder.getSession(), config),
3);
assertEquals(0, node.compareOrder(other));
session.close();
mDatabase.close();
}
@Test
public void testGetAttributeValue() throws SirixException {
final Processor proc = new Processor(false);
node = new NodeWrapper(new DocumentWrapper(mHolder.getSession(),
proc.getUnderlyingConfiguration()), 1);
final AxisIterator iterator = node.iterateAxis(Axis.ATTRIBUTE);
final NodeInfo attribute = (NodeInfo) iterator.next();
node.getNamePool().allocate(attribute.getPrefix(), attribute.getURI(),
attribute.getLocalPart());
// Only supported on element nodes.
// node = (NodeWrapper) node.getParent();
assertEquals("j", node.getAttributeValue(attribute.getFingerprint()));
}
@Test
public void testGetBaseURI() throws Exception {
// Test with xml:base specified.
final File source = new File("src" + File.separator + "test"
+ File.separator + "resources" + File.separator + "data"
+ File.separator + "testBaseURI.xml");
final Session session = generateSession();
final NodeWriteTrx wtx = session.beginNodeWriteTrx();
final XMLEventReader reader = XMLShredder.createFileReader(source);
final XMLShredder shredder = new XMLShredder.Builder(wtx, reader,
Insert.ASFIRSTCHILD).commitAfterwards().build();
shredder.call();
wtx.close();
final Processor proc = new Processor(false);
final NodeInfo doc = new DocumentWrapper(session,
proc.getUnderlyingConfiguration());
doc.getNamePool().allocate("xml", "http://www.w3.org/XML/1998/namespace",
"base");
doc.getNamePool().allocate("", "", "baz");
final NameTest test = new NameTest(Type.ELEMENT, "", "baz",
doc.getNamePool());
final AxisIterator iterator = doc.iterateAxis(Axis.DESCENDANT, test);
final NodeInfo baz = (NodeInfo) iterator.next();
assertEquals("http://example.org", baz.getBaseURI());
session.close();
mDatabase.close();
}
@Test
public void testGetDeclaredNamespaces() {
// Namespace declared.
final AxisIterator iterator = node.iterateAxis(Axis.CHILD);
node = (NodeWrapper) iterator.next();
final int[] namespaces = node.getDeclaredNamespaces(new int[1]);
node.getNamePool().allocateNamespaceCode("p", "ns");
final int expected = node.getNamePool().getNamespaceCode("p", "ns");
assertEquals(expected, namespaces[0]);
// Namespace not declared (on element node) -- returns zero length
// array.
final AxisIterator iter = node.iterateAxis(Axis.DESCENDANT);
node = (NodeWrapper) iter.next();
node = (NodeWrapper) iter.next();
final int[] namesp = node.getDeclaredNamespaces(new int[1]);
assertTrue(namesp.length == 0);
// Namespace not declared on other nodes -- returns null.
final AxisIterator it = node.iterateAxis(Axis.DESCENDANT);
node = (NodeWrapper) it.next();
assertNull(node.getDeclaredNamespaces(new int[1]));
}
@Test
public void testGetStringValueCS() {
// Test on document node.
assertEquals("oops1foooops2baroops3", node.getStringValueCS());
// Test on element node.
AxisIterator iterator = node.iterateAxis(Axis.DESCENDANT);
node = (NodeWrapper) iterator.next();
assertEquals("oops1foooops2baroops3", node.getStringValueCS());
// Test on namespace node.
iterator = node.iterateAxis(Axis.NAMESPACE);
NamespaceNodeImpl namespace = (NamespaceNodeImpl) iterator.next();
/*
* Elements have always the default xml:NamespaceConstant.XML namespace, so
* we have to search if "ns" is found somewhere in the iterator (order
* unpredictable because it's implemented with a HashMap internally).
*/
while (!"ns".equals(namespace.getStringValueCS()) && namespace != null) {
namespace = (NamespaceNodeImpl) iterator.next();
}
if (namespace == null) {
fail("namespace is null!");
} else {
assertEquals("ns", namespace.getStringValueCS());
}
// Test on attribute node.
final NodeWrapper attrib = (NodeWrapper) node.iterateAxis(Axis.ATTRIBUTE)
.next();
assertEquals("j", attrib.getStringValueCS());
// Test on text node.
final NodeWrapper text = (NodeWrapper) node.iterateAxis(Axis.CHILD).next();
assertEquals("oops1", text.getStringValueCS());
}
@Test
public void testGetSiblingPosition() {
// Test every node in test document.
final AxisIterator iterator = node.iterateAxis(Axis.DESCENDANT);
node = (NodeWrapper) iterator.next();
node = (NodeWrapper) iterator.next();
assertEquals(0, node.getSiblingPosition());
node = (NodeWrapper) iterator.next();
assertEquals(1, node.getSiblingPosition());
node = (NodeWrapper) iterator.next();
assertEquals(0, node.getSiblingPosition());
node = (NodeWrapper) iterator.next();
assertEquals(1, node.getSiblingPosition());
node = (NodeWrapper) iterator.next();
assertEquals(2, node.getSiblingPosition());
node = (NodeWrapper) iterator.next();
assertEquals(3, node.getSiblingPosition());
node = (NodeWrapper) iterator.next();
assertEquals(0, node.getSiblingPosition());
node = (NodeWrapper) iterator.next();
assertEquals(1, node.getSiblingPosition());
node = (NodeWrapper) iterator.next();
assertEquals(4, node.getSiblingPosition());
}
@Ignore
public Session generateSession() throws SirixException {
final DatabaseConfiguration dbConfig = new DatabaseConfiguration(
TestHelper.PATHS.PATH2.getFile());
Databases.truncateDatabase(dbConfig);
Databases.createDatabase(dbConfig);
mDatabase = Databases.openDatabase(dbConfig.getFile());
mDatabase.createResource(new ResourceConfiguration.Builder(
TestHelper.RESOURCE, dbConfig).build());
return mDatabase.getSession(new SessionConfiguration.Builder(
TestHelper.RESOURCE).build());
}
}
| 3,596 |
1,145 | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SRC_RESOURCES_MEMORY_RESOURCE_HANDLER_H_
#define SRC_RESOURCES_MEMORY_RESOURCE_HANDLER_H_
#include <memory>
#include <string>
using ::std::string;
#include "base/macros.h"
#include "system_api/kernel_api.h"
#include "lmctfy/controllers/memory_controller.h"
#include "lmctfy/resources/cgroup_resource_handler.h"
#include "include/lmctfy.h"
#include "util/task/statusor.h"
namespace containers {
namespace lmctfy {
class CgroupFactory;
class ContainerSpec;
class ContainerStats;
class EventFdNotifications;
class ResourceHandler;
typedef ::system_api::KernelAPI KernelApi;
// Factory for MemoryResourceHandlers.
//
// Memory has a 1:1 mapping from container name to cgroup hierarchy.
//
// Class is thread-safe.
class MemoryResourceHandlerFactory : public CgroupResourceHandlerFactory {
public:
// Create an instance of this factory. If the resource is not supported on
// this machine a NOT_FOUND error is returned. Does not take ownership of
// any argument.
static ::util::StatusOr<MemoryResourceHandlerFactory *> New(
CgroupFactory *cgroup_factory, const KernelApi *kernel,
EventFdNotifications *eventfd_notifications);
// Takes ownership of memory_controller_factory. Does not own cgroup_factory
// or kernel.
MemoryResourceHandlerFactory(
const MemoryControllerFactory *memory_controller_factory,
CgroupFactory *cgroup_factory,
const KernelApi *kernel);
virtual ~MemoryResourceHandlerFactory() {}
protected:
virtual ::util::StatusOr<ResourceHandler *> GetResourceHandler(
const string &container_name) const;
virtual ::util::StatusOr<ResourceHandler *> CreateResourceHandler(
const string &container_name, const ContainerSpec &spec) const;
private:
// Controller factory for memory cgroup controllers.
const ::std::unique_ptr<const MemoryControllerFactory>
memory_controller_factory_;
friend class MemoryResourceHandlerFactoryTest;
DISALLOW_COPY_AND_ASSIGN(MemoryResourceHandlerFactory);
};
// Resource handler for memory. Currently only does simple memory management
// used for subcontainers and only uses the memory cgroup hierarchy.
//
// Class is thread-safe.
class MemoryResourceHandler : public CgroupResourceHandler {
public:
// Does not own kernel. Takes ownership of memory_controller.
MemoryResourceHandler(
const string &container_name,
const KernelApi *kernel,
MemoryController *memory_controller);
virtual ~MemoryResourceHandler() {}
virtual ::util::Status CreateOnlySetup(const ContainerSpec &spec);
virtual ::util::Status Update(const ContainerSpec &spec,
Container::UpdatePolicy policy);
virtual ::util::Status Stats(Container::StatsType type,
ContainerStats *output) const;
virtual ::util::Status Spec(ContainerSpec *spec) const;
virtual ::util::StatusOr<Container::NotificationId> RegisterNotification(
const EventSpec &spec, Callback1< ::util::Status> *callback);
private:
::util::Status SetDirty(const MemorySpec_Dirty &dirty,
Container::UpdatePolicy policy);
// Gets the dirty memory spec from the kernel and updates 'memory_spec'.
::util::Status GetDirtyMemorySpec(MemorySpec *memory_spec) const;
// The Memory cgroup controller, it is owned by controllers.
MemoryController *memory_controller_;
DISALLOW_COPY_AND_ASSIGN(MemoryResourceHandler);
};
} // namespace lmctfy
} // namespace containers
#endif // SRC_RESOURCES_MEMORY_RESOURCE_HANDLER_H_
| 1,258 |
3,212 | <filename>nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-web/nifi-web-security/src/main/java/org/apache/nifi/web/security/anonymous/NiFiAnonymousAuthenticationProvider.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.web.security.anonymous;
import org.apache.nifi.authorization.Authorizer;
import org.apache.nifi.authorization.user.NiFiUserDetails;
import org.apache.nifi.authorization.user.StandardNiFiUser;
import org.apache.nifi.util.NiFiProperties;
import org.apache.nifi.web.security.InvalidAuthenticationException;
import org.apache.nifi.web.security.NiFiAuthenticationProvider;
import org.apache.nifi.web.security.token.NiFiAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.AuthenticationException;
/**
* Authentication provider that resolves requests to the anonymous NiFi user when
* anonymous authentication is permitted by the configuration.
*/
public class NiFiAnonymousAuthenticationProvider extends NiFiAuthenticationProvider {
final NiFiProperties properties;
public NiFiAnonymousAuthenticationProvider(NiFiProperties nifiProperties, Authorizer authorizer) {
super(nifiProperties, authorizer);
this.properties = nifiProperties;
}
@Override
public Authentication authenticate(Authentication authentication) throws AuthenticationException {
final NiFiAnonymousAuthenticationRequestToken request = (NiFiAnonymousAuthenticationRequestToken) authentication;
if (request.isSecureRequest() && !properties.isAnonymousAuthenticationAllowed()) {
throw new InvalidAuthenticationException("Anonymous authentication has not been configured.");
}
return new NiFiAuthenticationToken(new NiFiUserDetails(StandardNiFiUser.populateAnonymousUser(null, request.getClientAddress())));
}
@Override
public boolean supports(Class<?> authentication) {
return NiFiAnonymousAuthenticationRequestToken.class.isAssignableFrom(authentication);
}
}
| 750 |
441 | <filename>Source/Urho3D/Glow/LightmapUVGenerator.cpp
//
// Copyright (c) 2017-2020 the rbfx project.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#include "../Glow/LightmapUVGenerator.h"
#include <xatlas.h>
namespace Urho3D
{
const ea::string LightmapUVGenerationSettings::LightmapSizeKey{ "LightmapSize" };
const ea::string LightmapUVGenerationSettings::LightmapDensityKey{ "LightmapDensity" };
const ea::string LightmapUVGenerationSettings::LightmapSharedUV{ "LightmapSharedUV" };
bool GenerateLightmapUV(ModelView& modelView, const LightmapUVGenerationSettings& settings)
{
// Create atlas
const auto releaseAtlas = [](xatlas::Atlas* atlas)
{
xatlas::Destroy(atlas);
};
ea::unique_ptr<xatlas::Atlas, decltype(releaseAtlas)> atlas(xatlas::Create(), releaseAtlas);
// Fill input mesh
// TODO: Do something more clever about connectivity
auto& sourceGeometries = modelView.GetGeometries();
ea::vector<ea::pair<unsigned, unsigned>> meshToGeometryLodMapping;
unsigned geometryIndex = 0;
for (const GeometryView& geometryView : sourceGeometries)
{
unsigned lodIndex = 0;
for (const GeometryLODView& geometryLodView : geometryView.lods_)
{
if (!geometryLodView.vertices_.empty())
{
xatlas::MeshDecl meshDecl;
meshDecl.vertexCount = geometryLodView.vertices_.size();
meshDecl.vertexPositionData = &geometryLodView.vertices_[0].position_;
meshDecl.vertexPositionStride = sizeof(ModelVertex);
meshDecl.vertexNormalData = &geometryLodView.vertices_[0].normal_;
meshDecl.vertexNormalStride = sizeof(ModelVertex);
meshDecl.indexData = geometryLodView.indices_.data();
meshDecl.indexCount = geometryLodView.indices_.size();
meshDecl.indexFormat = xatlas::IndexFormat::UInt32;
const xatlas::AddMeshError::Enum error = xatlas::AddMesh(atlas.get(), meshDecl);
if (error != xatlas::AddMeshError::Success)
return false;
meshToGeometryLodMapping.emplace_back(geometryIndex, lodIndex);
}
++lodIndex;
}
++geometryIndex;
}
// Generate things
xatlas::PackOptions packOptions;
packOptions.padding = 1;
packOptions.texelsPerUnit = settings.texelPerUnit_;
xatlas::AddMeshJoin(atlas.get());
xatlas::Generate(atlas.get(), {}, nullptr, packOptions);
// Copy output
const IntVector2 atlasSize{ static_cast<int>(atlas->width), static_cast<int>(atlas->height) };
const float uScale = 1.f / atlas->width;
const float vScale = 1.f / atlas->height;
for (unsigned meshIndex = 0; meshIndex < atlas->meshCount; ++meshIndex)
{
const unsigned geometryIndex = meshToGeometryLodMapping[meshIndex].first;
const unsigned lodIndex = meshToGeometryLodMapping[meshIndex].second;
GeometryLODView& geometryLodView = sourceGeometries[geometryIndex].lods_[lodIndex];
const xatlas::Mesh& mesh = atlas->meshes[meshIndex];
ea::vector<ModelVertex> newVertices;
for (unsigned vertexIndex = 0; vertexIndex < mesh.vertexCount; ++vertexIndex)
{
const xatlas::Vertex& vertex = mesh.vertexArray[vertexIndex];
ModelVertex newVertex = geometryLodView.vertices_[vertex.xref];
newVertex.uv_[settings.uvChannel_].x_ = uScale * vertex.uv[0];
newVertex.uv_[settings.uvChannel_].y_ = vScale * vertex.uv[1];
newVertices.push_back(newVertex);
}
ea::vector<unsigned> newIndices;
newIndices.assign(mesh.indexArray, mesh.indexArray + mesh.indexCount);
geometryLodView.vertices_ = ea::move(newVertices);
geometryLodView.indices_ = ea::move(newIndices);
}
// Finalize
ModelVertexFormat vertexFormat = modelView.GetVertexFormat();
vertexFormat.uv_[settings.uvChannel_] = TYPE_VECTOR2;
modelView.SetVertexFormat(vertexFormat);
modelView.AddMetadata(LightmapUVGenerationSettings::LightmapSizeKey, atlasSize);
modelView.AddMetadata(LightmapUVGenerationSettings::LightmapDensityKey, settings.texelPerUnit_);
modelView.AddMetadata(LightmapUVGenerationSettings::LightmapSharedUV, false);
return true;
}
}
| 2,053 |
1,008 | /*
* Copyright (C) 2019 The Turms Project
* https://github.com/turms-im/turms
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package im.turms.service.workflow.access.http.codec;
import org.reactivestreams.Publisher;
import org.springframework.core.ResolvableType;
import org.springframework.core.codec.AbstractEncoder;
import org.springframework.core.codec.CharSequenceEncoder;
import org.springframework.core.codec.EncodingException;
import org.springframework.core.codec.Hints;
import org.springframework.core.io.buffer.DataBuffer;
import org.springframework.core.io.buffer.DataBufferFactory;
import org.springframework.core.io.buffer.DataBufferUtils;
import org.springframework.core.log.LogFormatUtils;
import org.springframework.lang.Nullable;
import org.springframework.util.MimeType;
import reactor.core.publisher.Flux;
import java.nio.charset.Charset;
import java.nio.charset.CoderMalfunctionError;
import java.nio.charset.StandardCharsets;
import java.util.Map;
/**
* @author <NAME>
* @implNote We don't use {@link CharSequenceEncoder} because it multiplies the actual size of the text by 3 for UTF-8,
* which is a disaster for the system. For example, if the text output of the Prometheus metrics is actually 6000 bytes,
* {@link CharSequenceEncoder} will request a ByteBuf of 18000 bytes.
*/
public final class TurmsCharSequenceEncoder extends AbstractEncoder<CharSequence> {
public static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
public TurmsCharSequenceEncoder(MimeType... mimeTypes) {
super(mimeTypes);
}
@Override
public boolean canEncode(ResolvableType elementType, @Nullable MimeType mimeType) {
Class<?> clazz = elementType.toClass();
return super.canEncode(elementType, mimeType) && CharSequence.class.isAssignableFrom(clazz);
}
@Override
public Flux<DataBuffer> encode(Publisher<? extends CharSequence> inputStream,
DataBufferFactory bufferFactory, ResolvableType elementType,
@Nullable MimeType mimeType, @Nullable Map<String, Object> hints) {
return Flux.from(inputStream).map(charSequence ->
encodeValue(charSequence, bufferFactory, elementType, mimeType, hints));
}
@Override
public DataBuffer encodeValue(CharSequence charSequence, DataBufferFactory bufferFactory,
ResolvableType valueType, @Nullable MimeType mimeType, @Nullable Map<String, Object> hints) {
if (!Hints.isLoggingSuppressed(hints)) {
LogFormatUtils.traceDebug(logger, traceOn -> {
String formatted = LogFormatUtils.formatValue(charSequence, !traceOn);
return Hints.getLogPrefix(hints) + "Writing " + formatted;
});
}
boolean release = true;
Charset charset = getCharset(mimeType);
DataBuffer dataBuffer = bufferFactory.allocateBuffer(charSequence.length());
try {
dataBuffer.write(charSequence, charset);
release = false;
} catch (CoderMalfunctionError ex) {
throw new EncodingException("String encoding error: " + ex.getMessage(), ex);
} finally {
if (release) {
DataBufferUtils.release(dataBuffer);
}
}
return dataBuffer;
}
private Charset getCharset(@Nullable MimeType mimeType) {
if (mimeType != null && mimeType.getCharset() != null) {
return mimeType.getCharset();
} else {
return DEFAULT_CHARSET;
}
}
} | 1,536 |
353 | <filename>ceph_deploy/tests/unit/util/test_system.py<gh_stars>100-1000
from mock import Mock
from pytest import raises
from ceph_deploy.util import system
from ceph_deploy import exc
class TestExecutablePath(object):
def test_returns_path(self):
fake_conn = Mock()
fake_conn.remote_module.which = Mock(return_value='/path')
result = system.executable_path(fake_conn, 'foo')
assert result == '/path'
def test_cannot_find_executable(self):
fake_conn = Mock()
fake_conn.remote_module.which = Mock(return_value=None)
with raises(exc.ExecutableNotFound):
system.executable_path(fake_conn, 'foo')
class TestIsUpstart(object):
def test_it_is_actually_systemd(self):
fake_conn = Mock()
fake_conn.remote_module.grep = Mock(return_value=True)
result = system.is_upstart(fake_conn)
assert result is False
def test_no_initctl(self):
fake_conn = Mock()
fake_conn.remote_module.grep = Mock(return_value=False)
fake_conn.remote_module.which = Mock(return_value=None)
result = system.is_upstart(fake_conn)
assert result is False
def test_initctl_version_says_upstart(self, monkeypatch):
fake_conn = Mock()
fake_conn.remote_module.grep = Mock(return_value=False)
fake_conn.remote_module.which = Mock(return_value='/bin/initctl')
fake_stdout = ([b'init', b'(upstart 1.12.1)'], [], 0)
fake_check = Mock(return_value=fake_stdout)
monkeypatch.setattr("ceph_deploy.util.system.remoto.process.check", lambda *a: fake_check())
result = system.is_upstart(fake_conn)
assert result is True
def test_initctl_version_says_something_else(self, monkeypatch):
fake_conn = Mock()
fake_conn.remote_module.grep = Mock(return_value=False)
fake_conn.remote_module.which = Mock(return_value='/bin/initctl')
fake_stdout = ([b'nosh', b'version', b'1.14'], [], 0)
fake_check = Mock(return_value=fake_stdout)
monkeypatch.setattr("ceph_deploy.util.system.remoto.process.check", lambda *a: fake_check())
result = system.is_upstart(fake_conn)
assert result is False
| 928 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-6gh6-8c4x-5346",
"modified": "2022-05-13T01:43:47Z",
"published": "2022-05-13T01:43:47Z",
"aliases": [
"CVE-2017-15536"
],
"details": "An issue was discovered in Cloudera Data Science Workbench (CDSW) 1.x before 1.2.0. Several web application vulnerabilities allow malicious authenticated users of CDSW to escalate privileges in CDSW. CDSW users can exploit these vulnerabilities in combination to gain root access to CDSW nodes, gain access to the CDSW database which includes Kerberos keytabs of CDSW users and bcrypt hashed passwords, and gain access to other privileged information such as session tokens, invitation tokens, and environment variables.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2017-15536"
},
{
"type": "WEB",
"url": "https://www.cloudera.com/documentation/other/security-bulletins/topics/Security-Bulletin.html#tsb_248"
}
],
"database_specific": {
"cwe_ids": [
"CWE-269"
],
"severity": "HIGH",
"github_reviewed": false
}
} | 515 |
1,219 | import numpy as np
from sklearn.grid_search import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.utils.testing import assert_array_almost_equal
from splearn.grid_search import SparkGridSearchCV
from splearn.naive_bayes import SparkMultinomialNB
from splearn.utils.testing import SplearnTestCase
class TestGridSearchCV(SplearnTestCase):
def test_same_result(self):
X, y, Z = self.make_classification(2, 40000, nonnegative=True)
parameters = {'alpha': [0.1, 1, 10]}
fit_params = {'classes': np.unique(y)}
local_estimator = MultinomialNB()
local_grid = GridSearchCV(estimator=local_estimator,
param_grid=parameters)
estimator = SparkMultinomialNB()
grid = SparkGridSearchCV(estimator=estimator,
param_grid=parameters,
fit_params=fit_params)
local_grid.fit(X, y)
grid.fit(Z)
locscores = [r.mean_validation_score for r in local_grid.grid_scores_]
scores = [r.mean_validation_score for r in grid.grid_scores_]
assert_array_almost_equal(locscores, scores, decimal=2)
| 524 |
348 | {"nom":"Beaucaire","circ":"2ème circonscription","dpt":"Gers","inscrits":225,"abs":114,"votants":111,"blancs":5,"nuls":3,"exp":103,"res":[{"nuance":"REM","nom":"<NAME>","voix":56},{"nuance":"SOC","nom":"<NAME>","voix":47}]} | 90 |
1,104 | <gh_stars>1000+
package org.apache.solr.handler.admin;
import java.io.BufferedReader;
import java.io.File;
import java.io.InputStreamReader;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Monitors CPU, memory, disk and JVM usage on Linux.
*
* @author peng,.chen
*
*/
public class LinuxOsInfo {
/**
     * group1: user CPU usage (%) <br>
     * group2: system CPU usage (%)
*/
static final String cpuRegex = "Cpu\\(s\\):\\s+(\\d+\\.\\d+)%us,\\s+(\\d+\\.\\d+)%sy.*";
public static final Pattern cpuPattern = Pattern.compile(cpuRegex);
/**
* group1: total mem(K)<br>
* group2: used mem(K)<br>
* group3: free mem(K)<br>
*/
static final String memRegex = "Mem:\\s+(\\d+)k\\s+total,\\s+(\\d+)k\\s+used,\\s+(\\d+)k\\s+free,.*";
public static final Pattern memPattern = Pattern.compile(memRegex);
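    // Illustrative input for the two patterns above, from an older procps-style
    // "top -b -n 1" run. The exact format varies by top version/locale; these
    // sample lines are assumptions, not captured output:
    //   Cpu(s):  3.2%us,  1.0%sy,  0.5%ni, 95.0%id,  0.2%wa, ...
    //   Mem:   8061632k total,  6521744k used,  1539888k free,   123456k buffers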
public static LinuxOsInfo INSTANCE = null;
/**
*
     * @creation 2012-1-16 12:03:15 PM
* @return
*/
public static LinuxOsInfo getInstance(){
if(INSTANCE == null){
return new LinuxOsInfo();
}
return INSTANCE;
}
private LinuxOsInfo() {
}
/**
*
     * @creation 2012-1-16 12:55:07 PM
*/
public static Info getInfo(){
Info info = new Info();
double[] cpuData = new double[]{0, 0};
try {
cpuData = getInstance().getCpuData();
} catch (Exception e) {
}
info.setCpuUsed(cpuData[0]);
double diskFree = getInstance().getDiskFree();
info.setDiskFree(diskFree);
double diskTotal = getInstance().getDiskTotal();
info.setDiskTotal(diskTotal);
double[] memData = new double[]{0, 0, 0};
try {
memData = getInstance().getMemData();
} catch (Exception e) {
e.printStackTrace();
}
info.setMemUsed(memData[1]);
info.setMemTotal(memData[0]);
info.setPath(new File("/").getAbsolutePath());
return info;
}
/**
     * Gets CPU usage; returns a double array {usCpu, syCpu}.
*
* @return
* @throws Exception
*/
public double[] getCpuData() throws Exception {
double[] data = null;
if(!isLinuxOs()){
return new double[0];
}
Runtime rt = Runtime.getRuntime();
        Process p = rt.exec("top -b -n 1"); // invoke the system "top" command
BufferedReader in = null;
try {
in = new BufferedReader(new InputStreamReader(p.getInputStream()));
String str = null;
while ((str = in.readLine()) != null) {
Matcher matcher = cpuPattern.matcher(str);
if(matcher.find()){
String usCpu = matcher.group(1);
String syCpu = matcher.group(2);
data = new double[]{Double.parseDouble(usCpu), Double.parseDouble(syCpu)};
break;
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
in.close();
}
return data;
}
/**
     * Monitors memory; returns a double array {totalMem, usedMem, freeMem}.
*
* @return
* @throws Exception
*/
public double[] getMemData() throws Exception {
double[] data = null;
if(!isLinuxOs()){
return new double[0];
}
Runtime rt = Runtime.getRuntime();
        Process p = rt.exec("top -b -n 1"); // invoke the system "top" command
BufferedReader in = null;
try {
in = new BufferedReader(new InputStreamReader(p.getInputStream()));
String str = null;
while ((str = in.readLine()) != null) {
Matcher matcher = memPattern.matcher(str);
if(matcher.find()){
String totalMem = matcher.group(1);
String usedMem = matcher.group(2);
String freeMem = matcher.group(3);
data = new double[]{Double.parseDouble(totalMem), Double.parseDouble(usedMem), Double.parseDouble(freeMem)};
// System.out.println("totalMem:"+totalMem+", usedMem:"+usedMem+", freeMem:"+freeMem);
break;
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
in.close();
}
return data;
}
/**
     * Gets the total capacity of the current disk.
     * @creation 2012-1-16 12:06:22 PM
* @return
*/
public double getDiskTotal(){
return new File("/").getTotalSpace();
}
/**
     * Gets the remaining free disk space.
     * @creation 2012-1-16 12:06:54 PM
* @return
*/
public double getDiskFree(){
return new File("/").getFreeSpace();
}
/**
     * Gets disk usage as a percentage (used / total * 100).
*
* @return
* @throws Exception
*/
public double getDeskUsage() throws Exception {
double totalHD = 0;
double usedHD = 0;
Runtime rt = Runtime.getRuntime();
        Process p = rt.exec("df -hl"); // "df -hl" reports filesystem disk space usage
BufferedReader in = null;
try {
in = new BufferedReader(new InputStreamReader(p.getInputStream()));
String str = null;
String[] strArray = null;
int flag = 0;
while ((str = in.readLine()) != null) {
int m = 0;
// if (flag > 0) {
// flag++;
strArray = str.split(" ");
for (String tmp : strArray) {
if (tmp.trim().length() == 0)
continue;
++m;
// System.out.println("----tmp----" + tmp);
if (tmp.indexOf("G") != -1) {
if (m == 2) {
// System.out.println("---G----" + tmp);
if (!tmp.equals("") && !tmp.equals("0"))
totalHD += Double.parseDouble(tmp.substring(0,
tmp.length() - 1)) * 1024;
}
if (m == 3) {
// System.out.println("---G----" + tmp);
if (!tmp.equals("none") && !tmp.equals("0"))
usedHD += Double.parseDouble(tmp.substring(0,
tmp.length() - 1)) * 1024;
}
}
if (tmp.indexOf("M") != -1) {
if (m == 2) {
// System.out.println("---M---" + tmp);
if (!tmp.equals("") && !tmp.equals("0"))
totalHD += Double.parseDouble(tmp.substring(0,
tmp.length() - 1));
}
if (m == 3) {
// System.out.println("---M---" + tmp);
if (!tmp.equals("none") && !tmp.equals("0"))
usedHD += Double.parseDouble(tmp.substring(0,
tmp.length() - 1));
// System.out.println("----3----" + usedHD);
}
}
}
// }
}
} catch (Exception e) {
e.printStackTrace();
} finally {
in.close();
}
return (usedHD / totalHD) * 100;
}
/**
     * Whether the operating system is Linux.
     * @creation 2012-5-11 5:01:19 PM
* @return
*/
public boolean isLinuxOs(){
String os = System.getProperty("os.name");
if(os.contains("Linux")){
return true;
}
return false;
}
/**
*
     * @creation 2012-1-16 12:04:21 PM
* @param args
*/
public static void main1(String[] args){
System.out.println(Runtime.getRuntime().maxMemory());;//Returns the maximum amount of memory that the Java virtual machine will attempt to use.
System.out.println(Runtime.getRuntime().totalMemory());;//Returns the total amount of memory in the Java virtual machine.
System.out.println(Runtime.getRuntime().freeMemory());//Returns the amount of free memory in the Java Virtual Machine.
System.out.println(Runtime.getRuntime().availableProcessors());
// Runtime.getRuntime().removeShutdownHook(hook);
// Runtime.getRuntime().addShutdownHook(hook)
}
public static void main2(String[] args) throws Exception {
LinuxOsInfo cpu = new LinuxOsInfo();
// System.out.println("---------------cpu used:" + cpu.getCpuUsage() + "%");
// System.out.println("---------------mem used:" + cpu.getMemUsage() + "%");
// System.out.println("---------------HD used:" + cpu.getDeskUsage() + "%");
System.out.println("---------------disk free:" + cpu.getDiskFree());
System.out.println("---------------disk total:" + cpu.getDiskTotal());
        System.out.println("------------JVM monitoring----------------------");
Runtime lRuntime = Runtime.getRuntime();
        System.out.println("--------------Free Memory:" + lRuntime.freeMemory() + "K");
        System.out.println("--------------Max Memory:" + lRuntime.maxMemory() + "K");
        System.out.println("--------------Total Memory:" + lRuntime.totalMemory() + "K");
System.out.println("---------------Available Processors :" + lRuntime.availableProcessors());
}
public static void main(String[] args) {
Info info = LinuxOsInfo.getInfo();
System.out.println("cpuUsed : "+info.getCpuUsed());
System.out.println("diskFree : "+info.getDiskFree());
System.out.println("diskTotal : "+info.getDiskTotal());
System.out.println("memTotal : "+info.getMemTotal());
System.out.println("memUsed : "+info.getMemUsed());
System.out.println("currentPath : "+info.getPath());
System.out.println();
}
}
| 3,372 |
1,176 | <reponame>zuzhi/rssant
from typing import List, Dict, Any
import logging
import random
import ipaddress
import ssl
import socket
import asyncio
from urllib.parse import urlparse
from collections import defaultdict, OrderedDict
import yarl
import aiohttp
import requests.adapters
from rssant_config import CONFIG
from rssant_common import _proxy_helper
from .rss_proxy import RSSProxyClient, ProxyStrategy
from .helper import get_or_create_event_loop
LOG = logging.getLogger(__name__)
_cache_records_text = """
172.16.58.3 rsshub.app
172.16.58.3 rsshub.app
172.16.17.32 kindle4rss.com
172.16.17.32 feedmaker.kindle4rss.com
192.168.127.12 github.com
172.16.31.10 api.github.com
"""
def _read_records(text) -> dict:
records = defaultdict(set)
for line in text.strip().splitlines():
ip, host = line.split()
records[host].add(ip)
return records
_CACHE_RECORDS = _read_records(_cache_records_text)
class DNSError(Exception):
"""DNS Error"""
class PrivateAddressError(DNSError):
"""
Private IP address Error.
Prevent request private address, which will attack local network.
"""
class NameNotResolvedError(DNSError):
"""Name not resolved Error"""
def _is_public_ipv4(value):
try:
ip = ipaddress.ip_address(value)
except ipaddress.AddressValueError:
return False
return ip.version == 4 and (not ip.is_private)
class DNSService:
def __init__(self, client: RSSProxyClient, records: dict = None, allow_private_address: bool = False):
self.hosts = list(records or {})
self.update(records or {})
self.client = client
self.allow_private_address = allow_private_address
@staticmethod
def create(
*,
proxy_url: str = None,
rss_proxy_url: str = None,
rss_proxy_token: str = None,
allow_private_address: bool = False,
):
def proxy_strategy(url):
if 'google.com' in url:
return ProxyStrategy.PROXY_FIRST
else:
return ProxyStrategy.DIRECT_FIRST
_rss_proxy_client = RSSProxyClient(
proxy_url=proxy_url,
rss_proxy_url=rss_proxy_url,
rss_proxy_token=rss_proxy_token,
proxy_strategy=proxy_strategy,
)
service = DNSService(
client=_rss_proxy_client,
records=_CACHE_RECORDS,
allow_private_address=allow_private_address,
)
return service
def update(self, records: dict):
new_records = defaultdict(set)
for host, ip_set in records.items():
new_records[host].update(ip_set)
self.records = new_records
def is_resolved_host(self, host) -> bool:
return bool(self.records.get(host))
def is_resolved_url(self, url) -> bool:
host = urlparse(url).hostname
return self.is_resolved_host(host)
def _sync_resolve(self, host) -> list:
addrinfo = socket.getaddrinfo(host, None)
for family, __, __, __, sockaddr in addrinfo:
if family == socket.AF_INET:
ip, __ = sockaddr
yield ip
elif family == socket.AF_INET6:
ip, __, __, __ = sockaddr
yield ip
def _local_resolve(self, host) -> list:
ip_set = self.records.get(host)
return list(ip_set) if ip_set else []
def _select_ip(self, ip_set: list, *, host: str) -> list:
# Discard private and prefer ipv4
groups = OrderedDict([
((4, False), []),
((6, False), []),
((4, True), []),
((6, True), []),
])
for ip in ip_set:
ip = ipaddress.ip_address(ip)
key = (ip.version, ip.is_private)
if key not in groups:
LOG.error(f'unknown version IP {ip}')
continue
groups[key].append(str(ip))
public_s = groups[(4, False)] or groups[(6, False)]
if public_s:
return random.choice(public_s)
private_s = groups[(4, True)] or groups[(4, True)]
if self.allow_private_address:
if private_s:
return random.choice(private_s)
else:
if private_s:
raise PrivateAddressError(private_s[0])
raise NameNotResolvedError(host)
def resolve_urllib3(self, host) -> str:
ip_set = self._local_resolve(host)
if not ip_set:
ip_set = list(set(self._sync_resolve(host)))
LOG.debug('resolve_urllib3 %s to %s', host, ip_set)
ip = self._select_ip(ip_set, host=host)
return ip
def aiohttp_resolver(self, **kwargs):
return RssantAsyncResolver(dns_service=self, **kwargs)
def requests_http_adapter(self, **kwargs):
return RssantHttpAdapter(dns_service=self, **kwargs)
def refresh(self):
records = defaultdict(set)
for host, ip_set in self.query_from_cloudflare().items():
records[host].update(ip_set)
LOG.info('resolved from cloudflare: %r', dict(records))
if self.client.has_proxy:
for host, ip_set in self.query_from_google().items():
records[host].update(ip_set)
LOG.info('resolved from google: %r', dict(records))
records = self.validate_records(records)
LOG.info('refresh records: %r', dict(records))
self.update(records)
async def _verify_record_task(self, host, ip):
_NetworkErrors = (
socket.timeout, TimeoutError, asyncio.TimeoutError,
ssl.SSLError, ssl.CertificateError, ConnectionError,
)
try:
reader, writer = await asyncio.wait_for(asyncio.open_connection(
host=ip, port=443,
family=socket.AF_INET,
ssl=True,
server_hostname=host,
ssl_handshake_timeout=10,
), timeout=15)
except _NetworkErrors as ex:
LOG.info(f'verify_record host={host} ip={ip} {ex!r}')
return (host, ip, False)
try:
writer.close()
await writer.wait_closed()
except _NetworkErrors:
pass # ignore
return (host, ip, True)
async def _validate_records(self, records: dict):
valid_records = defaultdict(set)
tasks = []
for host, ip_set in records.items():
for ip in ip_set:
tasks.append(self._verify_record_task(host, ip))
for item in await asyncio.gather(*tasks):
host, ip, ok = item
if ok:
valid_records[host].add(ip)
return valid_records
def validate_records(self, records: dict) -> dict:
loop = get_or_create_event_loop()
valid_records = loop.run_until_complete(self._validate_records(records))
return valid_records
def query_from_dns_over_tls(self, url_template: str) -> dict:
headers = {'accept': 'application/dns-json'}
records = defaultdict(set)
for host in self.hosts:
url = url_template.format(name=host)
LOG.info(f'query {url}')
try:
response = self.client.request('GET', url, headers=headers)
response.raise_for_status()
except Exception as ex:
LOG.warning(f'{type(ex).__name__}: {ex}')
continue
for item in response.json()['Answer']:
if item['type'] == 1: # ipv4
ip = item['data']
if ip and _is_public_ipv4(ip):
records[host].add(ip)
return records
def query_from_cloudflare(self):
url_template = 'https://cloudflare-dns.com/dns-query?name={name}&type=A'
return self.query_from_dns_over_tls(url_template)
def query_from_google(self):
url_template = 'https://dns.google.com/resolve?name={name}&type=A'
return self.query_from_dns_over_tls(url_template)
class RssantAsyncResolver(aiohttp.AsyncResolver):
def __init__(self, *args, dns_service: DNSService, **kwargs):
self._dns_service = dns_service
super().__init__(*args, **kwargs)
async def _async_resolve(self, hostname) -> list:
hosts = await super().resolve(hostname, family=socket.AF_INET)
return list(set(item['host'] for item in hosts))
async def resolve(
self, host: str, port: int = 0,
family: int = socket.AF_INET
) -> List[Dict[str, Any]]:
ip_set = self._dns_service._local_resolve(host)
if not ip_set:
ip_set = await self._async_resolve(host)
LOG.debug('resolve_aiohttp %s to %s', host, ip_set)
ip = self._dns_service._select_ip(ip_set, host=host)
return [{
'hostname': host,
'host': ip, 'port': port,
'family': socket.AF_INET, 'proto': 0,
'flags': socket.AI_NUMERICHOST,
}]
class RssantHttpAdapter(requests.adapters.HTTPAdapter):
"""
https://stackoverflow.com/questions/22609385/python-requests-library-define-specific-dns
"""
def __init__(self, dns_service: DNSService, **kwargs):
self.dns_service = dns_service
super().__init__(**kwargs)
def send(self, request: requests.Request, **kwargs):
if kwargs.get('proxies'):
return super().send(request, **kwargs)
origin_request_url = request.url
parsed_url = yarl.URL(request.url)
hostname = parsed_url.raw_host
ip = self.dns_service.resolve_urllib3(hostname)
request.url = str(parsed_url.with_host(ip))
request.headers['Host'] = str(parsed_url.origin()).split('://', 1)[1]
connection_pool_kwargs = self.poolmanager.connection_pool_kw
if parsed_url.scheme == 'https':
connection_pool_kwargs['server_hostname'] = hostname
connection_pool_kwargs['assert_hostname'] = hostname
else:
connection_pool_kwargs.pop('server_hostname', None)
connection_pool_kwargs.pop('assert_hostname', None)
response: requests.Response = super().send(request, **kwargs)
response.url = request.url = origin_request_url
return response
def _setup():
_proxy_options = _proxy_helper.get_proxy_options()
service = DNSService.create(
**_proxy_options,
allow_private_address=CONFIG.allow_private_address,
)
return service
DNS_SERVICE = _setup()
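# Illustrative usage only; the host below is an example and the session setup
# is not part of the original module:
if __name__ == '__main__':
    print(DNS_SERVICE.resolve_urllib3('example.com'))  # pick one resolved, public IP
    session = requests.Session()
    session.mount('https://', DNS_SERVICE.requests_http_adapter())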
| 4,913 |
456 | <filename>src/test/java/com/swingfrog/summer/test/ecsgameserver/module/team/TeamManager.java
package com.swingfrog.summer.test.ecsgameserver.module.team;
import com.swingfrog.summer.annotation.Autowired;
import com.swingfrog.summer.annotation.Component;
import com.swingfrog.summer.app.Summer;
import com.swingfrog.summer.ecs.entity.mananger.AbstractAsyncEntityManager;
import com.swingfrog.summer.test.ecsgameserver.infrastructure.ErrorCode;
@Component
public class TeamManager extends AbstractAsyncEntityManager<Long, Team> {
@Autowired
private TeamDataDao teamDataDao;
@Override
protected Team loadEntity(Long entityId) {
return new Team(entityId);
}
@Override
protected long expireTime() {
return 30 * 60 * 1000;
}
public boolean exist(long teamId) {
return teamDataDao.get(teamId) != null;
}
public void checkExist(long teamId) {
if (!exist(teamId))
throw Summer.createCodeException(ErrorCode.TEAM_NOT_EXIST.getCodeMsg());
}
public void addTeamData(TeamData teamData) {
teamDataDao.add(teamData);
}
}
| 416 |
628 | /*************************************************************************
* Copyright (c) 2014 <NAME>
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**************************************************************************/
#ifndef STRUTIL_H
#define STRUTIL_H
/*
<NAME>
Princeton University
strutil.h
Miscellaneous string-manipulation utilities
Usage:
std::string s("foo.bar");
std::string s2 = replace_ext(s, "baz"); // "foo.baz"
begins_with("Foobar", "foo") // true
ends_with("foobar", "baz") // false
*/
#include <string>
#include <cstring>
#ifdef _WIN32
# ifndef strncasecmp
# define strncasecmp _strnicmp
# endif
#endif
// Replace the extension of a filename, else add one if none present
static inline std::string replace_ext(const std::string &filename,
const std::string &ext)
{
std::string x = filename;
std::string::size_type dot = x.rfind(".", x.length());
if (dot != std::string::npos)
x.erase(dot);
return x + std::string(".") + ext;
}
// Does string s1 begin/end with s2? (Case-insensitive)
static inline bool begins_with(const char *s1, const char *s2)
{
using namespace std;
return !strncasecmp(s1, s2, strlen(s2));
}
static inline bool begins_with(const std::string &s1, const std::string &s2)
{
return begins_with(s1.c_str(), s2.c_str());
}
static inline bool ends_with(const char *s1, const char *s2)
{
using namespace std;
size_t l1 = strlen(s1), l2 = strlen(s2);
return (l1 >= l2) && !strncasecmp(s1 + l1 - l2, s2, l2);
}
static inline bool ends_with(const std::string &s1, const std::string &s2)
{
return ends_with(s1.c_str(), s2.c_str());
}
#endif
| 709 |
1,968 | <filename>librtt/Rtt_LuaCoronaBaseLib.h<gh_stars>1000+
//////////////////////////////////////////////////////////////////////////////
//
// This file is part of the Corona game engine.
// For overview and more information on licensing please refer to README.md
// Home page: https://github.com/coronalabs/corona
// Contact: <EMAIL>
//
//////////////////////////////////////////////////////////////////////////////
#ifndef _Rtt_LuaCoronaBaseLib_H__
#define _Rtt_LuaCoronaBaseLib_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "lua.h"
int luaopen_coronabaselib(lua_State *L);
#ifdef __cplusplus
}
#endif
#endif
| 194 |
335 | <filename>N/Noddy_noun.json
{
"word": "Noddy",
"definitions": [
"A foolish person.",
"A tropical tern with mainly dark-coloured plumage.",
"A brief shot in a filmed interview in which the interviewer nods in agreement or acknowledgement."
],
"parts-of-speech": "Noun"
} | 117 |
866 | <filename>Stephanie/local_libs/pyzomato/core/requester.py
import requests
from Stephanie.local_libs.pyzomato.core.endpoint_manager import EndpointManager
class Requester(EndpointManager):
def __init__(self, API_KEY=""):
super().__init__()
self.API_KEY = API_KEY
self.headers = {'user-key': self.API_KEY}
self.r = object
def request(self, endpoint_name=None, endpoint_format=None, payload=None, raw=False, raw_url=None):
if payload is None:
payload = {}
if endpoint_format:
raw_url = self.endpoints[endpoint_name]
if isinstance(endpoint_format, str):
url = raw_url.format(endpoint_format)
else:
url = raw_url.format(*endpoint_format)
elif raw_url:
url = raw_url
else:
url = self.endpoints[endpoint_name]
self.r = requests.get(url, params=payload, headers=self.headers)
# self.check_for_exceptions(self.r)
if raw:
return self.r
return self.r.json()
@staticmethod
def check_for_exceptions(request):
status_code = request.status_code
if status_code != 200:
raise ConnectionError("The website couldn't be retrieved.")
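# Illustrative usage (a sketch; the endpoint name, payload keys and URL below
# are assumptions -- the real endpoint table lives in EndpointManager, which is
# not shown here):
if __name__ == '__main__':
    client = Requester(API_KEY='<your-zomato-api-key>')
    # by named endpoint:  client.request(endpoint_name='search', payload={'q': 'pizza'})
    # by explicit URL:    client.request(raw_url='https://developers.zomato.com/api/v2.1/categories')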
| 576 |
965 | //sa is of type COleSafeArray with 2 dimensions
//Determine upper bounds for both dimensions
long lNumRows;
long lNumCols;
sa.GetUBound(1, &lNumRows);
sa.GetUBound(2, &lNumCols);
//Display the elements in the SAFEARRAY.
long index[2];
VARIANT val;
//Determine lower bounds for both dimensions
long lowRow, lowCol;
sa.GetLBound(1, &lowRow);
sa.GetLBound(2, &lowCol);
for (long r = lowRow; r <= lNumRows; r++)
{
for (long c = lowCol; c <= lNumCols; c++)
{
index[0] = r;
index[1] = c;
//retrieve each element of the safearray
sa.GetElement(index, &val);
switch (val.vt)
{
case VT_R8:
TRACE(_T("%1.2f\n"), val.dblVal);
break;
case VT_BSTR:
TRACE(_T("%s\n"), (CString)val.bstrVal);
break;
// other cases omitted
case VT_EMPTY:
TRACE(_T("<empty>\n"));
break;
}
}
} | 431 |
1,338 | /*
* Copyright 2011-2015, <NAME>, <EMAIL>. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#include "MemoryView.h"
#include <algorithm>
#include <ctype.h>
#include <stdio.h>
#include <ByteOrder.h>
#include <Clipboard.h>
#include <Looper.h>
#include <MenuItem.h>
#include <MessageRunner.h>
#include <Messenger.h>
#include <PopUpMenu.h>
#include <Region.h>
#include <ScrollView.h>
#include <String.h>
#include "Architecture.h"
#include "AutoDeleter.h"
#include "MessageCodes.h"
#include "Team.h"
#include "TeamMemoryBlock.h"
enum {
MSG_TARGET_ADDRESS_CHANGED = 'mtac',
MSG_VIEW_AUTOSCROLL = 'mvas'
};
static const bigtime_t kScrollTimer = 10000LL;
MemoryView::MemoryView(::Team* team, Listener* listener)
:
BView("memoryView", B_WILL_DRAW | B_FRAME_EVENTS | B_NAVIGABLE
| B_SUBPIXEL_PRECISE),
fTeam(team),
fTargetBlock(NULL),
fEditableData(NULL),
fEditedOffsets(),
fTargetAddress(0LL),
fEditMode(false),
fEditLowNybble(false),
fCharWidth(0.0),
fLineHeight(0.0),
fTextCharsPerLine(0),
fHexBlocksPerLine(0),
fHexMode(HexMode8BitInt),
fTextMode(TextModeASCII),
fSelectionBase(0),
fSelectionStart(0),
fSelectionEnd(0),
fScrollRunner(NULL),
fTrackingMouse(false),
fListener(listener)
{
Architecture* architecture = team->GetArchitecture();
fTargetAddressSize = architecture->AddressSize() * 2;
fCurrentEndianMode = architecture->IsBigEndian()
? EndianModeBigEndian : EndianModeLittleEndian;
}
MemoryView::~MemoryView()
{
if (fTargetBlock != NULL)
fTargetBlock->ReleaseReference();
delete[] fEditableData;
}
/*static */ MemoryView*
MemoryView::Create(::Team* team, Listener* listener)
{
MemoryView* self = new MemoryView(team, listener);
try {
self->_Init();
} catch(...) {
delete self;
throw;
}
return self;
}
void
MemoryView::SetTargetAddress(TeamMemoryBlock* block, target_addr_t address)
{
fTargetAddress = address;
if (block != fTargetBlock) {
if (fTargetBlock != NULL)
fTargetBlock->ReleaseReference();
fTargetBlock = block;
if (block != NULL)
fTargetBlock->AcquireReference();
}
MakeFocus(true);
BMessenger(this).SendMessage(MSG_TARGET_ADDRESS_CHANGED);
}
void
MemoryView::UnsetListener()
{
fListener = NULL;
}
status_t
MemoryView::SetEditMode(bool enabled)
{
if (fTargetBlock == NULL)
return B_BAD_VALUE;
else if (fEditMode == enabled)
return B_OK;
if (enabled) {
status_t error = _SetupEditableData();
if (error != B_OK)
return error;
} else {
delete[] fEditableData;
fEditableData = NULL;
fEditedOffsets.clear();
fEditLowNybble = false;
}
fEditMode = enabled;
Invalidate();
return B_OK;
}
void
MemoryView::AttachedToWindow()
{
BView::AttachedToWindow();
SetViewUIColor(B_DOCUMENT_BACKGROUND_COLOR);
SetFont(be_fixed_font);
fCharWidth = be_fixed_font->StringWidth("a");
font_height fontHeight;
be_fixed_font->GetHeight(&fontHeight);
fLineHeight = ceilf(fontHeight.ascent + fontHeight.descent
+ fontHeight.leading);
}
void
MemoryView::Draw(BRect rect)
{
rect = Bounds();
float divider = (fTargetAddressSize + 1) * fCharWidth;
StrokeLine(BPoint(divider, rect.top),
BPoint(divider, rect.bottom));
if (fTargetBlock == NULL)
return;
uint32 hexBlockSize = _GetHexDigitsPerBlock() + 1;
uint32 blockByteSize = hexBlockSize / 2;
if (fHexMode != HexModeNone && fTextMode != TextModeNone) {
divider += (fHexBlocksPerLine * hexBlockSize + 1) * fCharWidth;
StrokeLine(BPoint(divider, rect.top),
BPoint(divider, rect.bottom));
}
char buffer[32];
char textbuffer[512];
const char* dataSource = (const char*)(fEditMode ? fEditableData
: fTargetBlock->Data());
int32 startLine = int32(rect.top / fLineHeight);
const char* currentAddress = dataSource + fHexBlocksPerLine
* blockByteSize * startLine;
const char* maxAddress = dataSource + fTargetBlock->Size();
const char* targetAddress = dataSource + fTargetAddress
- fTargetBlock->BaseAddress();
BPoint drawPoint(1.0, (startLine + 1) * fLineHeight);
int32 currentBlocksPerLine = fHexBlocksPerLine;
int32 currentCharsPerLine = fTextCharsPerLine;
font_height fh;
GetFontHeight(&fh);
target_addr_t lineAddress = fTargetBlock->BaseAddress() + startLine
* currentCharsPerLine;
bool highlightBlock = false;
rgb_color highlightColor;
for (; currentAddress < maxAddress && drawPoint.y < rect.bottom
+ fLineHeight; drawPoint.y += fLineHeight) {
drawPoint.x = 1.0;
snprintf(buffer, sizeof(buffer), "%0*" B_PRIx64,
(int)fTargetAddressSize, lineAddress);
PushState();
SetHighColor(tint_color(HighColor(), B_LIGHTEN_1_TINT));
DrawString(buffer, drawPoint);
drawPoint.x += fCharWidth * (fTargetAddressSize + 2);
PopState();
if (fHexMode != HexModeNone) {
if (currentAddress + (currentBlocksPerLine * blockByteSize)
> maxAddress) {
currentCharsPerLine = maxAddress - currentAddress;
currentBlocksPerLine = currentCharsPerLine
/ blockByteSize;
}
for (int32 j = 0; j < currentBlocksPerLine; j++) {
const char* blockAddress = currentAddress + (j
* blockByteSize);
_GetNextHexBlock(buffer, sizeof(buffer), blockAddress);
highlightBlock = false;
if (fEditMode)
{
int32 offset = blockAddress - dataSource;
for (uint32 i = 0; i < blockByteSize; i++) {
if (fEditedOffsets.count(offset + i) != 0) {
highlightBlock = true;
highlightColor.set_to(0, 216, 0);
break;
}
}
} else if (targetAddress >= blockAddress && targetAddress <
blockAddress + blockByteSize) {
highlightBlock = true;
highlightColor.set_to(216, 0, 0);
}
if (highlightBlock) {
PushState();
SetHighColor(highlightColor);
}
DrawString(buffer, drawPoint);
if (highlightBlock)
PopState();
drawPoint.x += fCharWidth * hexBlockSize;
}
if (currentBlocksPerLine < fHexBlocksPerLine)
drawPoint.x += fCharWidth * hexBlockSize
* (fHexBlocksPerLine - currentBlocksPerLine);
}
if (fTextMode != TextModeNone) {
drawPoint.x += fCharWidth;
for (int32 j = 0; j < currentCharsPerLine; j++) {
// filter non-printable characters
textbuffer[j] = currentAddress[j] > 32 ? currentAddress[j]
: '.';
}
textbuffer[fTextCharsPerLine] = '\0';
DrawString(textbuffer, drawPoint);
if (targetAddress >= currentAddress && targetAddress
< currentAddress + currentCharsPerLine) {
PushState();
SetHighColor(B_TRANSPARENT_COLOR);
SetDrawingMode(B_OP_INVERT);
uint32 blockAddress = uint32(targetAddress - currentAddress);
if (fHexMode != HexModeNone)
blockAddress &= ~(blockByteSize - 1);
float startX = drawPoint.x + fCharWidth * blockAddress;
float endX = startX;
if (fHexMode != HexModeNone)
endX += fCharWidth * ((hexBlockSize - 1) / 2);
else
endX += fCharWidth;
FillRect(BRect(startX, drawPoint.y - fh.ascent, endX,
drawPoint.y + fh.descent));
PopState();
}
}
if (currentBlocksPerLine > 0) {
currentAddress += currentBlocksPerLine * blockByteSize;
lineAddress += currentBlocksPerLine * blockByteSize;
} else {
currentAddress += fTextCharsPerLine;
lineAddress += fTextCharsPerLine;
}
}
if (fSelectionStart != fSelectionEnd) {
PushState();
BRegion selectionRegion;
_GetSelectionRegion(selectionRegion);
SetDrawingMode(B_OP_INVERT);
FillRegion(&selectionRegion, B_SOLID_HIGH);
PopState();
}
if (fEditMode) {
PushState();
BRect caretRect;
_GetEditCaretRect(caretRect);
SetDrawingMode(B_OP_INVERT);
FillRect(caretRect, B_SOLID_HIGH);
PopState();
}
}
void
MemoryView::FrameResized(float width, float height)
{
BView::FrameResized(width, height);
_RecalcScrollBars();
Invalidate();
}
void
MemoryView::KeyDown(const char* bytes, int32 numBytes)
{
bool handled = true;
if (fTargetBlock != NULL) {
target_addr_t newAddress = fTargetAddress;
target_addr_t maxAddress = fTargetBlock->BaseAddress()
+ fTargetBlock->Size() - 1;
int32 blockSize = 1;
if (fHexMode != HexModeNone)
blockSize = 1 << (fHexMode - 1);
int32 lineCount = int32(Bounds().Height() / fLineHeight);
switch(bytes[0]) {
case B_UP_ARROW:
{
newAddress -= blockSize * fHexBlocksPerLine;
break;
}
case B_DOWN_ARROW:
{
newAddress += blockSize * fHexBlocksPerLine;
break;
}
case B_LEFT_ARROW:
{
if (fEditMode) {
if (!fEditLowNybble)
newAddress--;
fEditLowNybble = !fEditLowNybble;
if (newAddress == fTargetAddress)
Invalidate();
} else
newAddress -= blockSize;
break;
}
case B_RIGHT_ARROW:
{
if (fEditMode) {
if (fEditLowNybble)
newAddress++;
fEditLowNybble = !fEditLowNybble;
if (newAddress == fTargetAddress)
Invalidate();
} else
newAddress += blockSize;
break;
}
case B_PAGE_UP:
{
newAddress -= (blockSize * fHexBlocksPerLine) * lineCount;
break;
}
case B_PAGE_DOWN:
{
newAddress += (blockSize * fHexBlocksPerLine) * lineCount;
break;
}
case B_HOME:
{
newAddress = fTargetBlock->BaseAddress();
fEditLowNybble = false;
break;
}
case B_END:
{
newAddress = maxAddress;
fEditLowNybble = true;
break;
}
default:
{
if (fEditMode && isxdigit(bytes[0]))
{
int value = 0;
if (isdigit(bytes[0]))
value = bytes[0] - '0';
else
value = (int)strtol(bytes, NULL, 16);
int32 byteOffset = fTargetAddress
- fTargetBlock->BaseAddress();
if (fEditLowNybble)
value = (fEditableData[byteOffset] & 0xf0) | value;
else {
value = (fEditableData[byteOffset] & 0x0f)
| (value << 4);
}
fEditableData[byteOffset] = value;
if (fEditableData[byteOffset]
!= fTargetBlock->Data()[byteOffset]) {
fEditedOffsets.insert(byteOffset);
} else
fEditedOffsets.erase(byteOffset);
if (fEditLowNybble) {
if (newAddress < maxAddress) {
newAddress++;
fEditLowNybble = false;
}
} else
fEditLowNybble = true;
Invalidate();
} else
handled = false;
break;
}
}
if (handled) {
if (newAddress < fTargetBlock->BaseAddress())
newAddress = fTargetAddress;
else if (newAddress > maxAddress)
newAddress = maxAddress;
if (newAddress != fTargetAddress) {
fTargetAddress = newAddress;
BMessenger(this).SendMessage(MSG_TARGET_ADDRESS_CHANGED);
}
}
} else
handled = false;
if (!handled)
BView::KeyDown(bytes, numBytes);
}
void
MemoryView::MakeFocus(bool isFocused)
{
BScrollView* parent = dynamic_cast<BScrollView*>(Parent());
if (parent != NULL)
parent->SetBorderHighlighted(isFocused);
BView::MakeFocus(isFocused);
}
void
MemoryView::MessageReceived(BMessage* message)
{
switch(message->what) {
case B_COPY:
{
_CopySelectionToClipboard();
break;
}
case MSG_TARGET_ADDRESS_CHANGED:
{
_RecalcScrollBars();
ScrollToSelection();
Invalidate();
if (fListener != NULL)
fListener->TargetAddressChanged(fTargetAddress);
break;
}
case MSG_SET_HEX_MODE:
{
// while editing, hex view changes are disallowed.
if (fEditMode)
break;
int32 mode;
if (message->FindInt32("mode", &mode) == B_OK) {
if (fHexMode == mode)
break;
fHexMode = mode;
_RecalcScrollBars();
Invalidate();
if (fListener != NULL)
fListener->HexModeChanged(mode);
}
break;
}
case MSG_SET_ENDIAN_MODE:
{
int32 mode;
if (message->FindInt32("mode", &mode) == B_OK) {
if (fCurrentEndianMode == mode)
break;
fCurrentEndianMode = mode;
Invalidate();
if (fListener != NULL)
fListener->EndianModeChanged(mode);
}
break;
}
case MSG_SET_TEXT_MODE:
{
int32 mode;
if (message->FindInt32("mode", &mode) == B_OK) {
if (fTextMode == mode)
break;
fTextMode = mode;
_RecalcScrollBars();
Invalidate();
if (fListener != NULL)
fListener->TextModeChanged(mode);
}
break;
}
case MSG_VIEW_AUTOSCROLL:
{
_HandleAutoScroll();
break;
}
default:
{
BView::MessageReceived(message);
break;
}
}
}
void
MemoryView::MouseDown(BPoint point)
{
if (!IsFocus())
MakeFocus(true);
if (fTargetBlock == NULL)
return;
int32 buttons;
if (Looper()->CurrentMessage()->FindInt32("buttons", &buttons) != B_OK)
buttons = B_PRIMARY_MOUSE_BUTTON;
if (buttons == B_SECONDARY_MOUSE_BUTTON) {
_HandleContextMenu(point);
return;
}
int32 offset = _GetOffsetAt(point);
if (offset < fSelectionStart || offset > fSelectionEnd) {
BRegion oldSelectionRegion;
_GetSelectionRegion(oldSelectionRegion);
fSelectionBase = offset;
fSelectionStart = fSelectionBase;
fSelectionEnd = fSelectionBase;
Invalidate(oldSelectionRegion.Frame());
}
SetMouseEventMask(B_POINTER_EVENTS, B_NO_POINTER_HISTORY);
fTrackingMouse = true;
}
void
MemoryView::MouseMoved(BPoint point, uint32 transit, const BMessage* message)
{
if (!fTrackingMouse)
return;
BRegion oldSelectionRegion;
_GetSelectionRegion(oldSelectionRegion);
int32 offset = _GetOffsetAt(point);
if (offset < fSelectionBase) {
fSelectionStart = offset;
fSelectionEnd = fSelectionBase;
} else {
fSelectionStart = fSelectionBase;
fSelectionEnd = offset;
}
BRegion region;
_GetSelectionRegion(region);
region.Include(&oldSelectionRegion);
Invalidate(region.Frame());
switch (transit) {
case B_EXITED_VIEW:
fScrollRunner = new BMessageRunner(BMessenger(this),
new BMessage(MSG_VIEW_AUTOSCROLL), kScrollTimer);
break;
case B_ENTERED_VIEW:
delete fScrollRunner;
fScrollRunner = NULL;
break;
default:
break;
}
}
void
MemoryView::MouseUp(BPoint point)
{
fTrackingMouse = false;
delete fScrollRunner;
fScrollRunner = NULL;
}
void
MemoryView::ScrollToSelection()
{
if (fTargetBlock != NULL) {
target_addr_t offset = fTargetAddress - fTargetBlock->BaseAddress();
int32 lineNumber = 0;
if (fHexBlocksPerLine > 0) {
lineNumber = offset / (fHexBlocksPerLine
* (_GetHexDigitsPerBlock() / 2));
} else if (fTextCharsPerLine > 0)
lineNumber = offset / fTextCharsPerLine;
float y = lineNumber * fLineHeight;
if (y < Bounds().top)
ScrollTo(0.0, y);
else if (y + fLineHeight > Bounds().bottom)
ScrollTo(0.0, y + fLineHeight - Bounds().Height());
}
}
void
MemoryView::TargetedByScrollView(BScrollView* scrollView)
{
BView::TargetedByScrollView(scrollView);
scrollView->ScrollBar(B_VERTICAL)->SetRange(0.0, 0.0);
}
BSize
MemoryView::MinSize()
{
return BSize(0.0, 0.0);
}
BSize
MemoryView::PreferredSize()
{
return MinSize();
}
BSize
MemoryView::MaxSize()
{
return BSize(B_SIZE_UNLIMITED, B_SIZE_UNLIMITED);
}
void
MemoryView::_Init()
{
SetViewUIColor(B_PANEL_BACKGROUND_COLOR);
}
void
MemoryView::_RecalcScrollBars()
{
float max = 0.0;
BScrollBar *scrollBar = ScrollBar(B_VERTICAL);
if (fTargetBlock != NULL) {
int32 hexDigits = _GetHexDigitsPerBlock();
int32 sizeFactor = 1 + hexDigits;
_RecalcBounds();
float hexWidth = fHexRight - fHexLeft;
int32 nybblesPerLine = int32(hexWidth / fCharWidth);
fHexBlocksPerLine = 0;
fTextCharsPerLine = 0;
if (fHexMode != HexModeNone) {
fHexBlocksPerLine = nybblesPerLine / sizeFactor;
fHexBlocksPerLine &= ~1;
fHexRight = fHexLeft + (fHexBlocksPerLine * sizeFactor
* fCharWidth);
if (fTextMode != TextModeNone)
fTextCharsPerLine = fHexBlocksPerLine * hexDigits / 2;
} else if (fTextMode != TextModeNone)
fTextCharsPerLine = int32((fTextRight - fTextLeft) / fCharWidth);
int32 lineCount = 0;
float totalHeight = 0.0;
if (fHexBlocksPerLine > 0) {
lineCount = fTargetBlock->Size() / (fHexBlocksPerLine
* hexDigits / 2);
} else if (fTextCharsPerLine > 0)
lineCount = fTargetBlock->Size() / fTextCharsPerLine;
totalHeight = lineCount * fLineHeight;
if (totalHeight > 0.0) {
BRect bounds = Bounds();
max = totalHeight - bounds.Height();
scrollBar->SetProportion(bounds.Height() / totalHeight);
scrollBar->SetSteps(fLineHeight, bounds.Height());
}
}
scrollBar->SetRange(0.0, max);
}
void
MemoryView::_GetNextHexBlock(char* buffer, int32 bufferSize,
const char* address) const
{
switch(fHexMode) {
case HexMode8BitInt:
{
snprintf(buffer, bufferSize, "%02" B_PRIx8,
*((const uint8*)address));
break;
}
case HexMode16BitInt:
{
uint16 data = *((const uint16*)address);
switch(fCurrentEndianMode)
{
case EndianModeBigEndian:
{
data = B_HOST_TO_BENDIAN_INT16(data);
}
break;
case EndianModeLittleEndian:
{
data = B_HOST_TO_LENDIAN_INT16(data);
}
break;
}
snprintf(buffer, bufferSize, "%04" B_PRIx16,
data);
break;
}
case HexMode32BitInt:
{
uint32 data = *((const uint32*)address);
switch(fCurrentEndianMode)
{
case EndianModeBigEndian:
{
data = B_HOST_TO_BENDIAN_INT32(data);
}
break;
case EndianModeLittleEndian:
{
data = B_HOST_TO_LENDIAN_INT32(data);
}
break;
}
snprintf(buffer, bufferSize, "%08" B_PRIx32,
data);
break;
}
case HexMode64BitInt:
{
uint64 data = *((const uint64*)address);
switch(fCurrentEndianMode)
{
case EndianModeBigEndian:
{
data = B_HOST_TO_BENDIAN_INT64(data);
}
break;
case EndianModeLittleEndian:
{
data = B_HOST_TO_LENDIAN_INT64(data);
}
break;
}
snprintf(buffer, bufferSize, "%0*" B_PRIx64,
16, data);
break;
}
}
}
int32
MemoryView::_GetOffsetAt(BPoint point) const
{
if (fTargetBlock == NULL)
return -1;
// TODO: support selection in the text region as well
if (fHexMode == HexModeNone)
return -1;
int32 lineNumber = int32(point.y / fLineHeight);
int32 charsPerBlock = _GetHexDigitsPerBlock() / 2;
int32 totalHexBlocks = fTargetBlock->Size() / charsPerBlock;
int32 lineCount = totalHexBlocks / fHexBlocksPerLine;
if (lineNumber >= lineCount)
return -1;
point.x -= fHexLeft;
if (point.x < 0)
point.x = 0;
else if (point.x > fHexRight)
point.x = fHexRight;
float blockWidth = (charsPerBlock * 2 + 1) * fCharWidth;
int32 containingBlock = int32(floor(point.x / blockWidth));
return fHexBlocksPerLine * charsPerBlock * lineNumber
+ containingBlock * charsPerBlock;
}
BPoint
MemoryView::_GetPointForOffset(int32 offset) const
{
BPoint point;
if (fHexMode == HexModeNone)
return point;
int32 bytesPerLine = fHexBlocksPerLine * _GetHexDigitsPerBlock() / 2;
int32 line = offset / bytesPerLine;
int32 lineOffset = offset % bytesPerLine;
point.x = fHexLeft + (lineOffset * 2 * fCharWidth)
+ (lineOffset * 2 * fCharWidth / _GetHexDigitsPerBlock());
point.y = line * fLineHeight;
return point;
}
void
MemoryView::_RecalcBounds()
{
fHexLeft = 0;
fHexRight = 0;
fTextLeft = 0;
fTextRight = 0;
// the left bound is determined by the space taken up by the actual
// displayed addresses.
float left = _GetAddressDisplayWidth();
float width = Bounds().Width() - left;
if (fHexMode != HexModeNone) {
int32 hexDigits = _GetHexDigitsPerBlock();
int32 sizeFactor = 1 + hexDigits;
if (fTextMode != TextModeNone) {
float hexProportion = sizeFactor / (float)(sizeFactor
+ hexDigits / 2);
float hexWidth = width * hexProportion;
fTextLeft = left + hexWidth;
fHexLeft = left;
// when sharing the display between hex and text,
// we allocate a 2 character space to separate the views
hexWidth -= 2 * fCharWidth;
fHexRight = left + hexWidth;
} else {
fHexLeft = left;
fHexRight = left + width;
}
} else if (fTextMode != TextModeNone) {
fTextLeft = left;
fTextRight = left + width;
}
}
float
MemoryView::_GetAddressDisplayWidth() const
{
return (fTargetAddressSize + 2) * fCharWidth;
}
void
MemoryView::_GetEditCaretRect(BRect& rect) const
{
if (!fEditMode)
return;
int32 byteOffset = fTargetAddress - fTargetBlock->BaseAddress();
BPoint point = _GetPointForOffset(byteOffset);
if (fEditLowNybble)
point.x += fCharWidth;
rect.left = point.x;
rect.right = point.x + fCharWidth;
rect.top = point.y;
rect.bottom = point.y + fLineHeight;
}
void
MemoryView::_GetSelectionRegion(BRegion& region) const
{
if (fHexMode == HexModeNone || fTargetBlock == NULL)
return;
region.MakeEmpty();
BPoint startPoint = _GetPointForOffset(fSelectionStart);
BPoint endPoint = _GetPointForOffset(fSelectionEnd);
BRect rect;
if (startPoint.y == endPoint.y) {
// single line case
rect.left = startPoint.x;
rect.top = startPoint.y;
rect.right = endPoint.x;
rect.bottom = endPoint.y + fLineHeight;
region.Include(rect);
} else {
float currentLine = startPoint.y;
// first line
rect.left = startPoint.x;
rect.top = startPoint.y;
rect.right = fHexRight;
rect.bottom = startPoint.y + fLineHeight;
region.Include(rect);
currentLine += fLineHeight;
// middle region
if (currentLine < endPoint.y) {
rect.left = fHexLeft;
rect.top = currentLine;
rect.right = fHexRight;
rect.bottom = endPoint.y;
region.Include(rect);
}
rect.left = fHexLeft;
rect.top = endPoint.y;
rect.right = endPoint.x;
rect.bottom = endPoint.y + fLineHeight;
region.Include(rect);
}
}
void
MemoryView::_GetSelectedText(BString& text) const
{
if (fSelectionStart == fSelectionEnd)
return;
text.Truncate(0);
const uint8* dataSource = fEditMode ? fEditableData : fTargetBlock->Data();
const char* data = (const char *)dataSource + fSelectionStart;
int16 blockSize = _GetHexDigitsPerBlock() / 2;
int32 count = (fSelectionEnd - fSelectionStart)
/ blockSize;
char buffer[32];
for (int32 i = 0; i < count; i++) {
_GetNextHexBlock(buffer, sizeof(buffer), data);
data += blockSize;
text << buffer;
if (i < count - 1)
text << " ";
}
}
void
MemoryView::_CopySelectionToClipboard()
{
BString text;
_GetSelectedText(text);
if (text.Length() > 0) {
be_clipboard->Lock();
be_clipboard->Data()->RemoveData("text/plain");
be_clipboard->Data()->AddData ("text/plain",
B_MIME_TYPE, text.String(), text.Length());
be_clipboard->Commit();
be_clipboard->Unlock();
}
}
void
MemoryView::_HandleAutoScroll()
{
BPoint point;
uint32 buttons;
GetMouse(&point, &buttons);
float difference = 0.0;
int factor = 0;
BRect visibleRect = Bounds();
if (point.y < visibleRect.top)
difference = point.y - visibleRect.top;
else if (point.y > visibleRect.bottom)
difference = point.y - visibleRect.bottom;
if (difference != 0.0) {
factor = (int)(ceilf(difference / fLineHeight));
_ScrollByLines(factor);
}
MouseMoved(point, B_OUTSIDE_VIEW, NULL);
}
void
MemoryView::_ScrollByLines(int32 lineCount)
{
BScrollBar* vertical = ScrollBar(B_VERTICAL);
if (vertical == NULL)
return;
float value = vertical->Value();
vertical->SetValue(value + fLineHeight * lineCount);
}
void
MemoryView::_HandleContextMenu(BPoint point)
{
int32 offset = _GetOffsetAt(point);
if (offset < fSelectionStart || offset > fSelectionEnd)
return;
BPopUpMenu* menu = new(std::nothrow) BPopUpMenu("Options");
if (menu == NULL)
return;
ObjectDeleter<BPopUpMenu> menuDeleter(menu);
ObjectDeleter<BMenuItem> itemDeleter;
ObjectDeleter<BMessage> messageDeleter;
BMessage* message = NULL;
BMenuItem* item = NULL;
if (fSelectionEnd - fSelectionStart == fTargetAddressSize / 2) {
BMessage* message = new(std::nothrow) BMessage(MSG_INSPECT_ADDRESS);
if (message == NULL)
return;
target_addr_t address;
if (fTargetAddressSize == 8)
address = *((uint32*)(fTargetBlock->Data() + fSelectionStart));
else
address = *((uint64*)(fTargetBlock->Data() + fSelectionStart));
if (fCurrentEndianMode == EndianModeBigEndian)
address = B_HOST_TO_BENDIAN_INT64(address);
else
address = B_HOST_TO_LENDIAN_INT64(address);
messageDeleter.SetTo(message);
message->AddUInt64("address", address);
BMenuItem* item = new(std::nothrow) BMenuItem("Inspect", message);
if (item == NULL)
return;
messageDeleter.Detach();
itemDeleter.SetTo(item);
if (!menu->AddItem(item))
return;
item->SetTarget(Looper());
itemDeleter.Detach();
}
message = new(std::nothrow) BMessage(B_COPY);
if (message == NULL)
return;
messageDeleter.SetTo(message);
item = new(std::nothrow) BMenuItem("Copy", message);
if (item == NULL)
return;
messageDeleter.Detach();
itemDeleter.SetTo(item);
if (!menu->AddItem(item))
return;
item->SetTarget(this);
itemDeleter.Detach();
menuDeleter.Detach();
BPoint screenWhere(point);
ConvertToScreen(&screenWhere);
BRect mouseRect(screenWhere, screenWhere);
mouseRect.InsetBy(-4.0, -4.0);
menu->Go(screenWhere, true, false, mouseRect, true);
}
status_t
MemoryView::_SetupEditableData()
{
fEditableData = new(std::nothrow) uint8[fTargetBlock->Size()];
if (fEditableData == NULL)
return B_NO_MEMORY;
memcpy(fEditableData, fTargetBlock->Data(), fTargetBlock->Size());
if (fHexMode != HexMode8BitInt) {
fHexMode = HexMode8BitInt;
if (fListener != NULL)
fListener->HexModeChanged(fHexMode);
_RecalcScrollBars();
}
return B_OK;
}
//#pragma mark - Listener
MemoryView::Listener::~Listener()
{
}
| 10,374 |
711 | <filename>service-front/src/main/java/com/java110/front/components/businesstype/ListBusinessTypeComponent.java
package com.java110.front.components.businesstype;
import com.java110.core.context.IPageData;
import com.java110.front.smo.ICbusinessTypeServiceSMO;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Component;
@Component("listBusinessType")
public class ListBusinessTypeComponent {
@Autowired
private ICbusinessTypeServiceSMO iCbusinessTypeServiceSMOImpl;
/**
     * Queries business type information.
     *
     * @param pd page data wrapper containing the request data from the page
     * @return ResponseEntity returned to the page
*/
public ResponseEntity<String> list(IPageData pd) {
return iCbusinessTypeServiceSMOImpl.listBusinessType(pd);
}
}
| 340 |
382 | /*
* Copyright 2020 Coveo, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.spinnaker.clouddriver.kubernetes.config;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
import lombok.Data;
import lombok.Getter;
@Data
public class RawResourcesEndpointConfig {
private Set<String> kindExpressions = new HashSet<>();
private Set<String> omitKindExpressions = new HashSet<>();
@Getter private List<Pattern> kindPatterns = new ArrayList<>();
@Getter private List<Pattern> omitKindPatterns = new ArrayList<>();
public void validate() {
if (!kindExpressions.isEmpty() && !omitKindExpressions.isEmpty()) {
throw new IllegalArgumentException(
"At most one of 'kindExpressions' and 'omitKindExpressions' can be specified");
}
for (String exp : kindExpressions) {
kindPatterns.add(Pattern.compile(exp));
}
for (String exp : omitKindExpressions) {
omitKindPatterns.add(Pattern.compile(exp));
}
}
}
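// Illustrative usage (a sketch; the expressions are examples only -- in practice
// this object is populated from the clouddriver YAML configuration):
//   RawResourcesEndpointConfig config = new RawResourcesEndpointConfig();
//   config.setKindExpressions(new HashSet<>(Arrays.asList("Deployment", "ReplicaSet.*")));
//   config.validate(); // compiles the expressions into kindPatterns
//   boolean match = config.getKindPatterns().get(0).matcher("Deployment").matches();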
| 494 |
1,056 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.java.lsp.server.protocol;
import java.io.Reader;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.eclipse.lsp4j.MessageActionItem;
import org.eclipse.lsp4j.MessageParams;
import org.eclipse.lsp4j.PublishDiagnosticsParams;
import org.eclipse.lsp4j.ShowMessageRequestParams;
import org.eclipse.lsp4j.services.LanguageClient;
import org.junit.Test;
import static org.junit.Assert.*;
import org.netbeans.modules.java.lsp.server.ui.AbstractLspInputOutputProvider;
import org.netbeans.modules.java.lsp.server.ui.AbstractLspInputOutputProvider.LspIO;
import org.netbeans.modules.nbcode.integration.LspInputOutputProvider;
import org.openide.util.Lookup;
import org.openide.util.test.MockLookup;
public class WorkspaceContextTest {
public WorkspaceContextTest() {
}
@Test
public void testPrintLinesNoNewLines() {
List<MessageParams> msgs = new ArrayList<>();
MockLanguageClient mlc = new MockLanguageClient(msgs);
WorkspaceIOContext wc = new WorkspaceIOContext() {
@Override
protected LanguageClient client() {
return mlc;
}
};
wc.stdOut("ahoj");
wc.stdOut("\n");
wc.stdErr("there!");
wc.stdErr("\n");
assertEquals("Two messages", 2, msgs.size());
assertEquals("ahoj", msgs.get(0).getMessage());
assertEquals("there!", msgs.get(1).getMessage());
}
/**
* WorkspaceIOContext is a dead input, but must allow to be close()d, returning -1
* from its read().
*/
@Test
public void testReadDoesntBlockClose() throws Exception {
List<MessageParams> msgs = new ArrayList<>();
MockLanguageClient mlc = new MockLanguageClient(msgs);
WorkspaceIOContext wc = new WorkspaceIOContext() {
@Override
protected LanguageClient client() {
return mlc;
}
};
//LspIO io = LspIOAccessor.createIO("Test", wc, Lookup.EMPTY);
MockLookup.setInstances(wc);
AbstractLspInputOutputProvider ioProvider = new LspInputOutputProvider();
LspIO lspIo = ioProvider.getIO("Test", true, Lookup.EMPTY);
Reader inReader = ioProvider.getIn(lspIo);
CountDownLatch closeLatch = new CountDownLatch(1);
final Thread readerThread = Thread.currentThread();
Executors.newSingleThreadScheduledExecutor().schedule(() -> {
inReader.close();
closeLatch.countDown();
return null;
}, 300, TimeUnit.MILLISECONDS);
Executors.newSingleThreadScheduledExecutor().schedule(() -> {
readerThread.interrupt();
}, 1000, TimeUnit.MILLISECONDS);
int r = inReader.read();
assert r == -1;
assertTrue(closeLatch.await(500, TimeUnit.MILLISECONDS));
}
private static final class MockLanguageClient implements LanguageClient {
private final List<MessageParams> messages;
MockLanguageClient(List<MessageParams> messages) {
this.messages = messages;
}
@Override
public void telemetryEvent(Object object) {
fail();
}
@Override
public void publishDiagnostics(PublishDiagnosticsParams diagnostics) {
fail();
}
@Override
public void showMessage(MessageParams messageParams) {
fail();
}
@Override
public CompletableFuture<MessageActionItem> showMessageRequest(ShowMessageRequestParams requestParams) {
fail();
return null;
}
@Override
public void logMessage(MessageParams message) {
messages.add(message);
}
}
}
| 1,884 |
558 | <filename>lib/util/src/rcu.c
/* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2015-2020 Micron Technology, Inc. All rights reserved.
*/
#include <hse_util/rcu.h>
#if !HSE_HAVE_RCU_BARRIER
/* RHEL 7.4 and 7.5 prereq very old urcu implementations that do not include
* rcu_barrier() nor rcu_read_ongoing().
*
* [MU_REVIST] Currently, only hse unit tests call rcu_barrier(), but it should
* probably be called by c0sk_close() and mpc_exit(). A full implementation
* of rcu_barrier() should invoke call_rcu() for each vCPU and wait for all
* to complete. The implementation here is incomplete as it only invokes
* call_rcu() on at most one vCPU.
*/
#include <hse_util/atomic.h>
#include <hse_util/delay.h>
struct rcu_barrier_data {
struct rcu_head rbd_rcu;
atomic_t rbd_count;
};
static void
rcu_barrier_cb(struct rcu_head *rh)
{
struct rcu_barrier_data *rbd;
rbd = caa_container_of(rh, struct rcu_barrier_data, rbd_rcu);
atomic_inc(&rbd->rbd_count);
}
__attribute__((__weak__)) void
rcu_barrier_bp(void)
{
struct rcu_barrier_data rbd = {};
call_rcu(&rbd.rbd_rcu, rcu_barrier_cb);
rcu_defer_barrier();
while (atomic_read(&rbd.rbd_count) == 0)
msleep(100);
synchronize_rcu();
}
#endif
| 525 |
5,290 | import os
import re
import shutil
import tempfile
from ..converter import KnowledgePostConverter
class DocxConverter(KnowledgePostConverter):
_registry_keys = ['docx']
@property
def dependencies(self):
# Dependencies required for this converter on top of core knowledge-repo dependencies
return ['pypandoc']
def from_file(self, filename, **opts):
self.tmp_dir = wd = tempfile.mkdtemp()
target_file = os.path.join(wd, 'post.md')
import pypandoc
pypandoc.convert_file(
filename,
format='docx',
to='markdown-grid_tables',
outputfile=target_file,
extra_args=[
'--standalone',
'--wrap=none',
'--extract-media={}'.format(wd)
]
)
with open(target_file) as f:
md = f.read()
# Image embeddings exported from docx files have fixed sizes in inches
# which browsers do not understand. We remove these annotations.
md = re.sub(r'(\!\[[^\]]+?\]\([^\)]+?\))\{[^\}]+?\}', lambda m: m.group(1), md)
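        # e.g. (illustrative docx export):
        #   '![chart](media/image1.png){width="6.5in" height="3.5in"}'
        #    -> '![chart](media/image1.png)'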
# Write markdown content to knowledge post (images will be extracted later)
self.kp_write(md)
def cleanup(self):
if hasattr(self, 'tmp_dir'):
shutil.rmtree(self.tmp_dir)
del self.tmp_dir
| 639 |
593 | <filename>2015-10_Lecture/Lecture3/code/KerasLayer/FixedEmbedding.py
from __future__ import absolute_import
import theano
import theano.tensor as T
import keras
from keras import activations, initializations, regularizers, constraints
from keras.layers.core import Layer, MaskedLayer
from keras.utils.theano_utils import sharedX
from keras.constraints import unitnorm
class FixedEmbedding(Layer):
'''
Turn positive integers (indexes) into denses vectors of fixed size.
eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
@input_dim: size of vocabulary (highest input integer + 1)
@out_dim: size of dense representation
'''
input_ndim = 2
def __init__(self, input_dim, output_dim, init='uniform', input_length=None,
W_regularizer=None, activity_regularizer=None, W_constraint=None,
mask_zero=False, weights=None, **kwargs):
self.input_dim = input_dim
self.output_dim = output_dim
self.init = initializations.get(init)
self.input_length = input_length
self.mask_zero = mask_zero
self.W_constraint = constraints.get(W_constraint)
self.constraints = [self.W_constraint]
self.W_regularizer = regularizers.get(W_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.initial_weights = weights
kwargs['input_shape'] = (self.input_dim,)
super(FixedEmbedding, self).__init__(**kwargs)
def build(self):
self.input = T.imatrix()
self.W = self.init((self.input_dim, self.output_dim))
self.params = [] #No update of the weight
self.regularizers = []
#if self.W_regularizer:
# self.W_regularizer.set_param(self.W)
# self.regularizers.append(self.W_regularizer)
#if self.activity_regularizer:
# self.activity_regularizer.set_layer(self)
# self.regularizers.append(self.activity_regularizer)
if self.initial_weights is not None:
#self.set_weights(self.initial_weights)
self.W.set_value(self.initial_weights[0])
#self.W = self.initial_weights[0]
def get_output_mask(self, train=None):
X = self.get_input(train)
if not self.mask_zero:
return None
else:
return T.ones_like(X) * (1 - T.eq(X, 0))
@property
def output_shape(self):
return (self.input_shape[0], self.input_length, self.output_dim)
def get_output(self, train=False):
X = self.get_input(train)
out = self.W[X]
return out
def get_config(self):
config = {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"input_length": self.input_length,
"mask_zero": self.mask_zero,
"activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
"W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
"W_constraint": self.W_constraint.get_config() if self.W_constraint else None}
        base_config = super(FixedEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
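# Illustrative usage with the (old) Keras 0.x Sequential API this layer targets;
# vocab_size, embedding_dim, maxlen and embedding_matrix are placeholders, not
# values defined in this file:
#   model = Sequential()
#   model.add(FixedEmbedding(input_dim=vocab_size, output_dim=embedding_dim,
#                            input_length=maxlen, weights=[embedding_matrix]))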
| 1,498 |
374 | <reponame>amanzan/android-oauth-client<gh_stars>100-1000
package com.wuman.android.auth.oauth2.store;
import android.annotation.TargetApi;
import android.content.Context;
import android.content.SharedPreferences;
import android.os.Build;
import android.text.TextUtils;
import com.google.api.client.auth.oauth2.Credential;
import com.google.api.client.auth.oauth2.CredentialStore;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.util.Beta;
import com.google.api.client.util.Preconditions;
import java.io.IOException;
/**
* {@link Beta} <br/>
* Thread-safe {@link SharedPreferences} implementation of a credential store.
*
* @author <NAME>
*/
@TargetApi(Build.VERSION_CODES.GINGERBREAD)
public class SharedPreferencesCredentialStore implements CredentialStore {
/** Json factory for serializing user credentials. */
private final JsonFactory jsonFactory;
private final SharedPreferences prefs;
/**
* @param context Context in which to store user credentials
* @param name Name under which the SharedPreferences file is stored
* @param jsonFactory JSON factory to serialize user credentials
*/
public SharedPreferencesCredentialStore(Context context, String name, JsonFactory jsonFactory) {
Preconditions.checkNotNull(context);
Preconditions.checkNotNull(name);
this.prefs = Preconditions.checkNotNull(
context.getSharedPreferences(name, Context.MODE_PRIVATE));
this.jsonFactory = Preconditions.checkNotNull(jsonFactory);
}
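// Example wiring (sketch; the preferences file name and JSON factory are illustrative):
//   CredentialStore store = new SharedPreferencesCredentialStore(
//       context, "oauth2_credentials", new JacksonFactory());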
@Override
public boolean load(String userId, Credential credential) throws IOException {
Preconditions.checkNotNull(userId);
String credentialJson = prefs.getString(userId, null);
if (TextUtils.isEmpty(credentialJson)) {
return false;
}
FilePersistedCredential fileCredential = jsonFactory.fromString(
credentialJson, FilePersistedCredential.class);
if (fileCredential == null) {
return false;
}
fileCredential.load(credential);
return true;
}
@Override
public void store(String userId, Credential credential) throws IOException {
Preconditions.checkNotNull(userId);
FilePersistedCredential fileCredential = new FilePersistedCredential();
fileCredential.store(credential);
String credentialJson = jsonFactory.toString(fileCredential);
prefs.edit().putString(userId, credentialJson).apply();
}
@Override
public void delete(String userId, Credential credential) throws IOException {
Preconditions.checkNotNull(userId);
prefs.edit().remove(userId).apply();
}
}
| 985 |
510 | <gh_stars>100-1000
#include "../include/GuiLite.h"
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#include "database.h"
#include "sqlite3.h"
//#define DATA_BASE_PATH "/data/monitor.db"
#define DATA_BASE_PATH ":memory:"
static char sql_create_real_data_table[] =
"create table table_real_data("
"TIME INTEGER,"
"HR INTEGER,"
"SPO2 INTEGER,"
"RR INTEGER,"
"PR INTEGER,"
"TEMP INTEGER,"
"NIBP_SYS INTEGER,"
"NIBP_DIA INTEGER,"
"NIBP_MEAN INTEGER)";
int c_database::m_real_data_rd_cnt;
int c_database::m_real_data_rd_len;
int c_database::sql_callback_read_real_data(void *arg, int nr, char **values, char **names)
{
VALUE_SET* data_set = (VALUE_SET*)arg;
int i = 0;
if((m_real_data_rd_cnt < m_real_data_rd_len) && data_set)
{
data_set[m_real_data_rd_cnt].time = atoi(values[i++]);
data_set[m_real_data_rd_cnt].hr = atoi(values[i++]);
data_set[m_real_data_rd_cnt].spo2 = atoi(values[i++]);
data_set[m_real_data_rd_cnt].rr = atoi(values[i++]);
data_set[m_real_data_rd_cnt].pr = atoi(values[i++]);
data_set[m_real_data_rd_cnt].temp = atoi(values[i++]);
data_set[m_real_data_rd_cnt].nibp_sys = atoi(values[i++]);
data_set[m_real_data_rd_cnt].nibp_dia = atoi(values[i++]);
data_set[m_real_data_rd_cnt].nibp_mean = atoi(values[i++]);
if(i > nr)
{
ASSERT(false);
}
}
//printf("%s,%s,%s,%s,%s,%s\n",values[0],values[1],values[2],values[3],values[4],values[5]);
//fflush(stdout);
m_real_data_rd_cnt++;
return 0;
}
c_database::c_database()
{
m_db_monitor = 0;
}
c_database::~c_database()
{
if(m_db_monitor)
{
sqlite3_close(m_db_monitor);
}
}
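// Reads rows whose TIME lies in [start_time, end_time] into data_set and
// returns the number of rows copied (capped at len).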
int c_database::read(long start_time, long end_time, VALUE_SET* data_set, int len)
{
char sql_buf[256];
if(start_time > end_time)
{
ASSERT(false);
}
m_real_data_rd_cnt = 0;
m_real_data_rd_len = len;
memset(sql_buf, 0, sizeof(sql_buf));
sprintf(sql_buf,
"select * from table_real_data where TIME between %lu and %lu",
start_time,
end_time);
if(sqlite3_exec(m_db_monitor, sql_buf, sql_callback_read_real_data, data_set, 0))
{
ASSERT(false);
}
return ((m_real_data_rd_cnt < m_real_data_rd_len) ? m_real_data_rd_cnt : m_real_data_rd_len);
}
int c_database::write(VALUE_SET &data_set)
{
//printf("wr time:%d\n",timer);
//fflush(stdout);
char sql_buf[256];
memset(sql_buf, 0, sizeof(sql_buf));
sprintf(sql_buf,
"insert into table_real_data values(%d,%d,%d,%d,%d,%d,%d,%d,%d)",
data_set.time,
data_set.hr,
data_set.spo2,
data_set.rr,
data_set.pr,
data_set.temp,
data_set.nibp_sys,
data_set.nibp_dia,
data_set.nibp_mean);
if(sqlite3_exec(m_db_monitor, sql_buf, 0, 0, 0))
{
ASSERT(false);
}
return 0;
}
int c_database::display_all()
{
if(sqlite3_exec(m_db_monitor, "select * from table_real_data", sql_callback_read_real_data, 0, 0))
{
ASSERT(false);
}
return 0;
}
int c_database::init()
{
int ret = -1;
if(sqlite3_open(DATA_BASE_PATH, &m_db_monitor))
{
ASSERT(false);
}
ret = sqlite3_exec(m_db_monitor,sql_create_real_data_table, 0, 0, 0);
if(ret !=0 && ret !=1)
{
ASSERT(false);
}
return 0;
}
| 1,569 |
1,160 | """
Original code by <NAME>: http://www.bryceboe.com/2010/09/01/submitting-binaries-to-virustotal/
Modified by <NAME> <elias at hex-rays.com>
"""
from __future__ import print_function
import hashlib, httplib, mimetypes, os, pprint, simplejson, sys, urlparse
# -----------------------------------------------------------------------
DEFAULT_TYPE = 'application/octet-stream'
FILE_REPORT_URL = 'https://www.virustotal.com/api/get_file_report.json'
SCAN_URL = 'https://www.virustotal.com/api/scan_file.json'
API_KEY = "" # Put API key here. Register an account in VT Community
# -----------------------------------------------------------------------
# The following function is modified from the snippet at:
# http://code.activestate.com/recipes/146306/
def _encode_multipart_formdata(fields, files=()):
"""
fields is a dictionary of name to value for regular form fields.
files is a sequence of (name, filename, value) elements for data to be
uploaded as files.
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for key, value in fields.items():
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
content_type = mimetypes.guess_type(filename)[0] or DEFAULT_TYPE
L.append('Content-Type: %s' % content_type)
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
# -----------------------------------------------------------------------
def _post_multipart(url, fields, files=()):
"""
url is the full url to send the post request to.
fields is a dictionary of name to value for regular form fields.
files is a sequence of (name, filename, value) elements for data to be
uploaded as files.
Return body of http response.
"""
content_type, data = _encode_multipart_formdata(fields, files)
url_parts = urlparse.urlparse(url)
if url_parts.scheme == 'http':
h = httplib.HTTPConnection(url_parts.netloc)
elif url_parts.scheme == 'https':
h = httplib.HTTPSConnection(url_parts.netloc)
else:
raise Exception('Unsupported URL scheme')
path = urlparse.urlunparse(('', '') + url_parts[2:])
h.request('POST', path, data, {'content-type':content_type})
return h.getresponse().read()
# -----------------------------------------------------------------------
def set_apikey(key, dbg = False):
"""
Set the VT API key
"""
global API_KEY
API_KEY = key
if dbg:
httplib.HTTPConnection.debuglevel = 1
# -----------------------------------------------------------------------
def scan_file(filename):
"""
Uploads a file for scanning.
@param filename: The filename to upload
@return: - None if upload failed
- scan_id value if upload succeeds
- raises an exception on IO failures
"""
files = [('file', filename, open(filename, 'rb').read())]
json = _post_multipart(SCAN_URL, {'key':API_KEY}, files)
data = simplejson.loads(json)
return str(data['scan_id']) if data['result'] == 1 else None
# -----------------------------------------------------------------------
def get_file_md5_hash(filename):
f = open(filename, 'rb')
r = hashlib.md5(f.read()).hexdigest()
f.close()
return r
# -----------------------------------------------------------------------
def get_file_report(filename=None, md5sum=None):
"""
Returns a report for a file or md5sum.
@param filename: File name to get report. The file is used just
to compute its MD5Sum
@param md5sum: MD5sum string (in case filename was not passed)
@return: - None: if file was not previously analyzed
- A dictionary if report exists: key=scanner, value=reported name
"""
if filename is None and md5sum is None:
raise Exception('Either filename or md5sum should be passed!')
# Filename passed? Compute its MD5
if filename:
global LAST_FILE_HASH
LAST_FILE_HASH = md5sum = get_file_md5_hash(filename)
# Form the request
json = _post_multipart(FILE_REPORT_URL, {'resource':md5sum, 'key':API_KEY})
data = simplejson.loads(json)
if data['result'] != 1:
# No results
return None
else:
# date, result_dict = data['report']
return data['report'][1]
# -----------------------------------------------------------------------
def pretty_print(obj):
pprint.pprint(obj)
# -----------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: %s filename' % sys.argv[0])
sys.exit(1)
filename = sys.argv[1]
if not os.path.isfile(filename):
print('%s is not a valid file' % filename)
sys.exit(1)
pretty_print(get_file_report(filename=filename)) | 1,903 |
385 | <reponame>ceball/nbval
def test_b():
raise AssertionError
def test_c():
assert 1 == 2
| 41 |
1,085 | /*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.datastore;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.IOException;
import java.util.UUID;
import org.junit.Test;
import com.dremio.datastore.format.Format;
import com.dremio.datastore.format.compound.FormatTestArtifacts;
/**
* Validates that functioning serializers are returned from the CoreStoreSerializerFactory.
* <p>
* Validation is performed by taking some type, serializing, and deserializing it back to the original value.
* <p>
* We do not test protostuff because there are no protostuff objects in this module. We did not want to find a module
* with protostuff objects that does not depend on datastore, nor to define a test-only proto.
*/
public abstract class AbstractTestByteSerializerFactory extends FormatTestArtifacts {
@SuppressWarnings("unchecked")
protected <T> Serializer<T, byte[]> getSerializer(Format<T> format) {
return (Serializer<T, byte[]>) format.apply(ByteSerializerFactory.INSTANCE);
}
protected abstract <T> void runCircularTest(Format<T> format, T original) throws IOException;
@Test
public void stringFormat() throws DatastoreFatalException, IOException {
runCircularTest(Format.ofString(), TEST_STRING);
}
@Test
public void bytesFormat() throws DatastoreFatalException, IOException {
runCircularTest(Format.ofBytes(), TEST_STRING.getBytes(UTF_8));
}
@Test
public void uuidFormat() throws DatastoreFatalException, IOException {
runCircularTest(Format.ofUUID(), UUID.randomUUID());
}
/**
* Wraps a string so that we may test the wrapped format.
*/
private static class Nesting {
private final String inner;
Nesting(String inner) {
this.inner = inner;
}
@Override
public boolean equals(Object that) {
if (!(that instanceof Nesting)) {
return false;
}
final Nesting other = (Nesting) that;
return this.inner.equals(other.inner);
}
@Override
public int hashCode() {
// Doesn't matter. Not used.
// Just to pass checkstyle.
return -1;
}
/**
* Used by the wrapped format to convert the Nesting to a string.
*/
private static class NestingConverter extends Converter<Nesting, String> {
@Override
public String convert(Nesting n) {
return n.inner;
}
@Override
public Nesting revert(String n) {
return new Nesting(n);
}
}
}
@Test
public void wrappedFormat() throws DatastoreFatalException, IOException {
final Format<Nesting> format = Format.wrapped(Nesting.class, new Nesting.NestingConverter(), Format.ofString());
runCircularTest(format, new Nesting(TEST_STRING));
}
@Test
public void protobufFormat() throws DatastoreFatalException, IOException {
runCircularTest(PROTOBUFF_FORMAT, PROTOBUFF_ORIGINAL_STRING);
}
}
| 1,129 |
306 | <gh_stars>100-1000
/*
* Copyright (c) 2007-present, <NAME> & <NAME>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of JSR-310 nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.threeten.extra;
import static java.time.DayOfWeek.FRIDAY;
import static java.time.DayOfWeek.MONDAY;
import static java.time.DayOfWeek.SATURDAY;
import static java.time.DayOfWeek.SUNDAY;
import static java.time.Month.DECEMBER;
import static java.time.Month.JANUARY;
import static java.time.temporal.ChronoUnit.CENTURIES;
import static java.time.temporal.ChronoUnit.DAYS;
import static java.time.temporal.ChronoUnit.DECADES;
import static java.time.temporal.ChronoUnit.ERAS;
import static java.time.temporal.ChronoUnit.FOREVER;
import static java.time.temporal.ChronoUnit.HALF_DAYS;
import static java.time.temporal.ChronoUnit.HOURS;
import static java.time.temporal.ChronoUnit.MILLENNIA;
import static java.time.temporal.ChronoUnit.MINUTES;
import static java.time.temporal.ChronoUnit.MONTHS;
import static java.time.temporal.ChronoUnit.NANOS;
import static java.time.temporal.ChronoUnit.SECONDS;
import static java.time.temporal.ChronoUnit.WEEKS;
import static java.time.temporal.ChronoUnit.YEARS;
import static java.time.temporal.IsoFields.QUARTER_YEARS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.time.DateTimeException;
import java.time.LocalDate;
import java.time.Month;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.time.temporal.ChronoUnit;
import java.time.temporal.Temporal;
import java.time.temporal.TemporalAdjuster;
import java.time.temporal.TemporalUnit;
import java.time.temporal.UnsupportedTemporalTypeException;
import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import com.tngtech.junit.dataprovider.DataProvider;
import com.tngtech.junit.dataprovider.UseDataProvider;
/**
* Test Temporals.
*/
public class TestTemporals {
//-----------------------------------------------------------------------
// nextWorkingDay()
//-----------------------------------------------------------------------
@Test
public void test_nextWorkingDay_serialization() throws IOException, ClassNotFoundException {
TemporalAdjuster test = Temporals.nextWorkingDay();
assertTrue(test instanceof Serializable);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
oos.writeObject(test);
}
try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
assertSame(test, ois.readObject());
}
}
@Test
public void test_nextWorkingDay() {
for (Month month : Month.values()) {
for (int i = 1; i <= month.length(false); i++) {
LocalDate date = LocalDate.of(2007, month, i);
LocalDate test = (LocalDate) Temporals.nextWorkingDay().adjustInto(date);
assertTrue(test.isAfter(date));
assertFalse(test.getDayOfWeek().equals(SATURDAY));
assertFalse(test.getDayOfWeek().equals(SUNDAY));
switch (date.getDayOfWeek()) {
case FRIDAY:
case SATURDAY:
assertEquals(MONDAY, test.getDayOfWeek());
break;
default:
assertEquals(date.getDayOfWeek().plus(1), test.getDayOfWeek());
}
if (test.getYear() == 2007) {
int dayDiff = test.getDayOfYear() - date.getDayOfYear();
switch (date.getDayOfWeek()) {
case FRIDAY:
assertEquals(3, dayDiff);
break;
case SATURDAY:
assertEquals(2, dayDiff);
break;
default:
assertEquals(1, dayDiff);
}
} else {
assertEquals(2008, test.getYear());
assertEquals(JANUARY, test.getMonth());
assertEquals(1, test.getDayOfMonth());
}
}
}
}
@Test
public void test_nextWorkingDay_yearChange() {
LocalDate friday = LocalDate.of(2010, DECEMBER, 31);
Temporal test = Temporals.nextWorkingDay().adjustInto(friday);
assertEquals(LocalDate.of(2011, JANUARY, 3), test);
LocalDate saturday = LocalDate.of(2011, DECEMBER, 31);
test = Temporals.nextWorkingDay().adjustInto(saturday);
assertEquals(LocalDate.of(2012, JANUARY, 2), test);
}
//-----------------------------------------------------------------------
// nextWorkingDayOrSame()
//-----------------------------------------------------------------------
@Test
public void test_nextWorkingDayOrSame_serialization() throws IOException, ClassNotFoundException {
TemporalAdjuster test = Temporals.nextWorkingDayOrSame();
assertTrue(test instanceof Serializable);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
oos.writeObject(test);
}
try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
assertSame(ois.readObject(), test);
}
}
@Test
public void test_nextWorkingDayOrSame() {
for (Month month : Month.values()) {
for (int i = 1; i <= month.length(false); i++) {
LocalDate date = LocalDate.of(2007, month, i);
LocalDate test = (LocalDate) Temporals.nextWorkingDayOrSame().adjustInto(date);
assertFalse(test.getDayOfWeek().equals(SATURDAY));
assertFalse(test.getDayOfWeek().equals(SUNDAY));
switch (date.getDayOfWeek()) {
case SATURDAY:
case SUNDAY:
assertEquals(test.getDayOfWeek(), MONDAY);
break;
default:
assertEquals(date.getDayOfWeek(), test.getDayOfWeek());
}
if (test.getYear() == 2007) {
int dayDiff = test.getDayOfYear() - date.getDayOfYear();
switch (date.getDayOfWeek()) {
case SATURDAY:
assertEquals(dayDiff, 2);
break;
case SUNDAY:
assertEquals(dayDiff, 1);
break;
default:
assertEquals(dayDiff, 0);
}
} else {
assertEquals(test.getYear(), 2008);
assertEquals(test.getMonth(), JANUARY);
assertEquals(test.getDayOfMonth(), 1);
}
}
}
}
@Test
public void test_nextWorkingDayOrSame_yearChange() {
LocalDate saturday = LocalDate.of(2016, DECEMBER, 31);
Temporal test = Temporals.nextWorkingDayOrSame().adjustInto(saturday);
assertEquals(LocalDate.of(2017, JANUARY, 2), test);
LocalDate sunday = LocalDate.of(2017, DECEMBER, 31);
test = Temporals.nextWorkingDayOrSame().adjustInto(sunday);
assertEquals(LocalDate.of(2018, JANUARY, 1), test);
}
//-----------------------------------------------------------------------
// previousWorkingDay()
//-----------------------------------------------------------------------
@Test
public void test_previousWorkingDay_serialization() throws IOException, ClassNotFoundException {
TemporalAdjuster test = Temporals.previousWorkingDay();
assertTrue(test instanceof Serializable);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
oos.writeObject(test);
}
try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
assertSame(test, ois.readObject());
}
}
@Test
public void test_previousWorkingDay() {
for (Month month : Month.values()) {
for (int i = 1; i <= month.length(false); i++) {
LocalDate date = LocalDate.of(2007, month, i);
LocalDate test = (LocalDate) Temporals.previousWorkingDay().adjustInto(date);
assertTrue(test.isBefore(date));
assertFalse(test.getDayOfWeek().equals(SATURDAY));
assertFalse(test.getDayOfWeek().equals(SUNDAY));
switch (date.getDayOfWeek()) {
case MONDAY:
case SUNDAY:
assertEquals(FRIDAY, test.getDayOfWeek());
break;
default:
assertEquals(date.getDayOfWeek().minus(1), test.getDayOfWeek());
}
if (test.getYear() == 2007) {
int dayDiff = test.getDayOfYear() - date.getDayOfYear();
switch (date.getDayOfWeek()) {
case MONDAY:
assertEquals(-3, dayDiff);
break;
case SUNDAY:
assertEquals(-2, dayDiff);
break;
default:
assertEquals(-1, dayDiff);
}
} else {
assertEquals(2006, test.getYear());
assertEquals(DECEMBER, test.getMonth());
assertEquals(29, test.getDayOfMonth());
}
}
}
}
@Test
public void test_previousWorkingDay_yearChange() {
LocalDate monday = LocalDate.of(2011, JANUARY, 3);
Temporal test = Temporals.previousWorkingDay().adjustInto(monday);
assertEquals(LocalDate.of(2010, DECEMBER, 31), test);
LocalDate sunday = LocalDate.of(2011, JANUARY, 2);
test = Temporals.previousWorkingDay().adjustInto(sunday);
assertEquals(LocalDate.of(2010, DECEMBER, 31), test);
}
//-----------------------------------------------------------------------
// previousWorkingDayOrSame()
//-----------------------------------------------------------------------
@Test
public void test_previousWorkingDayOrSame_serialization() throws IOException, ClassNotFoundException {
TemporalAdjuster test = Temporals.previousWorkingDayOrSame();
assertTrue(test instanceof Serializable);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
oos.writeObject(test);
}
try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
assertSame(ois.readObject(), test);
}
}
@Test
public void test_previousWorkingDayOrSame() {
for (Month month : Month.values()) {
for (int i = 1; i <= month.length(false); i++) {
LocalDate date = LocalDate.of(2007, month, i);
LocalDate test = (LocalDate) Temporals.previousWorkingDayOrSame().adjustInto(date);
assertFalse(test.getDayOfWeek().equals(SATURDAY));
assertFalse(test.getDayOfWeek().equals(SUNDAY));
switch (date.getDayOfWeek()) {
case SATURDAY:
case SUNDAY:
assertEquals(test.getDayOfWeek(), FRIDAY);
break;
default:
assertEquals(date.getDayOfWeek(), test.getDayOfWeek());
}
if (test.getYear() == 2007) {
int dayDiff = test.getDayOfYear() - date.getDayOfYear();
switch (date.getDayOfWeek()) {
case SATURDAY:
assertEquals(dayDiff, -1);
break;
case SUNDAY:
assertEquals(dayDiff, -2);
break;
default:
assertEquals(dayDiff, 0);
}
} else {
assertEquals(test.getYear(), 2006);
assertEquals(test.getMonth(), DECEMBER);
assertEquals(test.getDayOfMonth(), 29);
}
}
}
}
@Test
public void test_previousWorkingDayOrSame_yearChange() {
LocalDate sunday = LocalDate.of(2011, JANUARY, 2);
Temporal test = Temporals.previousWorkingDayOrSame().adjustInto(sunday);
assertEquals(test, LocalDate.of(2010, DECEMBER, 31));
LocalDate saturday = LocalDate.of(2011, JANUARY, 1);
test = Temporals.previousWorkingDayOrSame().adjustInto(saturday);
assertEquals(test, LocalDate.of(2010, DECEMBER, 31));
}
//-----------------------------------------------------------------------
// parseFirstMatching()
//-----------------------------------------------------------------------
@DataProvider
public static Object[][] data_parseFirstMatching() {
return new Object[][] {
{"2016-09-06", DateTimeFormatter.ISO_LOCAL_DATE, DateTimeFormatter.BASIC_ISO_DATE},
{"20160906", DateTimeFormatter.ISO_LOCAL_DATE, DateTimeFormatter.BASIC_ISO_DATE},
};
}
@ParameterizedTest
@UseDataProvider("data_parseFirstMatching")
public void test_parseFirstMatching(String text, DateTimeFormatter fmt1, DateTimeFormatter fmt2) {
assertEquals(LocalDate.of(2016, 9, 6), Temporals.parseFirstMatching(text, LocalDate::from, fmt1, fmt2));
}
@Test
public void test_parseFirstMatching_zero() {
assertThrows(DateTimeParseException.class, () -> Temporals.parseFirstMatching("2016-09-06", LocalDate::from));
}
@Test
public void test_parseFirstMatching_one() {
assertEquals(LocalDate.of(2016, 9, 6), Temporals.parseFirstMatching("2016-09-06", LocalDate::from, DateTimeFormatter.ISO_LOCAL_DATE));
}
@Test
public void test_parseFirstMatching_twoNoMatch() {
assertThrows(DateTimeParseException.class, () -> Temporals.parseFirstMatching("2016", LocalDate::from, DateTimeFormatter.ISO_LOCAL_DATE, DateTimeFormatter.BASIC_ISO_DATE));
}
//-----------------------------------------------------------------------
// chronoUnit() / timeUnit()
//-----------------------------------------------------------------------
@DataProvider
public static Object[][] data_timeUnitConversion() {
return new Object[][] {
{ChronoUnit.NANOS, TimeUnit.NANOSECONDS},
{ChronoUnit.MICROS, TimeUnit.MICROSECONDS},
{ChronoUnit.MILLIS, TimeUnit.MILLISECONDS},
{ChronoUnit.SECONDS, TimeUnit.SECONDS},
{ChronoUnit.MINUTES, TimeUnit.MINUTES},
{ChronoUnit.HOURS, TimeUnit.HOURS},
{ChronoUnit.DAYS, TimeUnit.DAYS},
};
}
@ParameterizedTest
@UseDataProvider("data_timeUnitConversion")
public void test_timeUnit(ChronoUnit chronoUnit, TimeUnit timeUnit) {
assertEquals(timeUnit, Temporals.timeUnit(chronoUnit));
}
@Test
public void test_timeUnit_unknown() {
assertThrows(IllegalArgumentException.class, () -> Temporals.timeUnit(ChronoUnit.MONTHS));
}
@Test
public void test_timeUnit_null() {
assertThrows(NullPointerException.class, () -> Temporals.timeUnit(null));
}
@ParameterizedTest
@UseDataProvider("data_timeUnitConversion")
public void test_chronoUnit(ChronoUnit chronoUnit, TimeUnit timeUnit) {
assertEquals(chronoUnit, Temporals.chronoUnit(timeUnit));
}
@Test
public void test_chronoUnit_null() {
assertThrows(NullPointerException.class, () -> Temporals.chronoUnit(null));
}
//-----------------------------------------------------------------------
// convertAmount()
//-------------------------------------------------------------------------
@DataProvider
public static Object[][] data_convertAmount() {
return new Object[][] {
{2L, NANOS, SECONDS, 0L, 2L},
{999_999_999L, NANOS, SECONDS, 0L, 999_999_999L},
{1_000_000_000L, NANOS, SECONDS, 1L, 0L},
{1_000_000_001L, NANOS, SECONDS, 1L, 1L},
{2L, NANOS, MINUTES, 0L, 2L},
{59_999_999_999L, NANOS, MINUTES, 0L, 59_999_999_999L},
{60_000_000_000L, NANOS, MINUTES, 1L, 0L},
{60_000_000_001L, NANOS, MINUTES, 1L, 1L},
{2L, NANOS, HOURS, 0L, 2L},
{3599_999_999_999L, NANOS, HOURS, 0L, 3599_999_999_999L},
{3600_000_000_000L, NANOS, HOURS, 1L, 0L},
{3600_000_000_001L, NANOS, HOURS, 1L, 1L},
{2L, NANOS, HALF_DAYS, 0L, 2L},
{3600_000_000_000L * 12 * 3, NANOS, HALF_DAYS, 3L, 0L},
{2L, NANOS, DAYS, 0L, 2L},
{3600_000_000_000L * 24 * 3, NANOS, DAYS, 3L, 0L},
{2L, NANOS, WEEKS, 0L, 2L},
{3600_000_000_000L * 24 * 7 * 3, NANOS, WEEKS, 3L, 0L},
{2L, SECONDS, MINUTES, 0L, 2L},
{59L, SECONDS, MINUTES, 0L, 59L},
{60L, SECONDS, MINUTES, 1L, 0L},
{61L, SECONDS, MINUTES, 1L, 1L},
{2L, SECONDS, HOURS, 0L, 2L},
{3599L, SECONDS, HOURS, 0L, 3599L},
{3600L, SECONDS, HOURS, 1L, 0L},
{3601L, SECONDS, HOURS, 1L, 1L},
{2L, SECONDS, HALF_DAYS, 0L, 2L},
{3600L * 12 * 3, SECONDS, HALF_DAYS, 3L, 0L},
{2L, SECONDS, DAYS, 0L, 2L},
{3600L * 24 * 3, SECONDS, DAYS, 3L, 0L},
{2L, SECONDS, WEEKS, 0L, 2L},
{3600L * 24 * 7 * 3, SECONDS, WEEKS, 3L, 0L},
{2L, MINUTES, HOURS, 0L, 2L},
{59L, MINUTES, HOURS, 0L, 59L},
{60L, MINUTES, HOURS, 1L, 0L},
{61L, MINUTES, HOURS, 1L, 1L},
{2L, MINUTES, HALF_DAYS, 0L, 2L},
{60L * 12 * 3 + 1, MINUTES, HALF_DAYS, 3L, 1L},
{2L, MINUTES, DAYS, 0L, 2L},
{60L * 24 * 3 + 1, MINUTES, DAYS, 3L, 1L},
{2L, MINUTES, WEEKS, 0L, 2L},
{60L * 24 * 7 * 3 + 1, MINUTES, WEEKS, 3L, 1L},
{2L, HOURS, HALF_DAYS, 0L, 2L},
{12L * 3 + 1, HOURS, HALF_DAYS, 3L, 1L},
{2L, HOURS, DAYS, 0L, 2L},
{24L * 3 + 1, HOURS, DAYS, 3L, 1L},
{2L, HOURS, WEEKS, 0L, 2L},
{24L * 7 * 3 + 1, HOURS, WEEKS, 3L, 1L},
{1L, HALF_DAYS, DAYS, 0L, 1L},
{2L * 3 + 1, HALF_DAYS, DAYS, 3L, 1L},
{1L, HALF_DAYS, WEEKS, 0L, 1L},
{2L * 7 * 3 + 1, HALF_DAYS, WEEKS, 3L, 1L},
{1L, DAYS, WEEKS, 0L, 1L},
{7L * 3 + 1, DAYS, WEEKS, 3L, 1L},
{2L, SECONDS, NANOS, 2_000_000_000L, 0L},
{2L, MINUTES, NANOS, 2_000_000_000L * 60, 0L},
{2L, HOURS, NANOS, 2_000_000_000L * 3600, 0L},
{2L, HALF_DAYS, NANOS, 2_000_000_000L * 3600 * 12, 0L},
{2L, DAYS, NANOS, 2_000_000_000L * 3600 * 24, 0L},
{2L, WEEKS, NANOS, 2_000_000_000L * 3600 * 24 * 7, 0L},
{2L, MINUTES, SECONDS, 2L * 60, 0L},
{2L, HOURS, SECONDS, 2L * 3600, 0L},
{2L, HALF_DAYS, SECONDS, 2L * 3600 * 12, 0L},
{2L, DAYS, SECONDS, 2L * 3600 * 24, 0L},
{2L, WEEKS, SECONDS, 2L * 3600 * 24 * 7, 0L},
{2L, HOURS, MINUTES, 2L * 60, 0L},
{2L, HALF_DAYS, MINUTES, 2L * 60 * 12, 0L},
{2L, DAYS, MINUTES, 2L * 60 * 24, 0L},
{2L, WEEKS, MINUTES, 2L * 60 * 24 * 7, 0L},
{2L, HALF_DAYS, HOURS, 2L * 12, 0L},
{2L, DAYS, HOURS, 2L * 24, 0L},
{2L, WEEKS, HOURS, 2L * 24 * 7, 0L},
{2L, DAYS, HALF_DAYS, 2L * 2, 0L},
{2L, WEEKS, HALF_DAYS, 2L * 2 * 7, 0L},
{2L, WEEKS, DAYS, 2L * 7, 0L},
{2L * 3 + 1, MONTHS, QUARTER_YEARS, 2L, 1L},
{2L * 12 + 1, MONTHS, YEARS, 2L, 1L},
{2L * 120 + 1, MONTHS, DECADES, 2L, 1L},
{2L * 1200 + 1, MONTHS, CENTURIES, 2L, 1L},
{2L * 12000 + 1, MONTHS, MILLENNIA, 2L, 1L},
{2L * 4 + 1, QUARTER_YEARS, YEARS, 2L, 1L},
{2L * 40 + 1, QUARTER_YEARS, DECADES, 2L, 1L},
{2L * 400 + 1, QUARTER_YEARS, CENTURIES, 2L, 1L},
{2L * 4000 + 1, QUARTER_YEARS, MILLENNIA, 2L, 1L},
{2L * 10 + 1, YEARS, DECADES, 2L, 1L},
{2L * 100 + 1, YEARS, CENTURIES, 2L, 1L},
{2L * 1000 + 1, YEARS, MILLENNIA, 2L, 1L},
{2L * 10 + 1, DECADES, CENTURIES, 2L, 1L},
{2L * 100 + 1, DECADES, MILLENNIA, 2L, 1L},
{2L * 10 + 1, CENTURIES, MILLENNIA, 2L, 1L},
{2L, QUARTER_YEARS, MONTHS, 2L * 3, 0L},
{2L, YEARS, MONTHS, 2L * 12, 0L},
{2L, DECADES, MONTHS, 2L * 120, 0L},
{2L, CENTURIES, MONTHS, 2L * 1200, 0L},
{2L, MILLENNIA, MONTHS, 2L * 12000, 0L},
{2L, YEARS, QUARTER_YEARS, 2L * 4, 0L},
{2L, DECADES, QUARTER_YEARS, 2L * 40, 0L},
{2L, CENTURIES, QUARTER_YEARS, 2L * 400, 0L},
{2L, MILLENNIA, QUARTER_YEARS, 2L * 4000, 0L},
{2L, DECADES, YEARS, 2L * 10, 0L},
{2L, CENTURIES, YEARS, 2L * 100, 0L},
{2L, MILLENNIA, YEARS, 2L * 1000, 0L},
{2L, CENTURIES, DECADES, 2L * 10, 0L},
{2L, MILLENNIA, DECADES, 2L * 100, 0L},
{2L, MILLENNIA, CENTURIES, 2L * 10, 0L},
};
}
@ParameterizedTest
@UseDataProvider("data_convertAmount")
public void test_convertAmount(
long fromAmount, TemporalUnit fromUnit, TemporalUnit resultUnit,
long resultWhole, long resultRemainder) {
long[] result = Temporals.convertAmount(fromAmount, fromUnit, resultUnit);
assertEquals(resultWhole, result[0]);
assertEquals(resultRemainder, result[1]);
}
@ParameterizedTest
@UseDataProvider("data_convertAmount")
public void test_convertAmount_negative(
long fromAmount, TemporalUnit fromUnit, TemporalUnit resultUnit,
long resultWhole, long resultRemainder) {
long[] result = Temporals.convertAmount(-fromAmount, fromUnit, resultUnit);
assertEquals(-resultWhole, result[0]);
assertEquals(-resultRemainder, result[1]);
}
@Test
public void test_convertAmountSameUnit_zero() {
for (ChronoUnit unit : ChronoUnit.values()) {
if (unit != ERAS && unit != FOREVER) {
long[] result = Temporals.convertAmount(0, unit, unit);
assertEquals(0, result[0]);
assertEquals(0, result[1]);
}
}
}
@Test
public void test_convertAmountSameUnit_nonZero() {
for (ChronoUnit unit : ChronoUnit.values()) {
if (unit != ERAS && unit != FOREVER) {
long[] result = Temporals.convertAmount(2, unit, unit);
assertEquals(2, result[0]);
assertEquals(0, result[1]);
}
}
}
@DataProvider
public static Object[][] data_convertAmountInvalid() {
return new Object[][] {
{SECONDS, MONTHS},
{SECONDS, QUARTER_YEARS},
{SECONDS, YEARS},
{SECONDS, DECADES},
{SECONDS, CENTURIES},
{SECONDS, MILLENNIA},
{MONTHS, SECONDS},
{QUARTER_YEARS, SECONDS},
{YEARS, SECONDS},
{DECADES, SECONDS},
{CENTURIES, SECONDS},
{MILLENNIA, SECONDS},
};
}
@ParameterizedTest
@UseDataProvider("data_convertAmountInvalid")
public void test_convertAmountInvalid(TemporalUnit fromUnit, TemporalUnit resultUnit) {
assertThrows(DateTimeException.class, () -> Temporals.convertAmount(1, fromUnit, resultUnit));
}
@DataProvider
public static Object[][] data_convertAmountInvalidUnsupported() {
return new Object[][] {
{SECONDS, ERAS},
{ERAS, SECONDS},
{YEARS, ERAS},
{ERAS, YEARS},
{SECONDS, FOREVER},
{FOREVER, SECONDS},
{YEARS, FOREVER},
{FOREVER, YEARS},
{FOREVER, ERAS},
{ERAS, FOREVER},
};
}
@ParameterizedTest
@UseDataProvider("data_convertAmountInvalidUnsupported")
public void test_convertAmountInvalidUnsupported(TemporalUnit fromUnit, TemporalUnit resultUnit) {
assertThrows(UnsupportedTemporalTypeException.class, () -> Temporals.convertAmount(1, fromUnit, resultUnit));
}
}
| 13,100 |
1,583 | import helper_addressbook
from bitmessageqt.support import createAddressIfNeeded
from main import TestBase
class TestAddressbook(TestBase):
"""Test case for addressbook"""
def test_add_own_address_to_addressbook(self):
"""Checking own address adding in addressbook"""
try:
address = createAddressIfNeeded(self.window)
self.assertFalse(
helper_addressbook.insert(label='test', address=address))
except IndexError:
self.fail("Can't generate addresses")
| 200 |
1,538 | <filename>src/kudu/ranger/mini_ranger.h
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <glog/logging.h>
#include "kudu/gutil/port.h"
#include "kudu/gutil/strings/substitute.h"
#include "kudu/postgres/mini_postgres.h"
#include "kudu/ranger/ranger.pb.h"
#include "kudu/util/curl_util.h"
#include "kudu/util/env.h"
#include "kudu/util/path_util.h"
#include "kudu/util/status.h"
#include "kudu/util/test_util.h"
namespace kudu {
class EasyJson;
class Subprocess;
namespace ranger {
// List of usernames to be used in PolicyItem;
typedef std::vector<std::string> UserList;
// Tuple of a vector of usernames, a vector of allowed actions and a delegate
// admin flag to be used in AuthorizationPolicy. The number of users and actions
// doesn't have to match; their cross-product is taken.
typedef std::tuple<UserList, std::vector<ActionPB>, bool> PolicyItem;
// Policy key used for searching policies_ (values are PolicyItems).
typedef std::tuple<std::vector<std::string>,
std::vector<std::string>,
std::vector<std::string>> PolicyKey;
// The AuthorizationPolicy contains a set of privileges on a resource to one or
// more users. 'items' is a vector of user-list of actions pair. This struct can
// be used to create new Ranger policies in tests. The policy name is based on
// its contents (list of databases, tables and columns).
struct AuthorizationPolicy {
std::vector<std::string> databases;
std::vector<std::string> tables;
std::vector<std::string> columns;
std::vector<PolicyItem> items;
};
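// Example (sketch; the ActionPB value shown is illustrative):
//   AuthorizationPolicy policy;
//   policy.databases = {"db1"};
//   policy.tables = {"t1"};
//   policy.items.push_back(std::make_tuple(UserList({"alice"}),
//                                          std::vector<ActionPB>({ActionPB::ALL}),
//                                          /*delegate_admin=*/false));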
// Wrapper around Apache Ranger to be used in integration tests.
class MiniRanger {
public:
explicit MiniRanger(std::string host)
: MiniRanger(GetTestDataDirectory(), std::move(host)) {}
~MiniRanger();
MiniRanger(std::string data_root, std::string host)
: data_root_(std::move(data_root)),
host_(std::move(host)),
mini_pg_(data_root_, host_),
kerberos_(false),
env_(Env::Default()) {
curl_.set_auth(CurlAuthType::BASIC, "admin", "admin");
}
// Starts Ranger and its dependencies.
Status Start() WARN_UNUSED_RESULT;
// Stops Ranger and its dependencies.
Status Stop() WARN_UNUSED_RESULT;
// Adds a new policy to Ranger.
Status AddPolicy(AuthorizationPolicy policy) WARN_UNUSED_RESULT;
// Creates the client configs files in the given directory.
Status CreateClientConfig(const std::string& client_config_dir) WARN_UNUSED_RESULT;
void EnableKerberos(std::string krb5_config,
std::string admin_ktpath,
std::string lookup_ktpath,
std::string spnego_ktpath) {
kerberos_ = true;
krb5_config_ = std::move(krb5_config);
admin_ktpath_ = std::move(admin_ktpath);
lookup_ktpath_ = std::move(lookup_ktpath);
spnego_ktpath_ = std::move(spnego_ktpath);
}
void set_policy_poll_interval_ms(uint32_t policy_poll_interval_ms) {
policy_poll_interval_ms_ = policy_poll_interval_ms;
}
std::string admin_url() const {
return ranger_admin_url_;
}
private:
// Starts the Ranger service.
Status StartRanger() WARN_UNUSED_RESULT;
// Initializes Ranger within 'admin_home' (home directory of the Ranger
// admin). Sets 'fresh_install' to true if 'admin_home' didn't exist before
// calling InitRanger().
Status InitRanger(std::string admin_home, bool* fresh_install)
WARN_UNUSED_RESULT;
// Creates configuration files.
Status CreateConfigs() WARN_UNUSED_RESULT;
// Initializes Ranger's database.
Status DbSetup(const std::string& admin_home, const std::string& ews_dir,
const std::string& web_app_dir) WARN_UNUSED_RESULT;
// Creates a Kudu service in Ranger.
Status CreateKuduService() WARN_UNUSED_RESULT;
// Sends a POST request to Ranger with 'payload'.
Status PostToRanger(std::string url, EasyJson payload) WARN_UNUSED_RESULT;
// Returns Ranger admin's home directory.
std::string ranger_admin_home() const {
return JoinPathSegments(data_root_, "ranger-admin");
}
std::string bin_dir() const {
std::string exe;
CHECK_OK(env_->GetExecutablePath(&exe));
return DirName(exe);
}
// Returns classpath for Ranger.
std::string ranger_classpath() const {
std::string admin_home = ranger_admin_home();
return strings::Substitute(
"$0:$1/lib/*:$2/lib/*:$3/*:$4:$5",
admin_home, JoinPathSegments(ranger_home_, "ews"), java_home_,
hadoop_home_, JoinPathSegments(bin_dir(), "postgresql.jar"),
JoinPathSegments(ranger_home_, "ews/webapp"));
}
// Directory in which to put all our stuff.
const std::string data_root_;
const std::string host_;
postgres::MiniPostgres mini_pg_;
std::unique_ptr<Subprocess> process_;
// URL of the Ranger admin REST API.
std::string ranger_admin_url_;
// Locations in which to find Hadoop, Ranger, and Java.
// These may be in the thirdparty build, or may be shared across tests. As
// such, their contents should be treated as read-only.
std::string hadoop_home_;
std::string ranger_home_;
std::string java_home_;
bool kerberos_;
std::string admin_ktpath_;
std::string lookup_ktpath_;
std::string spnego_ktpath_;
std::string krb5_config_;
Env* env_;
EasyCurl curl_;
uint16_t port_ = 0;
// Determines how frequently clients fetch policies from the server. The
// default is 30s which aligns with Ranger's default.
uint32_t policy_poll_interval_ms_ = 30000;
// Stores existing policies since starting the MiniRanger instance. This is
// used for adding new policy items (list of users and privileges) to existing
// policies (resources) as Ranger doesn't support this and we need to delete
// it and recreate it.
std::map<PolicyKey, std::vector<PolicyItem>> policies_;
};
} // namespace ranger
} // namespace kudu
| 2,313 |
9,782 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator;
import com.facebook.presto.common.Page;
import com.facebook.presto.common.array.AdaptiveLongBigArray;
import com.google.common.collect.ImmutableList;
import it.unimi.dsi.fastutil.ints.Int2ObjectMap;
import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import it.unimi.dsi.fastutil.ints.IntArrays;
import it.unimi.dsi.fastutil.ints.IntComparator;
import org.openjdk.jol.info.ClassLayout;
import java.util.Iterator;
import java.util.List;
import java.util.function.IntFunction;
import static com.facebook.presto.operator.SyntheticAddress.decodePosition;
import static com.facebook.presto.operator.SyntheticAddress.decodeSliceIndex;
import static com.google.common.base.Preconditions.checkState;
import static io.airlift.slice.SizeOf.sizeOf;
import static java.util.Objects.requireNonNull;
/**
* Maintains position links in sorted order by build side expression.
* Then iteration over position links uses a set of {@code searchFunctions} which needs to be compatible
* with the expression used for sorting.
* The binary search is used to quickly skip positions which would not match filter function from join condition.
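* For example, with an ascending sort channel and a conjunct of the form {@code build.sortKey >= f(probe)},
* {@code start()} can binary search the sorted chain for the first build position satisfying the filter.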
*/
public final class SortedPositionLinks
implements PositionLinks
{
private static final int INSTANCE_SIZE = ClassLayout.parseClass(SortedPositionLinks.class).instanceSize();
public static class FactoryBuilder
implements PositionLinks.FactoryBuilder
{
// Cache lambda instance for use with computeIfAbsent that ensures the method resolves to computeIfAbsent(int, IntFunction<T>)
// instead of computeIfAbsent(int, Int2ObjectFunction<T>) which does (slightly) more work internally
private static final IntFunction<IntArrayList> NEW_INT_LIST = (ignored) -> new IntArrayList();
private final Int2ObjectOpenHashMap<IntArrayList> positionLinks;
private final int size;
private final PositionComparator comparator;
private final PagesHashStrategy pagesHashStrategy;
private final AdaptiveLongBigArray addresses;
public FactoryBuilder(int size, PagesHashStrategy pagesHashStrategy, AdaptiveLongBigArray addresses)
{
this.size = size;
this.comparator = new PositionComparator(pagesHashStrategy, addresses);
this.pagesHashStrategy = pagesHashStrategy;
this.addresses = addresses;
positionLinks = new Int2ObjectOpenHashMap<>();
}
@Override
public int link(int from, int to)
{
// don't add _from_ row to chain if its sort channel value is null
if (isNull(from)) {
// _to_ row sort channel value might be null. However, in such
// case it will be the only element in the chain, so sorted position
// links enumeration will produce correct results.
return to;
}
// don't add _to_ row to chain if its sort channel value is null
if (isNull(to)) {
return from;
}
// make sure that from value is the smaller one
if (comparator.compare(from, to) > 0) {
// _from_ is larger so, just add to current chain _to_
positionLinks.computeIfAbsent(to, NEW_INT_LIST).add(from);
return to;
}
else {
// _to_ is larger so, move the chain to _from_
IntArrayList links = positionLinks.remove(to);
if (links == null) {
links = new IntArrayList();
}
links.add(to);
checkState(positionLinks.put(from, links) == null, "sorted links is corrupted");
return from;
}
}
private boolean isNull(int position)
{
long pageAddress = addresses.get(position);
int blockIndex = decodeSliceIndex(pageAddress);
int blockPosition = decodePosition(pageAddress);
return pagesHashStrategy.isSortChannelPositionNull(blockIndex, blockPosition);
}
@Override
public Factory build()
{
ArrayPositionLinks.FactoryBuilder arrayPositionLinksFactoryBuilder = ArrayPositionLinks.builder(size);
int[][] sortedPositionLinks = new int[size][];
Iterator<Int2ObjectMap.Entry<IntArrayList>> iterator = positionLinks.int2ObjectEntrySet().fastIterator();
while (iterator.hasNext()) {
Int2ObjectOpenHashMap.Entry<IntArrayList> entry = iterator.next();
int key = entry.getIntKey();
IntArrayList positionsList = entry.getValue();
int[] positions = positionsList.toIntArray();
sortedPositionLinks[key] = positions;
if (positions.length > 0) {
// Use the positionsList array for the merge sort temporary work buffer to avoid an extra redundant
// copy. This works because we know that initially it has the same values as the array being sorted
IntArrays.mergeSort(positions, 0, positions.length, comparator, positionsList.elements());
// add link from starting position to position links chain
arrayPositionLinksFactoryBuilder.link(key, positions[0]);
// add links for the sorted internal elements
for (int i = 0; i < positions.length - 1; i++) {
arrayPositionLinksFactoryBuilder.link(positions[i], positions[i + 1]);
}
}
}
return createFactory(sortedPositionLinks, arrayPositionLinksFactoryBuilder.build());
}
@Override
public boolean isEmpty()
{
return positionLinks.isEmpty();
}
// Separate static method to avoid embedding references to "this"
private static Factory createFactory(int[][] sortedPositionLinks, Factory arrayPositionLinksFactory)
{
requireNonNull(sortedPositionLinks, "sortedPositionLinks is null");
requireNonNull(arrayPositionLinksFactory, "arrayPositionLinksFactory is null");
return new Factory()
{
@Override
public PositionLinks create(List<JoinFilterFunction> searchFunctions)
{
return new SortedPositionLinks(
arrayPositionLinksFactory.create(ImmutableList.of()),
sortedPositionLinks,
searchFunctions);
}
@Override
public long checksum()
{
// For spill/unspill state restoration, sorted position links do not matter
return arrayPositionLinksFactory.checksum();
}
};
}
}
private final PositionLinks positionLinks;
private final int[][] sortedPositionLinks;
private final long sizeInBytes;
private final JoinFilterFunction[] searchFunctions;
private SortedPositionLinks(PositionLinks positionLinks, int[][] sortedPositionLinks, List<JoinFilterFunction> searchFunctions)
{
this.positionLinks = requireNonNull(positionLinks, "positionLinks is null");
this.sortedPositionLinks = requireNonNull(sortedPositionLinks, "sortedPositionLinks is null");
this.sizeInBytes = INSTANCE_SIZE + positionLinks.getSizeInBytes() + sizeOfPositionLinks(sortedPositionLinks);
requireNonNull(searchFunctions, "searchFunctions is null");
checkState(!searchFunctions.isEmpty(), "Using sortedPositionLinks with no search functions");
this.searchFunctions = searchFunctions.toArray(new JoinFilterFunction[0]);
}
private static long sizeOfPositionLinks(int[][] sortedPositionLinks)
{
long retainedSize = sizeOf(sortedPositionLinks);
for (int[] element : sortedPositionLinks) {
retainedSize += sizeOf(element);
}
return retainedSize;
}
public static FactoryBuilder builder(int size, PagesHashStrategy pagesHashStrategy, AdaptiveLongBigArray addresses)
{
return new FactoryBuilder(size, pagesHashStrategy, addresses);
}
@Override
public long getSizeInBytes()
{
return sizeInBytes;
}
@Override
public int next(int position, int probePosition, Page allProbeChannelsPage)
{
int nextPosition = positionLinks.next(position, probePosition, allProbeChannelsPage);
if (nextPosition < 0) {
return -1;
}
if (!applyAllSearchFunctions(nextPosition, probePosition, allProbeChannelsPage)) {
// break a position links chain if next position should be filtered out
return -1;
}
return nextPosition;
}
@Override
public int start(int startingPosition, int probePosition, Page allProbeChannelsPage)
{
if (applyAllSearchFunctions(startingPosition, probePosition, allProbeChannelsPage)) {
return startingPosition;
}
int[] links = sortedPositionLinks[startingPosition];
if (links == null) {
return -1;
}
int currentStartOffset = 0;
for (JoinFilterFunction searchFunction : searchFunctions) {
currentStartOffset = findStartPositionForFunction(searchFunction, links, currentStartOffset, probePosition, allProbeChannelsPage);
// return as soon as a mismatch is found, since we are handling only AND predicates (conjuncts)
if (currentStartOffset == -1) {
return -1;
}
}
return links[currentStartOffset];
}
private boolean applyAllSearchFunctions(int buildPosition, int probePosition, Page allProbeChannelsPage)
{
for (JoinFilterFunction searchFunction : searchFunctions) {
if (!applySearchFunction(searchFunction, buildPosition, probePosition, allProbeChannelsPage)) {
return false;
}
}
return true;
}
private static int findStartPositionForFunction(JoinFilterFunction searchFunction, int[] links, int startOffset, int probePosition, Page allProbeChannelsPage)
{
if (applySearchFunction(searchFunction, links, startOffset, probePosition, allProbeChannelsPage)) {
// MAJOR HACK: if searchFunction is of shape `f(probe) > build_symbol` it is not fit for binary search below,
// but it does not imply extra constraints on start position; so we just ignore it.
// It does not break logic for `f(probe) < build_symbol` as the binary search below would return same value.
// todo: Explicitly handle less-than and greater-than functions separately.
return startOffset;
}
// do a binary search for the first position for which filter function applies
int offset = lowerBound(searchFunction, links, startOffset, links.length - 1, probePosition, allProbeChannelsPage);
if (!applySearchFunction(searchFunction, links, offset, probePosition, allProbeChannelsPage)) {
return -1;
}
return offset;
}
/**
* Find the first element in position links that is NOT smaller than probePosition
*/
private static int lowerBound(JoinFilterFunction searchFunction, int[] links, int first, int last, int probePosition, Page allProbeChannelsPage)
{
int middle;
int step;
int count = last - first;
while (count > 0) {
step = count / 2;
middle = first + step;
if (!applySearchFunction(searchFunction, links, middle, probePosition, allProbeChannelsPage)) {
first = ++middle;
count -= step + 1;
}
else {
count = step;
}
}
return first;
}
private static boolean applySearchFunction(JoinFilterFunction searchFunction, int[] links, int linkOffset, int probePosition, Page allProbeChannelsPage)
{
return searchFunction.filter(links[linkOffset], probePosition, allProbeChannelsPage);
}
private static boolean applySearchFunction(JoinFilterFunction searchFunction, int buildPosition, int probePosition, Page allProbeChannelsPage)
{
return searchFunction.filter(buildPosition, probePosition, allProbeChannelsPage);
}
private static final class PositionComparator
implements IntComparator
{
private final PagesHashStrategy pagesHashStrategy;
private final AdaptiveLongBigArray addresses;
PositionComparator(PagesHashStrategy pagesHashStrategy, AdaptiveLongBigArray addresses)
{
this.pagesHashStrategy = pagesHashStrategy;
this.addresses = addresses;
}
@Override
public int compare(int leftPosition, int rightPosition)
{
long leftPageAddress = addresses.get(leftPosition);
int leftBlockIndex = decodeSliceIndex(leftPageAddress);
int leftBlockPosition = decodePosition(leftPageAddress);
long rightPageAddress = addresses.get(rightPosition);
int rightBlockIndex = decodeSliceIndex(rightPageAddress);
int rightBlockPosition = decodePosition(rightPageAddress);
return pagesHashStrategy.compareSortChannelPositions(leftBlockIndex, leftBlockPosition, rightBlockIndex, rightBlockPosition);
}
@Override
public int compare(Integer leftPosition, Integer rightPosition)
{
return compare(leftPosition.intValue(), rightPosition.intValue());
}
}
}
| 5,556 |
348 | {"nom":"Villers-Vermont","circ":"2ème circonscription","dpt":"Oise","inscrits":96,"abs":42,"votants":54,"blancs":10,"nuls":0,"exp":44,"res":[{"nuance":"REM","nom":"<NAME>","voix":25},{"nuance":"FN","nom":"<NAME>","voix":19}]} | 91 |
466 | //---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2014 Pivotal, Inc.
//
// @filename:
// CParseHandlerPhysicalBitmapTableScan.cpp
//
// @doc:
// SAX parse handler class for parsing bitmap table scan operator nodes
//---------------------------------------------------------------------------
#include "naucrates/dxl/parser/CParseHandlerPhysicalBitmapTableScan.h"
#include "naucrates/dxl/xml/dxltokens.h"
using namespace gpdxl;
//---------------------------------------------------------------------------
// @function:
// CParseHandlerPhysicalBitmapTableScan::StartElement
//
// @doc:
// Invoked by Xerces to process an opening tag
//
//---------------------------------------------------------------------------
void
CParseHandlerPhysicalBitmapTableScan::StartElement(
const XMLCh *const, // element_uri
const XMLCh *const element_local_name,
const XMLCh *const, // element_qname
const Attributes & // attrs
)
{
StartElementHelper(element_local_name, EdxltokenPhysicalBitmapTableScan);
}
//---------------------------------------------------------------------------
// @function:
// CParseHandlerPhysicalBitmapTableScan::EndElement
//
// @doc:
// Invoked by Xerces to process a closing tag
//
//---------------------------------------------------------------------------
void
CParseHandlerPhysicalBitmapTableScan::EndElement(
const XMLCh *const, // element_uri
const XMLCh *const element_local_name,
const XMLCh *const // element_qname
)
{
EndElementHelper(element_local_name, EdxltokenPhysicalBitmapTableScan);
}
| 414 |
435 | <reponame>allen91wu/data
{
"description": "DjangoCon 2019 - Creating a containerized Django + React + PostgreSQL development environment by <NAME>\n\nThere are many reasons to develop with containers, but getting your developer tools to work with containers can be a big challenge. In this talk we'll show how you can easily set up a fully featured containerized Django development environment using Visual Studio Code remote development extensions.\n\nThis talk was presented at: https://2019.djangocon.us/talks/creating-a-containerized-django-react/\n\nLINKS:\nFollow <NAME> \ud83d\udc47\nOn Twitter: https://twitter.com/qubitron\n\n\nFollow DjangCon US \ud83d\udc47\nhttps://twitter.com/djangocon\n\nFollow DEFNA \ud83d\udc47\nhttps://twitter.com/defnado\nhttps://www.defna.org/\n\nIntro music: \"This Is How We Quirk It\" by Avocado Junkie.\nVideo production by Confreaks TV.\nCaptions by White Coat Captioning.",
"language": "eng",
"recorded": "2019-09-24",
"speakers": [
"<NAME>"
],
"thumbnail_url": "https://i.ytimg.com/vi/hwHRI59iGlw/hqdefault.jpg",
"title": "Creating a containerized Django + React + PostgreSQL...",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=hwHRI59iGlw"
}
]
}
| 431 |
311 | package com.bitlove.fetlife.model.pojos.fetlife.db;
import com.bitlove.fetlife.model.db.FetLifeDatabase;
import com.bitlove.fetlife.model.pojos.fetlife.FriendRequestScreenModelObject;
import com.bitlove.fetlife.model.pojos.fetlife.dbjson.Member;
import com.raizlabs.android.dbflow.annotation.Column;
import com.raizlabs.android.dbflow.annotation.PrimaryKey;
import com.raizlabs.android.dbflow.annotation.Table;
import com.raizlabs.android.dbflow.structure.BaseModel;
@Table(database = FetLifeDatabase.class)
public class SharedProfile extends BaseModel implements FriendRequestScreenModelObject {
@Column
@PrimaryKey(autoincrement = false)
private String memberId;
@Column
private boolean pending;
//Load helper
public Member getMember() {
return Member.loadMember(memberId);
}
public String getMemberId() {
return memberId;
}
public void setMemberId(String memberId) {
this.memberId = memberId;
}
public boolean isPending() {
return pending;
}
public void setPending(boolean pending) {
this.pending = pending;
}
}
| 408 |
521 | <filename>src/elle/serialization/SerializerIn.cc
#include <elle/serialization/SerializerIn.hh>
namespace elle
{
namespace serialization
{
SerializerIn::SerializerIn(bool versioned)
: Super(versioned)
{}
SerializerIn::SerializerIn(Versions versions,
bool versioned)
: Super(std::move(versions), versioned)
{}
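// An input serializer only reads, so it never reports itself as an output serializer.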
bool
SerializerIn::out() const
{
return false;
}
}
}
| 193 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/ozone/platform/flatland/flatland_connection.h"
#include <fuchsia/scenic/scheduling/cpp/fidl.h>
#include <fuchsia/ui/composition/cpp/fidl.h>
#include <string>
#include "base/fuchsia/scoped_service_publisher.h"
#include "base/fuchsia/test_component_context_for_process.h"
#include "base/logging.h"
#include "base/test/bind.h"
#include "base/test/task_environment.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/ozone/platform/flatland/tests/fake_flatland.h"
namespace ui {
namespace {
std::string GetCurrentTestName() {
return ::testing::UnitTest::GetInstance()->current_test_info()->name();
}
// Fixture to exercise Present() logic in FlatlandConnection.
class FlatlandConnectionTest : public ::testing::Test {
protected:
FlatlandConnectionTest()
: fake_flatland_publisher_(test_context_.additional_services(),
fake_flatland_.GetRequestHandler()) {}
~FlatlandConnectionTest() override = default;
protected:
base::test::SingleThreadTaskEnvironment task_environment_{
base::test::SingleThreadTaskEnvironment::MainThreadType::IO};
FakeFlatland fake_flatland_;
private:
base::TestComponentContextForProcess test_context_;
// Injects binding for responding to Flatland protocol connection requests.
const base::ScopedServicePublisher<fuchsia::ui::composition::Flatland>
fake_flatland_publisher_;
};
TEST_F(FlatlandConnectionTest, Initialization) {
// Create the FlatlandConnection but don't pump the loop. No FIDL calls are
// completed yet.
const std::string debug_name = GetCurrentTestName();
FlatlandConnection flatland_connection(debug_name);
EXPECT_EQ(fake_flatland_.debug_name(), "");
// Ensure the debug name is set.
task_environment_.RunUntilIdle();
EXPECT_EQ(fake_flatland_.debug_name(), debug_name);
}
TEST_F(FlatlandConnectionTest, BasicPresent) {
// Set up callbacks which allow sensing of how many presents were handled.
size_t presents_called = 0u;
fake_flatland_.SetPresentHandler(base::BindLambdaForTesting(
[&presents_called](fuchsia::ui::composition::PresentArgs present_args) {
presents_called++;
}));
// Create the FlatlandConnection but don't pump the loop. No FIDL calls are
// completed yet.
FlatlandConnection flatland_connection(GetCurrentTestName());
EXPECT_EQ(presents_called, 0u);
flatland_connection.Present();
task_environment_.RunUntilIdle();
EXPECT_EQ(presents_called, 1u);
}
TEST_F(FlatlandConnectionTest, RespectsPresentCredits) {
// Set up callbacks which allow sensing of how many presents were handled.
size_t presents_called = 0u;
fake_flatland_.SetPresentHandler(base::BindLambdaForTesting(
[&presents_called](fuchsia::ui::composition::PresentArgs present_args) {
presents_called++;
}));
// Create the FlatlandConnection but don't pump the loop. No FIDL calls are
// completed yet.
FlatlandConnection flatland_connection(GetCurrentTestName());
EXPECT_EQ(presents_called, 0u);
flatland_connection.Present();
task_environment_.RunUntilIdle();
EXPECT_EQ(presents_called, 1u);
// The next Present should fail because we don't have credits.
flatland_connection.Present();
task_environment_.RunUntilIdle();
EXPECT_EQ(presents_called, 1u);
// Give additional present credits.
fuchsia::ui::composition::OnNextFrameBeginValues on_next_frame_begin_values;
on_next_frame_begin_values.set_additional_present_credits(1);
fake_flatland_.FireOnNextFrameBeginEvent(
std::move(on_next_frame_begin_values));
task_environment_.RunUntilIdle();
flatland_connection.Present();
task_environment_.RunUntilIdle();
EXPECT_EQ(presents_called, 2u);
}
TEST_F(FlatlandConnectionTest, ReleaseFences) {
// Set up callbacks which allow sensing of how many presents were handled.
size_t presents_called = 0u;
zx_handle_t release_fence_handle;
fake_flatland_.SetPresentHandler(base::BindLambdaForTesting(
[&presents_called, &release_fence_handle](
fuchsia::ui::composition::PresentArgs present_args) {
presents_called++;
release_fence_handle =
present_args.release_fences().empty()
? ZX_HANDLE_INVALID
: present_args.release_fences().front().get();
}));
// Create the FlatlandConnection but don't pump the loop. No FIDL calls are
// completed yet.
FlatlandConnection flatland_connection(GetCurrentTestName());
EXPECT_EQ(presents_called, 0u);
zx::event first_release_fence;
zx::event::create(0, &first_release_fence);
const zx_handle_t first_release_fence_handle = first_release_fence.get();
fuchsia::ui::composition::PresentArgs present_args;
present_args.set_requested_presentation_time(0);
present_args.set_acquire_fences({});
std::vector<zx::event> fences;
fences.push_back(std::move(first_release_fence));
present_args.set_release_fences({std::move(fences)});
present_args.set_unsquashable(false);
flatland_connection.Present(std::move(present_args),
FlatlandConnection::OnFramePresentedCallback());
task_environment_.RunUntilIdle();
EXPECT_EQ(presents_called, 1u);
EXPECT_EQ(release_fence_handle, ZX_HANDLE_INVALID);
// Give additional present credits
fuchsia::ui::composition::OnNextFrameBeginValues on_next_frame_begin_values;
on_next_frame_begin_values.set_additional_present_credits(1);
fake_flatland_.FireOnNextFrameBeginEvent(
std::move(on_next_frame_begin_values));
task_environment_.RunUntilIdle();
flatland_connection.Present();
task_environment_.RunUntilIdle();
EXPECT_EQ(presents_called, 2u);
EXPECT_EQ(release_fence_handle, first_release_fence_handle);
}
} // namespace
} // namespace ui
| 2,038 |
27,296 | // Copyright (c) 2014 Intel Corp
// Copyright (c) 2014 The Chromium Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef CONTENT_NW_SRC_API_SHORTCUT_GLOBAL_SHORTCUT_LISTENER_MAC_H_
#define CONTENT_NW_SRC_API_SHORTCUT_GLOBAL_SHORTCUT_LISTENER_MAC_H_
#include "content/nw/src/api/shortcut/global_shortcut_listener.h"
#include <Carbon/Carbon.h>
#include <CoreFoundation/CoreFoundation.h>
#include <map>
#include "base/mac/scoped_nsobject.h"
namespace nwapi {
// Mac-specific implementation of the GlobalShortcutListener class that
// listens for global shortcuts. Handles basic keyboard intercepting and
// forwards its output to the base class for processing.
//
// This class does two things:
// 1. Intercepts media keys. Uses an event tap for intercepting media keys
// (PlayPause, NextTrack, PreviousTrack).
// 2. Binds keyboard shortcuts (hot keys). Carbon RegisterEventHotKey API for
// binding to non-media key global hot keys (eg. Command-Shift-1).
class GlobalShortcutListenerMac : public GlobalShortcutListener {
public:
GlobalShortcutListenerMac();
virtual ~GlobalShortcutListenerMac();
private:
typedef int KeyId;
typedef std::map<ui::Accelerator, KeyId> AcceleratorIdMap;
typedef std::map<KeyId, ui::Accelerator> IdAcceleratorMap;
typedef std::map<KeyId, EventHotKeyRef> IdHotKeyRefMap;
// Keyboard event callbacks.
void OnHotKeyEvent(EventHotKeyID hot_key_id);
bool OnMediaKeyEvent(int key_code);
// GlobalShortcutListener implementation.
virtual void StartListening() override;
virtual void StopListening() override;
virtual bool RegisterAcceleratorImpl(
const ui::Accelerator& accelerator) override;
virtual void UnregisterAcceleratorImpl(
const ui::Accelerator& accelerator) override;
// Mac-specific functions for registering hot keys with modifiers.
bool RegisterHotKey(const ui::Accelerator& accelerator, KeyId hot_key_id);
void UnregisterHotKey(const ui::Accelerator& accelerator);
// Enable and disable the media key event tap.
void StartWatchingMediaKeys();
void StopWatchingMediaKeys();
// Enable and disable the hot key event handler.
void StartWatchingHotKeys();
void StopWatchingHotKeys();
// Whether or not any media keys are currently registered.
bool IsAnyMediaKeyRegistered();
// Whether or not any hot keys are currently registered.
bool IsAnyHotKeyRegistered();
// The callback for when an event tap happens.
static CGEventRef EventTapCallback(
CGEventTapProxy proxy, CGEventType type, CGEventRef event, void* refcon);
// The callback for when a hot key event happens.
static OSStatus HotKeyHandler(
EventHandlerCallRef next_handler, EventRef event, void* user_data);
// Whether this object is listening for global shortcuts.
bool is_listening_;
// The hotkey identifier for the next global shortcut that is added.
KeyId hot_key_id_;
// A map of all hotkeys (media keys and shortcuts) mapping to their
// corresponding hotkey IDs. For quickly finding if an accelerator is
// registered.
AcceleratorIdMap accelerator_ids_;
// The inverse map for quickly looking up accelerators by hotkey id.
IdAcceleratorMap id_accelerators_;
// Keyboard shortcut IDs to hotkeys map for unregistration.
IdHotKeyRefMap id_hot_key_refs_;
// Event tap for intercepting mac media keys.
CFMachPortRef event_tap_;
CFRunLoopSourceRef event_tap_source_;
// Event handler for keyboard shortcut hot keys.
EventHandlerRef event_handler_;
DISALLOW_COPY_AND_ASSIGN(GlobalShortcutListenerMac);
};
} // namespace nwapi
#endif // CONTENT_NW_SRC_API_SHORTCUT_GLOBAL_SHORTCUT_LISTENER_MAC_H_
| 1,361 |
1,144 | <gh_stars>1000+
//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef METRICS_CONSTANTS_H_
#define METRICS_CONSTANTS_H_
namespace metrics {
static const char kSharedMetricsDirectory[] = "/data/misc/metrics/";
static const char kMetricsdDirectory[] = "/data/misc/metricsd/";
static const char kMetricsCollectorDirectory[] =
"/data/misc/metrics_collector/";
static const char kMetricsGUIDFileName[] = "Sysinfo.GUID";
static const char kMetricsServer[] = "https://clients4.google.com/uma/v2";
static const char kConsentFileName[] = "enabled";
static const char kStagedLogName[] = "staged_log";
static const char kSavedLogName[] = "saved_log";
static const char kFailedUploadCountName[] = "failed_upload_count";
static const char kDefaultVersion[] = "0.0.0.0";
// Build time properties name.
static const char kProductId[] = "product_id";
static const char kProductVersion[] = "product_version";
// Weave configuration.
static const char kWeaveConfigurationFile[] = "/system/etc/weaved/weaved.conf";
static const char kModelManifestId[] = "model_id";
} // namespace metrics
#endif // METRICS_CONSTANTS_H_
| 511 |
1,056 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.php.editor.verification;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.prefs.Preferences;
import org.netbeans.modules.csl.api.Error;
import org.netbeans.modules.csl.api.Hint;
import org.netbeans.modules.csl.api.HintsProvider;
import org.netbeans.modules.csl.api.Rule;
import org.netbeans.modules.csl.api.Rule.AstRule;
import org.netbeans.modules.csl.api.Rule.ErrorRule;
import org.netbeans.modules.csl.api.RuleContext;
import org.netbeans.modules.csl.spi.ParserResult;
import org.netbeans.modules.php.editor.model.FileScope;
import org.netbeans.modules.php.editor.model.Model;
import org.netbeans.modules.php.editor.parser.PHPParseResult;
/**
*
* @author <NAME> <<EMAIL>>
*/
public class PHPHintsProvider implements HintsProvider {
public static final String DEFAULT_HINTS = "default.hints"; //NOI18N
public static final String DEFAULT_SUGGESTIONS = "default.suggestions"; //NOI18N
volatile boolean cancel = false;
enum ErrorType {
UNHANDLED_ERRORS,
HINT_ERRORS
}
@Override
public void computeHints(HintsManager mgr, RuleContext context, List<Hint> hints) {
resume();
Map<?, List<? extends Rule.AstRule>> allHints = mgr.getHints(false, context);
List<? extends AstRule> modelHints = allHints.get(DEFAULT_HINTS);
if (cancel) {
return;
}
RulesRunner<Hint> rulesRunner = new RulesRunnerImpl<>(mgr, initializeContext(context), hints);
if (cancel) {
return;
}
RuleAdjuster forAllAdjusters = new ForAllAdjusters(Arrays.asList(new PreferencesAdjuster(mgr), new ResetCaretOffsetAdjuster()));
if (cancel) {
return;
}
rulesRunner.run(modelHints, forAllAdjusters);
}
@Override
public void computeSuggestions(HintsManager mgr, RuleContext context, List<Hint> suggestions, int caretOffset) {
resume();
RulesRunner<Hint> rulesRunner = new RulesRunnerImpl<>(mgr, initializeContext(context), suggestions);
if (cancel) {
return;
}
RuleAdjuster forAllAdjusters = new ForAllAdjusters(Arrays.asList(new PreferencesAdjuster(mgr), new CaretOffsetAdjuster(caretOffset)));
Map<?, List<? extends AstRule>> hintsOnLine = mgr.getHints(true, context);
if (cancel) {
return;
}
List<? extends AstRule> defaultHintsOnLine = hintsOnLine.get(DEFAULT_HINTS);
if (cancel) {
return;
}
if (defaultHintsOnLine != null) {
rulesRunner.run(defaultHintsOnLine, forAllAdjusters);
}
Map<?, List<? extends Rule.AstRule>> allHints = mgr.getSuggestions();
if (cancel) {
return;
}
List<? extends AstRule> modelHints = allHints.get(DEFAULT_SUGGESTIONS);
if (cancel) {
return;
}
if (modelHints != null) {
rulesRunner.run(modelHints, forAllAdjusters);
}
}
@Override
public void computeSelectionHints(HintsManager manager, RuleContext context, List<Hint> selections, int start, int end) {
}
@Override
public void computeErrors(HintsManager manager, RuleContext context, List<Hint> hints, List<Error> unhandled) {
resume();
List<? extends Error> errors = context.parserResult.getDiagnostics();
unhandled.addAll(errors);
if (cancel) {
return;
}
Map<?, List<? extends ErrorRule>> allErrors = manager.getErrors();
if (cancel) {
return;
}
List<? extends ErrorRule> unhandledErrors = allErrors.get(ErrorType.UNHANDLED_ERRORS);
if (cancel) {
return;
}
if (unhandledErrors != null) {
RulesRunner<Error> rulesRunner = new RulesRunnerImpl<>(manager, initializeContext(context), unhandled);
rulesRunner.run(unhandledErrors, RuleAdjuster.NONE);
}
if (cancel) {
return;
}
List<? extends ErrorRule> hintErrors = allErrors.get(ErrorType.HINT_ERRORS);
if (cancel) {
return;
}
if (hintErrors != null) {
RulesRunner<Hint> rulesRunner = new RulesRunnerImpl<>(manager, initializeContext(context), hints);
rulesRunner.run(hintErrors, RuleAdjuster.NONE);
}
}
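// Attaches the parsed PHP model's file scope to the rule context so individual rules can query model information.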
private PHPRuleContext initializeContext(RuleContext context) {
PHPRuleContext phpRuleContext = (PHPRuleContext) context;
ParserResult info = context.parserResult;
PHPParseResult result = (PHPParseResult) info;
if (cancel) {
return phpRuleContext;
}
final Model model = result.getModel();
if (cancel) {
return phpRuleContext;
}
FileScope modelScope = model.getFileScope();
phpRuleContext.fileScope = modelScope;
return phpRuleContext;
}
@Override
public void cancel() {
cancel = true;
}
private void resume() {
cancel = false;
}
@Override
public List<Rule> getBuiltinRules() {
return Collections.<Rule>emptyList();
}
@Override
public RuleContext createRuleContext() {
return new PHPRuleContext();
}
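// Runs a list of rules against the prepared rule context, collecting the produced hints or errors of type T while honoring cancellation.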
private interface RulesRunner<T> {
void run(List<? extends Rule> rules, RuleAdjuster adjuster);
}
private final class RulesRunnerImpl<T> implements RulesRunner<T> {
private final HintsManager hintManager;
private final PHPRuleContext ruleContext;
private final List<T> result;
public RulesRunnerImpl(HintsManager hintManager, PHPRuleContext ruleContext, List<T> result) {
this.hintManager = hintManager;
this.ruleContext = ruleContext;
this.result = result;
}
@Override
public void run(List<? extends Rule> rules, RuleAdjuster adjuster) {
for (Rule rule : rules) {
if (cancel) {
break;
}
if (rule instanceof AstRule) {
AstRule astRule = (AstRule) rule;
if (hintManager.isEnabled(astRule)) {
adjustAndInvoke(rule, adjuster);
}
} else if (rule instanceof ErrorRule) {
adjustAndInvoke(rule, adjuster);
}
}
}
private void adjustAndInvoke(Rule rule, RuleAdjuster adjuster) {
if (cancel) {
return;
}
if (rule instanceof InvokableRule) {
adjuster.adjust(rule);
InvokableRule<T> invokableRule = (InvokableRule<T>) rule;
if (cancel) {
return;
}
List<T> tempResult = new ArrayList<>();
invokableRule.invoke(ruleContext, tempResult);
boolean checkResult = false;
assert checkResult = true;
if (checkResult) {
for (T item : tempResult) {
assert item != null : rule;
}
}
result.addAll(tempResult);
}
}
}
private interface RuleAdjuster {
RuleAdjuster NONE = new RuleAdjuster() {
@Override
public void adjust(Rule rule) {
}
};
void adjust(Rule rule);
}
private final class ForAllAdjusters implements RuleAdjuster {
private final Collection<RuleAdjuster> adjusters;
public ForAllAdjusters(Collection<RuleAdjuster> adjusters) {
this.adjusters = adjusters;
}
@Override
public void adjust(Rule rule) {
for (RuleAdjuster hintAdjuster : adjusters) {
if (cancel) {
return;
}
hintAdjuster.adjust(rule);
}
}
}
private static final class ResetCaretOffsetAdjuster implements RuleAdjuster {
private final RuleAdjuster caretOffsetAdjuster;
public ResetCaretOffsetAdjuster() {
caretOffsetAdjuster = new CaretOffsetAdjuster(-1);
}
@Override
public void adjust(Rule rule) {
caretOffsetAdjuster.adjust(rule);
}
}
private static final class CaretOffsetAdjuster implements RuleAdjuster {
private final int caretOffset;
public CaretOffsetAdjuster(int caretOffset) {
this.caretOffset = caretOffset;
}
@Override
public void adjust(Rule rule) {
if (rule instanceof CaretSensitiveRule) {
CaretSensitiveRule icm = (CaretSensitiveRule) rule;
icm.setCaretOffset(caretOffset);
}
}
}
private static final class PreferencesAdjuster implements RuleAdjuster {
private final HintsManager hintManager;
public PreferencesAdjuster(HintsManager hintManager) {
this.hintManager = hintManager;
}
@Override
public void adjust(Rule rule) {
if (rule instanceof CustomisableRule) {
CustomisableRule icm = (CustomisableRule) rule;
Preferences preferences = hintManager.getPreferences(icm);
assert preferences != null : rule;
icm.setPreferences(preferences);
}
}
}
}
| 4,529 |
4,283 | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.internal.networking;
import com.hazelcast.client.impl.protocol.ClientMessage;
import com.hazelcast.internal.ascii.TextCommand;
import com.hazelcast.internal.nio.Packet;
import com.hazelcast.internal.serialization.Data;
/**
* Represents a payload that can be written to a {@link Channel}.
* <p>
* There are different types of OutboundFrame:
* <ol>
* <li> {@link Packet}: for member to member communication</li>
* <li> {@link TextCommand}: for memcached and REST communication</li>
* <li> {@link ClientMessage}: for the new client to member communication</li>
* </ol>
* <p>
* Currently, all communication over a single connection is of a single
* subclass. E.g. member to member only uses {@link Packet}.
* <p>
* There is no need for an InboundFrame interface.
*
* @see Data
* @see Channel#write(OutboundFrame)
*/
public interface OutboundFrame {
/**
* Checks if this OutboundFrame is urgent.
* <p>
* Frames that are urgent, have priority above regular frames. This is useful
* to implement system operations so that they can be sent faster than regular
* operations; especially when the system is under load you want these operations
* to have precedence.
*
* @return true if urgent, false otherwise.
*/
boolean isUrgent();
/**
* Returns the frame length. This includes header and payload size.
*
* @return The frame length.
*/
int getFrameLength();
}
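// Illustrative sketch only (not part of Hazelcast): a minimal frame that wraps a raw
// byte[] payload, showing how isUrgent() and getFrameLength() typically relate to the
// data a frame carries. Real frames such as Packet also account for header bytes.
class ExampleByteArrayFrame implements OutboundFrame {
    private final byte[] payload;
    private final boolean urgent;
    ExampleByteArrayFrame(byte[] payload, boolean urgent) {
        this.payload = payload;
        this.urgent = urgent;
    }
    @Override
    public boolean isUrgent() {
        // Urgent frames are given priority by the channel over regular frames.
        return urgent;
    }
    @Override
    public int getFrameLength() {
        // This sketch carries no header, so the frame length equals the payload size.
        return payload.length;
    }
}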
| 625 |
457 | <gh_stars>100-1000
package io.purplejs.core.exception;
import io.purplejs.core.resource.ResourcePath;
/**
* This exception indicates that a resource was not found or could not be resolved.
*/
public final class NotFoundException
extends RuntimeException
{
/**
* Constructs the exception with a message.
*
* @param message Message indicating the problem.
*/
public NotFoundException( final String message )
{
super( message );
}
/**
* Constructs the exception with a resource path.
*
* @param path Path indicating which resource was not found.
*/
public NotFoundException( final ResourcePath path )
{
this( String.format( "Resource [%s] not found", path.toString() ) );
}
}
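// Illustrative usage (hypothetical resolver code, not part of purplejs):
//
//   final Resource resource = resourceLoader.loadOrNull( path );
//   if ( resource == null )
//   {
//       throw new NotFoundException( path );
//   }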
| 261 |
30,023 | <gh_stars>1000+
"""The Geocaching integration."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.config_entry_oauth2_flow import (
OAuth2Session,
async_get_config_entry_implementation,
)
from .const import DOMAIN
from .coordinator import GeocachingDataUpdateCoordinator
PLATFORMS = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Geocaching from a config entry."""
implementation = await async_get_config_entry_implementation(hass, entry)
oauth_session = OAuth2Session(hass, entry, implementation)
coordinator = GeocachingDataUpdateCoordinator(
hass, entry=entry, session=oauth_session
)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
del hass.data[DOMAIN][entry.entry_id]
return unload_ok
| 452 |
3,263 | /*
Copyright 2018 Immutables Authors and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.immutables.fixture.jdkonly;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.util.Map;
import java.util.Set;
import org.immutables.value.Value;
@Value.Immutable
public abstract class EmptyEnumMapAndSet {
public enum Generations {
X, Y, Z
}
@Value.Default
public Map<Generations, String> genNames() {
return ImmutableMap.of();
}
@Value.Default
public Set<Generations> gens() {
return ImmutableSet.of();
}
}
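// Usage sketch (assumes the Immutables annotation processor, which by convention generates
// an ImmutableEmptyEnumMapAndSet companion class): leaving both attributes unset yields the
// empty defaults above, e.g.
//
//   EmptyEnumMapAndSet value = ImmutableEmptyEnumMapAndSet.builder().build();
//   // value.genNames().isEmpty() and value.gens().isEmpty() both hold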
| 337 |
1,444 |
package mage.cards.f;
import java.util.UUID;
import mage.abilities.costs.mana.ManaCostsImpl;
import mage.abilities.effects.common.DamageTargetEffect;
import mage.abilities.keyword.BasicLandcyclingAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.target.common.TargetCreaturePermanent;
/**
*
* @author Loki
*/
public final class FieryFall extends CardImpl {
public FieryFall (UUID ownerId, CardSetInfo setInfo) {
super(ownerId,setInfo,new CardType[]{CardType.INSTANT},"{5}{R}");
// Fiery Fall deals 5 damage to target creature.
this.getSpellAbility().addEffect(new DamageTargetEffect(5));
this.getSpellAbility().addTarget(new TargetCreaturePermanent());
// Basic landcycling {1}{R} ({1}{R}, Discard this card: Search your library for a basic land card, reveal it, and put it into your hand. Then shuffle your library.)
this.addAbility(new BasicLandcyclingAbility(new ManaCostsImpl("{1}{R}")));
}
public FieryFall (final FieryFall card) {
super(card);
}
@Override
public FieryFall copy() {
return new FieryFall(this);
}
}
| 436 |
530 | {
"manufacturer": "Good Way Technology Co., Ltd.",
"manufacturerId": "0x0068",
"label": "GATEWAY / FG2200",
"description": "FG2200 Z-Wave® Home Gateway",
"devices": [
{
"productType": "0x0002",
"productId": "0x0002",
"zwaveAllianceId": 2633
}
],
"firmwareVersion": {
"min": "0.0",
"max": "255.255"
},
"metadata": {
"reset": "The Android APP application, please see manual 5.1.4&4.1.5\n\nClick “Reset”, show “Are you sure to reset Z-Dongle?”and click OK to reset the Z-Wave network,or FG3200/FG2200 Reset button 10 seconds after the reset to reset the factory default.\n\nAnd manual P.10\nThe gateway will send DEVICE_RESET_LOCALLY_NOTIFICATION to associated Z-Wave devices when it is reset or factory-reset.\n\nmanual P.22\nIf this controller is the primary controller for your network, resetting it will result in the nodes in your network being orphaned and it will be necessary after the reset to exclude and re-include all of the nodes in the network. If this controller is being used as a secondary controller in the network, use this procedure to reset this controller only in the event that the network primary controller is missing or otherwise inoperable",
"manual": "https://products.z-wavealliance.org/ProductManual/File?folder=&filename=MarketCertificationFiles/2633/FG3200%20Z-Wave%20plus%20User%20manual_20170929.pdf"
}
}
| 445 |
1,444 | <gh_stars>1000+
package mage.cards.t;
import mage.abilities.Mode;
import mage.abilities.effects.common.DamageTargetEffect;
import mage.abilities.effects.common.DestroyTargetEffect;
import mage.abilities.keyword.FlyingAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.filter.FilterPermanent;
import mage.filter.common.FilterCreaturePermanent;
import mage.filter.predicate.mageobject.AbilityPredicate;
import mage.target.TargetPermanent;
import mage.target.common.TargetArtifactPermanent;
import java.util.UUID;
/**
* @author TheElk801
*/
public final class Tangletrap extends CardImpl {
private static final FilterPermanent filter = new FilterCreaturePermanent("creature with flying");
static {
filter.add(new AbilityPredicate(FlyingAbility.class));
}
public Tangletrap(UUID ownerId, CardSetInfo setInfo) {
super(ownerId, setInfo, new CardType[]{CardType.INSTANT}, "{1}{G}");
// Choose one —
// • Tangletrap deals 5 damage to target creature with flying.
this.getSpellAbility().addEffect(new DamageTargetEffect(5));
this.getSpellAbility().addTarget(new TargetPermanent(filter));
// • Destroy target artifact.
Mode mode = new Mode(new DestroyTargetEffect());
mode.addTarget(new TargetArtifactPermanent());
this.getSpellAbility().addMode(mode);
}
private Tangletrap(final Tangletrap card) {
super(card);
}
@Override
public Tangletrap copy() {
return new Tangletrap(this);
}
}
| 535 |
742 | <gh_stars>100-1000
/*------------------------------------
///\ Plywood C++ Framework
\\\/ https://plywood.arc80.com/
------------------------------------*/
#include <ply-web-cook-docs/Core.h>
#include <ply-cook/CookJob.h>
namespace ply {
namespace docs {
void cook_CopyStatic(cook::CookResult* cookResult, TypedPtr) {
PLY_ASSERT(!cookResult->job->id.desc.isEmpty());
// Create destination folder(s) if missing
String dstPath =
NativePath::join(PLY_WORKSPACE_FOLDER, "data/docsite/static", cookResult->job->id.desc);
FSResult r = FileSystem::native()->makeDirs(NativePath::split(dstPath).first);
if (r != FSResult::OK && r != FSResult::AlreadyExists) {
// FIXME: add reason from r
cookResult->addError(String::format("unable to create '{}'", dstPath));
return;
}
// Create Dependency on source file
String srcPath = NativePath::join(PLY_WORKSPACE_FOLDER, "repos/plywood/src/web/theme",
cookResult->job->id.desc);
cook::CookResult::FileDepScope fdScope = cookResult->createFileDependency(srcPath);
PLY_UNUSED(fdScope);
// Open source file
Owned<InPipe> inPipe = FileSystem::native()->openPipeForRead(srcPath);
if (!inPipe) {
// FIXME: add reason from lastResult()
cookResult->addError(String::format("can't open '{}'", srcPath));
return;
}
// Allocate temporary storage
String buf = String::allocate(32768);
// Open destination file
// FIXME: Copy to temporary file first, then rename it
Owned<OutPipe> outPipe = FileSystem::native()->openPipeForWrite(dstPath);
if (!outPipe) {
// FIXME: add reason from lastResult()
cookResult->addError(String::format("unable to create '{}'", dstPath));
return;
}
// Copy in chunks
for (;;) {
u32 numBytes = inPipe->readSome({buf.bytes, buf.numBytes});
// FIXME: Distinguish failed read from EOF
if (numBytes == 0)
break;
outPipe->write(buf.left(numBytes));
}
}
cook::CookJobType CookJobType_CopyStatic = {
"copyStatic",
TypeResolver<cook::CookResult>::get(),
nullptr,
cook_CopyStatic,
};
} // namespace docs
} // namespace ply
| 895 |
5,169 | <reponame>Ray0218/Specs<gh_stars>1000+
{
"name": "ADAppRater",
"version": "1.0.0",
"summary": "An AutoCAD360 component that helps you promote your app and get good reviews on the App Store",
"authors": {
"<NAME>": "<EMAIL>"
},
"social_media_url": "https://twitter.com/ashavit3",
"homepage": "http://www.autodesk.com",
"screenshots": [
"https://raw.githubusercontent.com/Autodesk/ADAppRater-iOS/master/Screenshots/Screenshot1_Satisfaction.png",
"https://raw.githubusercontent.com/Autodesk/ADAppRater-iOS/master/Screenshots/Screenshot2_Rate.png",
"https://raw.githubusercontent.com/Autodesk/ADAppRater-iOS/master/Screenshots/Screenshot3_Feedback.png"
],
"license": {
"type": "MIT",
"file": "LICENCE.md"
},
"source": {
"git": "https://github.com/Autodesk/ADAppRater-iOS.git",
"tag": "1.0.0"
},
"platforms": {
"ios": "7.0"
},
"source_files": "ADAppRater/**/*.{h,m}",
"frameworks": [
"Foundation",
"UIKit"
],
"requires_arc": true
}
| 416 |
460 | <filename>kettle-webapp/src/main/java/org/flhy/webapp/trans/steps/scriptvalues_mod/ScriptValuesModController.java<gh_stars>100-1000
package org.flhy.webapp.trans.steps.scriptvalues_mod;
import com.mxgraph.model.mxCell;
import com.mxgraph.model.mxGeometry;
import com.mxgraph.util.mxUtils;
import com.mxgraph.util.mxXmlUtils;
import org.flhy.ext.PluginFactory;
import org.flhy.ext.base.GraphCodec;
import org.flhy.ext.trans.steps.RowGenerator;
import org.flhy.ext.utils.JSONArray;
import org.flhy.ext.utils.JSONObject;
import org.flhy.ext.utils.JsonUtils;
import org.flhy.webapp.utils.SearchFieldsProgress;
import org.flhy.webapp.utils.TransPreviewProgress;
import org.mozilla.javascript.*;
import org.mozilla.javascript.ast.ScriptNode;
import org.mozilla.javascript.tools.ToolErrorReporter;
import org.pentaho.di.compatibility.Row;
import org.pentaho.di.compatibility.Value;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.row.RowMeta;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.ValueMetaInterface;
import org.pentaho.di.core.row.value.ValueMetaBase;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.trans.TransHopMeta;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.steps.rowgenerator.RowGeneratorMeta;
import org.pentaho.di.trans.steps.scriptvalues_mod.ScriptValuesAddedFunctions;
import org.pentaho.di.trans.steps.scriptvalues_mod.ScriptValuesMetaMod;
import org.pentaho.di.trans.steps.scriptvalues_mod.ScriptValuesModDummy;
import org.pentaho.di.trans.steps.scriptvalues_mod.ScriptValuesScript;
import org.pentaho.di.ui.trans.steps.scriptvalues_mod.ScriptValuesHelp;
import org.springframework.stereotype.Controller;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import java.awt.*;
import java.math.BigDecimal;
import java.util.List;
import java.util.*;
@Controller
@RequestMapping(value = "/script")
public class ScriptValuesModController {
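// Builds the script editor's side tree for the given step: the step's JS scripts, the
// transformation constants, the JS function catalog grouped by category, the incoming
// input fields, and an (initially empty) output-fields node.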
@ResponseBody
@RequestMapping(method = RequestMethod.POST, value = "/tree")
protected void tree(@RequestParam String graphXml, @RequestParam String stepName) throws Exception {
GraphCodec codec = (GraphCodec) PluginFactory.getBean(GraphCodec.TRANS_CODEC);
TransMeta transMeta = (TransMeta) codec.decode(graphXml);
StepMeta stepMeta = transMeta.findStep(stepName);
ScriptValuesMetaMod input = (ScriptValuesMetaMod) stepMeta.getStepMetaInterface();
JSONArray jsonArray = new JSONArray();
int count = 1;
JSONObject transScripts = new JSONObject();
transScripts.put("id", count++);
transScripts.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.TransformScript.Label"));
transScripts.put("expanded", true);
JSONObject transCons = new JSONObject();
transCons.put("id", count++);
transCons.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.TansformConstant.Label"));
JSONObject transFuncs = new JSONObject();
transFuncs.put("id", count++);
transFuncs.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.TransformFunctions.Label"));
JSONObject transInputs = new JSONObject();
transInputs.put("id", count++);
transInputs.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.InputFields.Label"));
transInputs.put("expanded", true);
JSONObject transOutputs = new JSONObject();
transOutputs.put("id", count++);
transOutputs.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.OutputFields.Label"));
transOutputs.put("children", new JSONArray());
// fill transforms
JSONArray items = new JSONArray();
for (String name : input.getJSScriptNames()) {
JSONObject jsonObject = new JSONObject();
jsonObject.put("id", count++);
jsonObject.put("text", name);
jsonObject.put("iconCls", "activeScript");
jsonObject.put("leaf", true);
items.add(jsonObject);
}
transScripts.put("children", items);
// fill constants
items = new JSONArray();
for (String text : new String[]{"SKIP_TRANSFORMATION", "ERROR_TRANSFORMATION", "CONTINUE_TRANSFORMATION"}) {
JSONObject jsonObject = new JSONObject();
jsonObject.put("id", count++);
jsonObject.put("text", text);
jsonObject.put("iconCls", "arrowGreen");
jsonObject.put("leaf", true);
items.add(jsonObject);
}
transCons.put("children", items);
// fill functions
Hashtable<String, String> hatFunctions = scVHelp.getFunctionList();
Vector<String> v = new Vector<String>(hatFunctions.keySet());
Collections.sort(v);
JSONArray stringFuncs = new JSONArray();
JSONArray numberFuncs = new JSONArray();
JSONArray dateFuncs = new JSONArray();
JSONArray logicFuncs = new JSONArray();
JSONArray specialFuncs = new JSONArray();
JSONArray fileFuncs = new JSONArray();
for (String strFunction : v) {
String strFunctionType = hatFunctions.get(strFunction);
int iFunctionType = Integer.valueOf(strFunctionType).intValue();
JSONObject jsonObject = new JSONObject();
jsonObject.put("id", count++);
jsonObject.put("text", strFunction);
jsonObject.put("iconCls", "arrowGreen");
jsonObject.put("leaf", true);
switch (iFunctionType) {
case ScriptValuesAddedFunctions.STRING_FUNCTION:
stringFuncs.add(jsonObject);
break;
case ScriptValuesAddedFunctions.NUMERIC_FUNCTION:
numberFuncs.add(jsonObject);
break;
case ScriptValuesAddedFunctions.DATE_FUNCTION:
dateFuncs.add(jsonObject);
break;
case ScriptValuesAddedFunctions.LOGIC_FUNCTION:
logicFuncs.add(jsonObject);
break;
case ScriptValuesAddedFunctions.SPECIAL_FUNCTION:
specialFuncs.add(jsonObject);
break;
case ScriptValuesAddedFunctions.FILE_FUNCTION:
fileFuncs.add(jsonObject);
break;
default:
break;
}
}
items = new JSONArray();
JSONObject stringFunc = new JSONObject();
stringFunc.put("id", count++);
stringFunc.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.StringFunctions.Label"));
stringFunc.put("iconCls", "underGreen");
stringFunc.put("children", stringFuncs);
items.add(stringFunc);
JSONObject numberFunc = new JSONObject();
numberFunc.put("id", count++);
numberFunc.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.NumericFunctions.Label"));
numberFunc.put("iconCls", "underGreen");
numberFunc.put("children", numberFuncs);
items.add(numberFunc);
JSONObject dateFunc = new JSONObject();
dateFunc.put("id", count++);
dateFunc.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.DateFunctions.Label"));
dateFunc.put("iconCls", "underGreen");
dateFunc.put("children", dateFuncs);
items.add(dateFunc);
JSONObject logicFunc = new JSONObject();
logicFunc.put("id", count++);
logicFunc.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.LogicFunctions.Label"));
logicFunc.put("iconCls", "underGreen");
logicFunc.put("children", logicFuncs);
items.add(logicFunc);
JSONObject fileFunc = new JSONObject();
fileFunc.put("id", count++);
fileFunc.put("text", BaseMessages.getString(ScriptValuesMetaMod.class, "ScriptValuesDialogMod.FileFunctions.Label"));
fileFunc.put("iconCls", "underGreen");
fileFunc.put("children", fileFuncs);
items.add(fileFunc);
transFuncs.put("children", items);
// end fill functions
// fill input fields
SearchFieldsProgress op = new SearchFieldsProgress(transMeta, stepMeta, true);
op.run();
RowMetaInterface rowMetaInterface = op.getFields();
items = new JSONArray();
for (int i = 0; i < rowMetaInterface.size(); i++) {
ValueMetaInterface valueMeta = rowMetaInterface.getValueMeta(i);
JSONObject jsonObject = new JSONObject();
jsonObject.put("id", count++);
jsonObject.put("text", valueMeta.getName());
jsonObject.put("iconCls", "arrowOrange");
jsonObject.put("leaf", true);
items.add(jsonObject);
}
transInputs.put("children", items);
jsonArray.add(transScripts);
jsonArray.add(transCons);
jsonArray.add(transFuncs);
jsonArray.add(transInputs);
jsonArray.add(transOutputs);
JsonUtils.response(jsonArray);
}
private static ScriptValuesHelp scVHelp;
static {
try {
scVHelp = new ScriptValuesHelp("jsFunctionHelp.xml");
} catch (Exception e) {
e.printStackTrace();
}
}
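// Determines candidate output fields for the selected script: builds a dummy row from the
// previous step's fields, runs the optional start script plus the selected script in a Rhino
// scope, then reports each top-level script variable with an inferred Kettle type, length and
// precision, and whether it would replace an existing input field.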
@ResponseBody
@RequestMapping(method = RequestMethod.POST, value = "/getVariables")
protected void getVariables(@RequestParam String graphXml, @RequestParam String stepName, @RequestParam String scriptName) throws Exception {
GraphCodec codec = (GraphCodec) PluginFactory.getBean(GraphCodec.TRANS_CODEC);
TransMeta transMeta = (TransMeta) codec.decode(graphXml);
ScriptValuesMetaMod input = (ScriptValuesMetaMod) transMeta.findStep(stepName).getStepMetaInterface();
Context jscx = ContextFactory.getGlobal().enterContext();
jscx.setOptimizationLevel(-1);
Scriptable jsscope = jscx.initStandardObjects(null, false);
String strStartScript = null, scr = null;
for (ScriptValuesScript script : input.getJSScripts()) {
Scriptable jsR = Context.toObject(script.getScript(), jsscope);
jsscope.put(script.getScriptName(), jsscope, jsR);
if (script.isStartScript()) {
strStartScript = script.getScript();
}
if (script.getScriptName().equals(scriptName)) {
scr = script.getScript();
}
}
jsscope.put("_TransformationName_", jsscope, stepName);
RowMetaInterface rowMeta = transMeta.getPrevStepFields(stepName);
if (rowMeta != null) {
ScriptValuesModDummy dummyStep = new ScriptValuesModDummy(rowMeta, transMeta.getStepFields(stepName));
Scriptable jsvalue = Context.toObject(dummyStep, jsscope);
jsscope.put("_step_", jsscope, jsvalue);
if (input.getAddClasses() != null) {
for (int i = 0; i < input.getAddClasses().length; i++) {
Object jsOut = Context.javaToJS(input.getAddClasses()[i].getAddObject(), jsscope);
ScriptableObject.putProperty(jsscope, input.getAddClasses()[i].getJSName(), jsOut);
}
}
Context.javaToJS(ScriptValuesAddedFunctions.class, jsscope);
((ScriptableObject) jsscope).defineFunctionProperties(ScriptValuesAddedFunctions.jsFunctionList, ScriptValuesAddedFunctions.class, ScriptableObject.DONTENUM);
jsscope.put("SKIP_TRANSFORMATION", jsscope, Integer.valueOf(1));
jsscope.put("ABORT_TRANSFORMATION", jsscope, Integer.valueOf(-1));
jsscope.put("ERROR_TRANSFORMATION", jsscope, Integer.valueOf(-2));
jsscope.put("CONTINUE_TRANSFORMATION", jsscope, Integer.valueOf(0));
Object[] row = new Object[rowMeta.size()];
Scriptable jsRowMeta = Context.toObject(rowMeta, jsscope);
jsscope.put("rowMeta", jsscope, jsRowMeta);
for (int i = 0; i < rowMeta.size(); i++) {
ValueMetaInterface valueMeta = rowMeta.getValueMeta(i);
Object valueData = null;
if (valueMeta.isDate()) {
valueData = new Date();
}
if (valueMeta.isString()) {
valueData = "test value test value test value test value test value " + "test value test value test value test value test value";
}
if (valueMeta.isInteger()) {
valueData = Long.valueOf(0L);
}
if (valueMeta.isNumber()) {
valueData = new Double(0.0);
}
if (valueMeta.isBigNumber()) {
valueData = BigDecimal.ZERO;
}
if (valueMeta.isBoolean()) {
valueData = Boolean.TRUE;
}
if (valueMeta.isBinary()) {
valueData = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9,};
}
if (valueMeta.isStorageBinaryString()) {
valueMeta.setStorageType(ValueMetaInterface.STORAGE_TYPE_NORMAL);
}
row[i] = valueData;
if (input.isCompatible()) {
Value value = valueMeta.createOriginalValue(valueData);
Scriptable jsarg = Context.toObject(value, jsscope);
jsscope.put(valueMeta.getName(), jsscope, jsarg);
} else {
Scriptable jsarg = Context.toObject(valueData, jsscope);
jsscope.put(valueMeta.getName(), jsscope, jsarg);
}
}
Scriptable jsval = Context.toObject(Value.class, jsscope);
jsscope.put("Value", jsscope, jsval);
if (input.isCompatible()) {
Row v2Row = RowMeta.createOriginalRow(rowMeta, row);
Scriptable jsV2Row = Context.toObject(v2Row, jsscope);
jsscope.put("row", jsscope, jsV2Row);
} else {
Scriptable jsRow = Context.toObject(row, jsscope);
jsscope.put("row", jsscope, jsRow);
}
if (strStartScript != null) {
jscx.evaluateString(jsscope, strStartScript, "trans_Start", 1, null);
}
Script evalScript = jscx.compileString(scr, "script", 1, null);
evalScript.exec(jscx, jsscope);
CompilerEnvirons evn = new CompilerEnvirons();
evn.setOptimizationLevel(-1);
evn.setGeneratingSource(true);
evn.setGenerateDebugInfo(true);
ErrorReporter errorReporter = new ToolErrorReporter(false);
Parser p = new Parser(evn, errorReporter);
ScriptNode tree = p.parse(scr, "", 0);
new NodeTransformer().transform(tree);
JSONArray jsonArray = new JSONArray();
for (int i = 0; i < tree.getParamAndVarCount(); i++) {
String varname = tree.getParamOrVarName(i);
if (!varname.equalsIgnoreCase("row") && !varname.equalsIgnoreCase("trans_Status")) {
int type = ValueMetaInterface.TYPE_STRING;
int length = -1, precision = -1;
Object result = jsscope.get(varname, jsscope);
if (result != null) {
String classname = result.getClass().getName();
if (classname.equalsIgnoreCase("java.lang.Byte")) {
// MAX = 127
type = ValueMetaInterface.TYPE_INTEGER;
length = 3;
precision = 0;
} else if (classname.equalsIgnoreCase("java.lang.Integer")) {
// MAX = 2147483647
type = ValueMetaInterface.TYPE_INTEGER;
length = 9;
precision = 0;
} else if (classname.equalsIgnoreCase("java.lang.Long")) {
// MAX = 9223372036854775807
type = ValueMetaInterface.TYPE_INTEGER;
length = 18;
precision = 0;
} else if (classname.equalsIgnoreCase("java.lang.Double")) {
type = ValueMetaInterface.TYPE_NUMBER;
length = 16;
precision = 2;
} else if (classname.equalsIgnoreCase("org.mozilla.javascript.NativeDate") || classname.equalsIgnoreCase("java.util.Date")) {
type = ValueMetaInterface.TYPE_DATE;
} else if (classname.equalsIgnoreCase("java.lang.Boolean")) {
type = ValueMetaInterface.TYPE_BOOLEAN;
}
}
JSONObject jsonObject = new JSONObject();
jsonObject.put("name", varname);
jsonObject.put("rename", "");
jsonObject.put("type", ValueMetaBase.getTypeDesc(type));
jsonObject.put("length", length >= 0 ? String.valueOf(length) : "");
jsonObject.put("precision", precision >= 0 ? String.valueOf(precision) : "");
jsonObject.put("replace", (rowMeta.indexOfValue(varname) >= 0) ? "Y" : "N");
jsonArray.add(jsonObject);
}
}
JsonUtils.response(jsonArray);
}
}
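// Generates a RowGenerator step definition ("## TEST DATA ##", 10 rows) whose fields mirror
// the step's incoming row layout, filled with representative sample values per data type,
// and returns it as XML for the test dialog.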
@ResponseBody
@RequestMapping(method = RequestMethod.POST, value = "/testData")
protected void testData(@RequestParam String graphXml, @RequestParam String stepName) throws Exception {
GraphCodec codec = (GraphCodec) PluginFactory.getBean(GraphCodec.TRANS_CODEC);
TransMeta transMeta = (TransMeta) codec.decode(graphXml);
RowMetaInterface rowMeta = transMeta.getPrevStepFields(stepName).clone();
if (rowMeta != null) {
RowGeneratorMeta genMeta = new RowGeneratorMeta();
genMeta.setRowLimit("10");
genMeta.allocate(rowMeta.size());
for (int i = 0; i < rowMeta.size(); i++) {
ValueMetaInterface valueMeta = rowMeta.getValueMeta(i);
if (valueMeta.isStorageBinaryString()) {
valueMeta.setStorageType(ValueMetaInterface.STORAGE_TYPE_NORMAL);
}
genMeta.getFieldName()[i] = valueMeta.getName();
genMeta.getFieldType()[i] = valueMeta.getTypeDesc();
genMeta.getFieldLength()[i] = valueMeta.getLength();
genMeta.getFieldPrecision()[i] = valueMeta.getPrecision();
genMeta.getCurrency()[i] = valueMeta.getCurrencySymbol();
genMeta.getDecimal()[i] = valueMeta.getDecimalSymbol();
genMeta.getGroup()[i] = valueMeta.getGroupingSymbol();
String string = null;
switch (valueMeta.getType()) {
case ValueMetaInterface.TYPE_DATE:
genMeta.getFieldFormat()[i] = "yyyy/MM/dd HH:mm:ss";
valueMeta.setConversionMask(genMeta.getFieldFormat()[i]);
string = valueMeta.getString(new Date());
break;
case ValueMetaInterface.TYPE_STRING:
string = "test value test value";
break;
case ValueMetaInterface.TYPE_INTEGER:
genMeta.getFieldFormat()[i] = "#";
valueMeta.setConversionMask(genMeta.getFieldFormat()[i]);
string = valueMeta.getString(Long.valueOf(0L));
break;
case ValueMetaInterface.TYPE_NUMBER:
genMeta.getFieldFormat()[i] = "#.#";
valueMeta.setConversionMask(genMeta.getFieldFormat()[i]);
string = valueMeta.getString(Double.valueOf(0.0D));
break;
case ValueMetaInterface.TYPE_BIGNUMBER:
genMeta.getFieldFormat()[i] = "#.#";
valueMeta.setConversionMask(genMeta.getFieldFormat()[i]);
string = valueMeta.getString(BigDecimal.ZERO);
break;
case ValueMetaInterface.TYPE_BOOLEAN:
string = valueMeta.getString(Boolean.TRUE);
break;
case ValueMetaInterface.TYPE_BINARY:
string = valueMeta.getString(new byte[]{65, 66, 67, 68, 69, 70, 71, 72, 73, 74,});
break;
default:
break;
}
genMeta.getValue()[i] = string;
}
RowGenerator rg = (RowGenerator) PluginFactory.getBean("RowGenerator");
Element e = rg.encode(genMeta);
e.setAttribute("label", "## TEST DATA ##");
e.setAttribute("ctype", "RowGenerator");
e.setAttribute("copies", "1");
JsonUtils.responseXml(mxXmlUtils.getXml(e));
}
}
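// Runs a preview: wires the posted RowGenerator step into a temporary transformation ahead of
// the script step, executes it for the configured row limit, and returns the resulting rows as
// grid metadata (columns, widths) plus record data for the UI.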
@ResponseBody
@RequestMapping(method = RequestMethod.POST, value = "/test")
protected void test(@RequestParam String graphXml, @RequestParam String stepName, @RequestParam String rowGenerator) throws Exception {
GraphCodec codec = (GraphCodec) PluginFactory.getBean(GraphCodec.TRANS_CODEC);
TransMeta transMeta = (TransMeta) codec.decode(graphXml);
StepMeta scriptStep = transMeta.findStep(stepName);
Document doc = mxXmlUtils.parseXml(rowGenerator);
RowGenerator rg = (RowGenerator) PluginFactory.getBean("RowGenerator");
mxCell cell = new mxCell(doc.getDocumentElement());
cell.setGeometry(new mxGeometry(0, 0, 40, 40));
StepMeta genStep = rg.decodeStep(cell, null, null);
RowGeneratorMeta genMeta = (RowGeneratorMeta) genStep.getStepMetaInterface();
// Create a hop between both steps...
//
TransHopMeta hop = new TransHopMeta(genStep, scriptStep);
// Generate a new test transformation...
//
TransMeta newMeta = new TransMeta();
newMeta.setName(stepName + " - PREVIEW");
newMeta.addStep(genStep);
newMeta.addStep(scriptStep);
newMeta.addTransHop(hop);
int rowLimit = Const.toInt(genMeta.getRowLimit(), 10);
TransPreviewProgress tpp = new TransPreviewProgress(newMeta, new String[]{stepName}, new int[]{rowLimit});
RowMetaInterface rowMeta = tpp.getPreviewRowsMeta(stepName);
List<Object[]> rowsData = tpp.getPreviewRows(stepName);
Font f = new Font("Arial", Font.PLAIN, 12);
FontMetrics fm = Toolkit.getDefaultToolkit().getFontMetrics(f);
if (rowMeta != null) {
JSONObject jsonObject = new JSONObject();
List<ValueMetaInterface> valueMetas = rowMeta.getValueMetaList();
int width = 0;
JSONArray columns = new JSONArray();
JSONObject metaData = new JSONObject();
JSONArray fields = new JSONArray();
for (int i = 0; i < valueMetas.size(); i++) {
ValueMetaInterface valueMeta = rowMeta.getValueMeta(i);
fields.add(valueMeta.getName());
String header = valueMeta.getComments() == null ? valueMeta.getName() : valueMeta.getComments();
int hWidth = fm.stringWidth(header) + 10;
width += hWidth;
JSONObject column = new JSONObject();
column.put("dataIndex", valueMeta.getName());
column.put("header", header);
column.put("width", hWidth);
columns.add(column);
}
metaData.put("fields", fields);
metaData.put("root", "firstRecords");
JSONArray firstRecords = new JSONArray();
for (int rowNr = 0; rowNr < rowsData.size(); rowNr++) {
Object[] rowData = rowsData.get(rowNr);
JSONObject row = new JSONObject();
for (int colNr = 0; colNr < rowMeta.size(); colNr++) {
String string = null;
ValueMetaInterface valueMetaInterface;
try {
valueMetaInterface = rowMeta.getValueMeta(colNr);
if (valueMetaInterface.isStorageBinaryString()) {
Object nativeType = valueMetaInterface.convertBinaryStringToNativeType((byte[]) rowData[colNr]);
string = valueMetaInterface.getStorageMetadata().getString(nativeType);
} else {
string = rowMeta.getString(rowData, colNr);
}
} catch (Exception e) {
e.printStackTrace();
}
if (!StringUtils.hasText(string)) {
string = "<null>";
}
ValueMetaInterface valueMeta = rowMeta.getValueMeta(colNr);
row.put(valueMeta.getName(), string);
}
if (firstRecords.size() <= rowLimit) {
firstRecords.add(row);
}
}
jsonObject.put("metaData", metaData);
jsonObject.put("columns", columns);
jsonObject.put("firstRecords", firstRecords);
jsonObject.put("width", width < 1000 ? width : 1000);
JsonUtils.response(jsonObject);
}
}
}
| 12,449 |
22,688 | /******************************************************************************
* Copyright 2021 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/prediction/scenario/interaction_filter/interaction_filter.h"
#include <limits>
#include <queue>
#include <unordered_map>
#include <utility>
#include "modules/prediction/common/prediction_gflags.h"
#include "modules/prediction/common/prediction_map.h"
#include "modules/prediction/container/adc_trajectory/adc_trajectory_container.h"
#include "modules/prediction/container/obstacles/obstacle_clusters.h"
#include "modules/prediction/container/pose/pose_container.h"
#include "modules/prediction/container/storytelling/storytelling_container.h"
namespace apollo {
namespace prediction {
using apollo::common::Point3D;
using apollo::common::adapter::AdapterConfig;
using apollo::hdmap::LaneInfo;
using apollo::hdmap::OverlapInfo;
using apollo::perception::PerceptionObstacle;
using ConstLaneInfoPtr = std::shared_ptr<const LaneInfo>;
namespace {
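// True if any lane segment of the sequence lies on the ego vehicle's target reference line.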
bool IsLaneSequenceInReferenceLine(
const LaneSequence& lane_sequence,
const ADCTrajectoryContainer* ego_trajectory_container) {
for (const auto& lane_segment : lane_sequence.lane_segment()) {
std::string lane_id = lane_segment.lane_id();
if (ego_trajectory_container->IsLaneIdInTargetReferenceLine(lane_id)) {
return true;
}
}
return false;
}
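// Returns the id of the nearest obstacle ahead of the ego position on the lane sequence
// (relative s within [0, FLAGS_interaction_search_distance_ahead]), or the INT_MIN sentinel
// if none is found.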
int NearestFrontObstacleIdOnLaneSequence(const LaneSequence& lane_sequence) {
int nearest_front_obstacle_id = std::numeric_limits<int>::min();
double smallest_relative_s = std::numeric_limits<double>::max();
for (const auto& nearby_obs : lane_sequence.nearby_obstacle()) {
if (nearby_obs.s() < 0.0 ||
nearby_obs.s() > FLAGS_interaction_search_distance_ahead) {
continue;
}
if (nearby_obs.s() < smallest_relative_s) {
smallest_relative_s = nearby_obs.s();
nearest_front_obstacle_id = nearby_obs.id();
}
}
return nearest_front_obstacle_id;
}
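// Returns the id of the nearest obstacle behind the ego position on the lane sequence
// (relative s within [-FLAGS_interaction_search_distance_backward, 0]), or the INT_MIN
// sentinel if none is found.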
int NearestBackwardObstacleIdOnLaneSequence(const LaneSequence& lane_sequence) {
int nearest_backward_obstacle_id = std::numeric_limits<int>::min();
double smallest_relative_s = std::numeric_limits<double>::max();
for (const auto& nearby_obs : lane_sequence.nearby_obstacle()) {
if (nearby_obs.s() > 0.0 ||
nearby_obs.s() < -FLAGS_interaction_search_distance_backward) {
continue;
}
if (-nearby_obs.s() < smallest_relative_s) {
smallest_relative_s = -nearby_obs.s();
nearest_backward_obstacle_id = nearby_obs.id();
}
}
return nearest_backward_obstacle_id;
}
} // namespace
InteractionFilter::InteractionFilter(
const std::shared_ptr<ContainerManager>& container_manager)
: container_manager_(container_manager) {}
void InteractionFilter::AssignInteractiveTag() {
auto obstacles_container =
container_manager_->GetContainer<ObstaclesContainer>(
AdapterConfig::PERCEPTION_OBSTACLES);
if (obstacles_container == nullptr) {
AERROR << "Obstacles container pointer is a null pointer.";
return;
}
Obstacle* ego_vehicle =
obstacles_container->GetObstacle(FLAGS_ego_vehicle_id);
if (ego_vehicle == nullptr) {
AERROR << "Ego vehicle not found";
return;
}
if (ego_vehicle->history_size() == 0) {
AERROR << "Ego vehicle has no history";
return;
}
const auto& obstacle_ids =
obstacles_container->curr_frame_movable_obstacle_ids();
for (const int obstacle_id : obstacle_ids) {
Obstacle* obstacle_ptr = obstacles_container->GetObstacle(obstacle_id);
if (obstacle_ptr == nullptr) {
AERROR << "Null obstacle pointer found.";
continue;
}
if (obstacle_ptr->history_size() == 0) {
AERROR << "Obstacle [" << obstacle_ptr->id() << "] has no feature.";
continue;
}
Feature* latest_feature_ptr = obstacle_ptr->mutable_latest_feature();
latest_feature_ptr->mutable_interactive_tag()->set_interactive_tag(
ObstacleInteractiveTag::NONINTERACTION);
}
  auto storytelling_container =
      container_manager_->GetContainer<StoryTellingContainer>(
          AdapterConfig::STORYTELLING);
  if (storytelling_container != nullptr &&
      storytelling_container->ADCDistanceToJunction() <
          FLAGS_junction_distance_threshold) {
    AssignInteractiveTagInJunction(*ego_vehicle, obstacles_container,
                                   storytelling_container->ADCJunctionId());
  }
AssignInteractiveTagCruiseKeepLane(*ego_vehicle, obstacles_container);
AssignInteractiveTagCruiseChangeLane(*ego_vehicle, obstacles_container);
AssignInteractiveTagByEgoReferenceLine(*ego_vehicle, obstacles_container);
if (FLAGS_enable_rank_interactive_obstacles) {
RankingInteractiveTagObstacles(*ego_vehicle, obstacles_container);
}
}
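// Checks each movable obstacle inside junction_id and tags it according to
// its distance to the ego vehicle (see SetInteractiveIfCloseToEgo).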
void InteractionFilter::AssignInteractiveTagInJunction(
const Obstacle& ego_vehicle, ObstaclesContainer* obstacles_container,
const std::string& junction_id) {
const auto& obstacle_ids =
obstacles_container->curr_frame_movable_obstacle_ids();
for (const int obstacle_id : obstacle_ids) {
Obstacle* obstacle_ptr = obstacles_container->GetObstacle(obstacle_id);
if (obstacle_ptr == nullptr) {
AERROR << "Null obstacle pointer found.";
continue;
}
if (obstacle_ptr->IsInJunction(junction_id)) {
SetInteractiveIfCloseToEgo(ego_vehicle,
FLAGS_interaction_distance_threshold,
obstacle_ptr);
}
}
}
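// For every lane sequence in the ego vehicle's lane graph, finds the nearest
// front obstacle and tags it according to its distance to the ego vehicle.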
void InteractionFilter::AssignInteractiveTagCruiseKeepLane(
const Obstacle& ego_vehicle, ObstaclesContainer* obstacles_container) {
const Feature& ego_latest_feature = ego_vehicle.latest_feature();
for (const LaneSequence& lane_sequence :
ego_latest_feature.lane().lane_graph().lane_sequence()) {
int nearest_front_obstacle_id =
NearestFrontObstacleIdOnLaneSequence(lane_sequence);
if (nearest_front_obstacle_id < 0) {
continue;
}
Obstacle* obstacle_ptr =
obstacles_container->GetObstacle(nearest_front_obstacle_id);
if (obstacle_ptr == nullptr) {
AERROR << "Obstacle [" << nearest_front_obstacle_id << "] Not found";
continue;
}
SetInteractiveIfCloseToEgo(ego_vehicle,
FLAGS_interaction_distance_threshold,
obstacle_ptr);
}
}
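// Handles the lane-change case: on the lane sequence the ego vehicle currently
// occupies only the nearest front obstacle is considered, while on lane
// sequences that belong to the target reference line both the nearest front
// and the nearest backward obstacles are considered.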
void InteractionFilter::AssignInteractiveTagCruiseChangeLane(
const Obstacle& ego_vehicle, ObstaclesContainer* obstacles_container) {
  ADCTrajectoryContainer* ego_trajectory_container =
      container_manager_->GetContainer<ADCTrajectoryContainer>(
          AdapterConfig::PLANNING_TRAJECTORY);
  if (ego_trajectory_container == nullptr) {
    AERROR << "ego_trajectory_container is nullptr";
    return;
  }
const Feature& ego_latest_feature = ego_vehicle.latest_feature();
for (const LaneSequence& lane_sequence :
ego_latest_feature.lane().lane_graph().lane_sequence()) {
if (lane_sequence.vehicle_on_lane()) {
int nearest_front_obstacle_id =
NearestFrontObstacleIdOnLaneSequence(lane_sequence);
if (nearest_front_obstacle_id < 0) {
continue;
}
Obstacle* obstacle_ptr =
obstacles_container->GetObstacle(nearest_front_obstacle_id);
if (obstacle_ptr == nullptr) {
AERROR << "Obstacle [" << nearest_front_obstacle_id << "] Not found";
continue;
}
SetInteractiveIfCloseToEgo(ego_vehicle,
FLAGS_interaction_distance_threshold,
obstacle_ptr);
} else if (IsLaneSequenceInReferenceLine(lane_sequence,
ego_trajectory_container)) {
int nearest_front_obstacle_id =
NearestFrontObstacleIdOnLaneSequence(lane_sequence);
int nearest_backward_obstacle_id =
NearestBackwardObstacleIdOnLaneSequence(lane_sequence);
if (nearest_front_obstacle_id >= 0) {
Obstacle* front_obstacle_ptr =
obstacles_container->GetObstacle(nearest_front_obstacle_id);
if (front_obstacle_ptr != nullptr) {
SetInteractiveIfCloseToEgo(ego_vehicle,
FLAGS_interaction_distance_threshold,
front_obstacle_ptr);
}
}
if (nearest_backward_obstacle_id >= 0) {
Obstacle* backward_obstacle_ptr =
obstacles_container->GetObstacle(nearest_backward_obstacle_id);
if (backward_obstacle_ptr != nullptr) {
SetInteractiveIfCloseToEgo(ego_vehicle,
FLAGS_interaction_distance_threshold,
backward_obstacle_ptr);
}
}
}
}
}
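// Projects the ego vehicle onto its target lane id sequence to locate its
// station, records the lanes behind it, and then walks the lanes ahead
// (within FLAGS_interaction_search_distance_ahead) to tag obstacles on
// merging and overlapping lanes.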
void InteractionFilter::AssignInteractiveTagByEgoReferenceLine(
const Obstacle& ego_vehicle, ObstaclesContainer* obstacles_container) {
ADCTrajectoryContainer* adc_trajectory_container =
container_manager_->GetContainer<ADCTrajectoryContainer>(
AdapterConfig::PLANNING_TRAJECTORY);
if (adc_trajectory_container == nullptr) {
AERROR << "adc_trajectory_container is nullptr";
return;
}
const std::vector<std::string>& lane_ids =
adc_trajectory_container->GetADCTargetLaneIDSequence();
if (lane_ids.empty()) {
return;
}
const Feature& ego_feature = ego_vehicle.latest_feature();
double ego_x = ego_feature.position().x();
double ego_y = ego_feature.position().y();
double ego_vehicle_s = std::numeric_limits<double>::max();
double ego_vehicle_l = std::numeric_limits<double>::max();
double accumulated_s = 0.0;
  // First loop over lane_ids to find ego_vehicle_s and the ego lane.
for (const std::string& lane_id : lane_ids) {
std::shared_ptr<const LaneInfo> lane_info_ptr =
PredictionMap::LaneById(lane_id);
if (lane_info_ptr == nullptr) {
AERROR << "Null lane info pointer found.";
continue;
}
double s = 0.0;
double l = 0.0;
if (PredictionMap::GetProjection({ego_x, ego_y}, lane_info_ptr, &s, &l)) {
if (std::fabs(l) < std::fabs(ego_vehicle_l)) {
ego_vehicle_s = accumulated_s + s;
ego_vehicle_l = l;
ego_lane_id_ = lane_id;
ego_lane_s_ = s;
}
}
accumulated_s += lane_info_ptr->total_length();
}
  // Record the lane ids that lie behind the ego vehicle's current lane.
accumulated_s = 0.0;
for (const std::string& lane_id : lane_ids) {
if (lane_id == ego_lane_id_) {
break;
}
ego_back_lane_id_set_.insert(lane_id);
}
std::unordered_set<std::string> visited_lanes(lane_ids.begin(),
lane_ids.end());
  // Second loop over lane_ids to assign interactive tags to obstacle vehicles.
for (const std::string& lane_id : lane_ids) {
if (ego_back_lane_id_set_.find(lane_id) != ego_back_lane_id_set_.end()) {
continue;
}
std::shared_ptr<const LaneInfo> lane_info_ptr =
PredictionMap::LaneById(lane_id);
if (lane_info_ptr == nullptr) {
AERROR << "Null lane info pointer found.";
continue;
}
accumulated_s += lane_info_ptr->total_length();
if (lane_id != ego_lane_id_) {
AssignInteractiveByMerge(ego_vehicle, lane_info_ptr, &visited_lanes,
obstacles_container);
}
AssignInteractiveByOverlap(ego_vehicle, lane_info_ptr, &visited_lanes,
obstacles_container);
if (accumulated_s >
FLAGS_interaction_search_distance_ahead + ego_vehicle_s) {
break;
}
}
}
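// Collects all currently interactive obstacles into a max-heap keyed by
// distance to the ego vehicle and demotes the farthest ones until at most
// FLAGS_interactive_obs_max_nums interactive obstacles remain.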
void InteractionFilter::RankingInteractiveTagObstacles(
const Obstacle& ego_vehicle, ObstaclesContainer* obstacles_container) {
const Point3D& ego_position = ego_vehicle.latest_feature().position();
const auto& obstacle_ids =
obstacles_container->curr_frame_movable_obstacle_ids();
std::priority_queue<std::pair<double, Obstacle*>> interactive_obstacle_queue;
for (const int obstacle_id : obstacle_ids) {
Obstacle* obstacle_ptr = obstacles_container->GetObstacle(obstacle_id);
if (obstacle_ptr == nullptr) {
AERROR << "Obstacle [" << obstacle_id << "] Not found";
continue;
}
if (!obstacle_ptr->IsInteractiveObstacle()) {
continue;
}
const Point3D& obstacle_position =
obstacle_ptr->latest_feature().position();
double distance = std::hypot(obstacle_position.x() - ego_position.x(),
obstacle_position.y() - ego_position.y());
interactive_obstacle_queue.push({distance, obstacle_ptr});
}
  // The priority queue is a max-heap on distance, so the farthest interactive
  // obstacles are popped and demoted first, until at most
  // FLAGS_interactive_obs_max_nums interactive obstacles remain.
  while (static_cast<int>(interactive_obstacle_queue.size()) >
         FLAGS_interactive_obs_max_nums) {
    Obstacle* obstacle_ptr = interactive_obstacle_queue.top().second;
    obstacle_ptr->mutable_latest_feature()
        ->mutable_interactive_tag()
        ->set_interactive_tag(ObstacleInteractiveTag::NONINTERACTION);
    interactive_obstacle_queue.pop();
  }
}
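// Handles the merge case: searches backward from lane_info_ptr through
// predecessor lanes, bounded by
// FLAGS_interaction_search_distance_backward_for_merge, and tags the
// obstacles found there.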
void InteractionFilter::AssignInteractiveByMerge(
const Obstacle& ego_vehicle, std::shared_ptr<const LaneInfo> lane_info_ptr,
std::unordered_set<std::string>* const visited_lanes,
ObstaclesContainer* obstacles_container) {
SetInteractiveBackward(FLAGS_interaction_search_distance_backward_for_merge,
ego_vehicle, lane_info_ptr, visited_lanes,
obstacles_container);
}
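// For every lane that crosses lane_info_ptr ahead of the ego station, searches
// backward from the overlap region and tags the obstacles found there.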
void InteractionFilter::AssignInteractiveByOverlap(
const Obstacle& ego_vehicle, std::shared_ptr<const LaneInfo> lane_info_ptr,
std::unordered_set<std::string>* const visited_lanes,
ObstaclesContainer* obstacles_container) {
  const std::vector<std::shared_ptr<const OverlapInfo>> cross_lanes =
      lane_info_ptr->cross_lanes();
  for (const auto& overlap_ptr : cross_lanes) {
bool consider_overlap = true;
for (const auto& object : overlap_ptr->overlap().object()) {
if (object.id().id() == lane_info_ptr->id().id() &&
object.lane_overlap_info().end_s() < ego_lane_s_) {
consider_overlap = false;
}
}
if (!consider_overlap) {
continue;
}
for (const auto& object : overlap_ptr->overlap().object()) {
const auto& object_id = object.id().id();
if (object_id == lane_info_ptr->id().id()) {
continue;
}
std::shared_ptr<const LaneInfo> overlap_lane_ptr =
PredictionMap::LaneById(object_id);
      // ahead_s is the remaining length of the overlap lane beyond the start
      // of the overlap region.
double ahead_s = overlap_lane_ptr->total_length() -
object.lane_overlap_info().start_s();
SetInteractiveBackward(
ahead_s + FLAGS_interaction_search_distance_backward_for_overlap,
ego_vehicle, overlap_lane_ptr, visited_lanes, obstacles_container);
}
}
}
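// Breadth-first search over predecessor lanes starting from
// start_lane_info_ptr and bounded by max_distance. On each unvisited lane that
// carries obstacles, the obstacle closest to the lane end (largest lane_s) is
// tagged via SetInteractiveIfCloseToEgo. Lanes behind the ego vehicle are
// skipped.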
void InteractionFilter::SetInteractiveBackward(
const double max_distance, const Obstacle& ego_vehicle,
std::shared_ptr<const LaneInfo> start_lane_info_ptr,
std::unordered_set<std::string>* const visited_lanes,
ObstaclesContainer* obstacles_container) {
std::string start_lane_id = start_lane_info_ptr->id().id();
if (ego_back_lane_id_set_.find(start_lane_id) !=
ego_back_lane_id_set_.end()) {
return;
}
std::unordered_map<std::string, std::vector<LaneObstacle>> lane_obstacles =
obstacles_container->GetClustersPtr()->GetLaneObstacles();
std::queue<std::pair<ConstLaneInfoPtr, double>> lane_info_queue;
lane_info_queue.emplace(start_lane_info_ptr,
start_lane_info_ptr->total_length());
while (!lane_info_queue.empty()) {
ConstLaneInfoPtr curr_lane = lane_info_queue.front().first;
double cumu_distance = lane_info_queue.front().second;
lane_info_queue.pop();
const std::string& lane_id = curr_lane->id().id();
if (visited_lanes->find(lane_id) == visited_lanes->end() &&
lane_obstacles.find(lane_id) != lane_obstacles.end() &&
!lane_obstacles[lane_id].empty()) {
visited_lanes->insert(lane_id);
// find the obstacle with largest lane_s on the lane
int obstacle_id = lane_obstacles[lane_id].front().obstacle_id();
double obstacle_s = lane_obstacles[lane_id].front().lane_s();
for (const LaneObstacle& lane_obstacle : lane_obstacles[lane_id]) {
if (lane_obstacle.lane_s() > obstacle_s) {
obstacle_id = lane_obstacle.obstacle_id();
obstacle_s = lane_obstacle.lane_s();
}
}
Obstacle* obstacle_ptr = obstacles_container->GetObstacle(obstacle_id);
if (obstacle_ptr == nullptr) {
AERROR << "Obstacle [" << obstacle_id << "] Not found";
continue;
}
SetInteractiveIfCloseToEgo(ego_vehicle,
FLAGS_interaction_distance_threshold,
obstacle_ptr);
continue;
}
if (cumu_distance > max_distance) {
continue;
}
for (const auto& pre_lane_id : curr_lane->lane().predecessor_id()) {
if (ego_back_lane_id_set_.find(pre_lane_id.id()) !=
ego_back_lane_id_set_.end()) {
continue;
}
ConstLaneInfoPtr pre_lane_ptr = PredictionMap::LaneById(pre_lane_id.id());
lane_info_queue.emplace(pre_lane_ptr,
cumu_distance + pre_lane_ptr->total_length());
}
}
}
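// Tags the obstacle as interactive if it is a vehicle within
// distance_threshold of the ego vehicle; otherwise tags it as non-interactive.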
void InteractionFilter::SetInteractiveIfCloseToEgo(
const Obstacle& ego_vehicle, const double distance_threshold,
Obstacle* obstacle_ptr) {
const Point3D& obstacle_position = obstacle_ptr->latest_feature().position();
const Point3D& ego_position = ego_vehicle.latest_feature().position();
double diff_x = obstacle_position.x() - ego_position.x();
double diff_y = obstacle_position.y() - ego_position.y();
double distance = std::hypot(diff_x, diff_y);
// Add interactive tag only for vehicles
if (distance < distance_threshold &&
obstacle_ptr->latest_feature().type() == PerceptionObstacle::VEHICLE) {
obstacle_ptr->SetInteractiveTag();
} else {
obstacle_ptr->SetNonInteractiveTag();
}
}
} // namespace prediction
} // namespace apollo
7444