max_stars_count (int64: 301 to 224k) | text (string lengths: 6 to 1.05M) | token_count (int64: 3 to 727k) |
---|---|---|
1,831 | <filename>logdevice/common/protocol/GET_EPOCH_RECOVERY_METADATA_Message.cpp
/**
* Copyright (c) 2017-present, Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "logdevice/common/protocol/GET_EPOCH_RECOVERY_METADATA_Message.h"
namespace facebook { namespace logdevice {
GET_EPOCH_RECOVERY_METADATA_Message::GET_EPOCH_RECOVERY_METADATA_Message(
const GET_EPOCH_RECOVERY_METADATA_Header& header)
: Message(MessageType::GET_EPOCH_RECOVERY_METADATA, TrafficClass::RECOVERY),
header_(header) {}
void GET_EPOCH_RECOVERY_METADATA_Message::serialize(
ProtocolWriter& writer) const {
writer.write(header_);
}
MessageReadResult
GET_EPOCH_RECOVERY_METADATA_Message::deserialize(ProtocolReader& reader) {
GET_EPOCH_RECOVERY_METADATA_Header hdr{};
reader.read(&hdr);
return reader.result(
[&] { return new GET_EPOCH_RECOVERY_METADATA_Message(hdr); });
}
PermissionParams
GET_EPOCH_RECOVERY_METADATA_Message::getPermissionParams() const {
PermissionParams params;
params.requiresPermission = true;
params.action = ACTION::SERVER_INTERNAL;
return params;
}
}} // namespace facebook::logdevice
| 468 |
9,734 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "parquet/level_conversion.h"
#include <algorithm>
#include <limits>
#include "arrow/util/bit_run_reader.h"
#include "arrow/util/bit_util.h"
#include "arrow/util/cpu_info.h"
#include "arrow/util/logging.h"
#include "arrow/util/optional.h"
#include "parquet/exception.h"
#include "parquet/level_comparison.h"
#define PARQUET_IMPL_NAMESPACE standard
#include "parquet/level_conversion_inc.h"
#undef PARQUET_IMPL_NAMESPACE
namespace parquet {
namespace internal {
namespace {
using ::arrow::internal::CpuInfo;
using ::arrow::util::optional;
template <typename OffsetType>
void DefRepLevelsToListInfo(const int16_t* def_levels, const int16_t* rep_levels,
int64_t num_def_levels, LevelInfo level_info,
ValidityBitmapInputOutput* output, OffsetType* offsets) {
OffsetType* orig_pos = offsets;
optional<::arrow::internal::FirstTimeBitmapWriter> valid_bits_writer;
if (output->valid_bits) {
valid_bits_writer.emplace(output->valid_bits, output->valid_bits_offset,
output->values_read_upper_bound);
}
for (int x = 0; x < num_def_levels; x++) {
// Skip items that belong to empty or null ancestor lists and further nested lists.
if (def_levels[x] < level_info.repeated_ancestor_def_level ||
rep_levels[x] > level_info.rep_level) {
continue;
}
if (rep_levels[x] == level_info.rep_level) {
// A continuation of an existing list.
// offsets can be null for structs with repeated children (we don't need to know
// offsets until we get to the children).
if (offsets != nullptr) {
if (ARROW_PREDICT_FALSE(*offsets == std::numeric_limits<OffsetType>::max())) {
throw ParquetException("List index overflow.");
}
*offsets += 1;
}
} else {
if (ARROW_PREDICT_FALSE(
(valid_bits_writer.has_value() &&
valid_bits_writer->position() >= output->values_read_upper_bound) ||
(offsets - orig_pos) >= output->values_read_upper_bound)) {
std::stringstream ss;
ss << "Definition levels exceeded upper bound: "
<< output->values_read_upper_bound;
throw ParquetException(ss.str());
}
// current_rep < list rep_level i.e. start of a list (ancestor empty lists are
// filtered out above).
// offsets can be null for structs with repeated children (we don't need to know
// offsets until we get to the children).
if (offsets != nullptr) {
++offsets;
        // Use cumulative offsets because variable size lists are more common than
        // fixed size lists, so it should be cheaper to make these cumulative and
        // subtract when validating fixed size lists.
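        // Illustrative example: for list values [[a, b], [], [c]] the offsets buffer
        // ends up holding the running totals [0, 2, 2, 3]; per-list lengths can be
        // recovered later by subtracting consecutive offsets.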
*offsets = *(offsets - 1);
if (def_levels[x] >= level_info.def_level) {
if (ARROW_PREDICT_FALSE(*offsets == std::numeric_limits<OffsetType>::max())) {
throw ParquetException("List index overflow.");
}
*offsets += 1;
}
}
if (valid_bits_writer.has_value()) {
        // The level_info def level for lists reflects the element-present level;
        // the prior level (def_level - 1) distinguishes empty lists from null ones.
if (def_levels[x] >= level_info.def_level - 1) {
valid_bits_writer->Set();
} else {
output->null_count++;
valid_bits_writer->Clear();
}
valid_bits_writer->Next();
}
}
}
if (valid_bits_writer.has_value()) {
valid_bits_writer->Finish();
}
if (offsets != nullptr) {
output->values_read = offsets - orig_pos;
} else if (valid_bits_writer.has_value()) {
output->values_read = valid_bits_writer->position();
}
if (output->null_count > 0 && level_info.null_slot_usage > 1) {
throw ParquetException(
"Null values with null_slot_usage > 1 not supported."
"(i.e. FixedSizeLists with null values are not supported)");
}
}
} // namespace
#if defined(ARROW_HAVE_RUNTIME_BMI2)
// defined in level_conversion_bmi2.cc for dynamic dispatch.
void DefLevelsToBitmapBmi2WithRepeatedParent(const int16_t* def_levels,
int64_t num_def_levels, LevelInfo level_info,
ValidityBitmapInputOutput* output);
#endif
void DefLevelsToBitmap(const int16_t* def_levels, int64_t num_def_levels,
LevelInfo level_info, ValidityBitmapInputOutput* output) {
// It is simpler to rely on rep_level here until PARQUET-1899 is done and the code
// is deleted in a follow-up release.
if (level_info.rep_level > 0) {
#if defined(ARROW_HAVE_RUNTIME_BMI2)
if (CpuInfo::GetInstance()->HasEfficientBmi2()) {
return DefLevelsToBitmapBmi2WithRepeatedParent(def_levels, num_def_levels,
level_info, output);
}
#endif
standard::DefLevelsToBitmapSimd</*has_repeated_parent=*/true>(
def_levels, num_def_levels, level_info, output);
} else {
standard::DefLevelsToBitmapSimd</*has_repeated_parent=*/false>(
def_levels, num_def_levels, level_info, output);
}
}
uint64_t TestOnlyExtractBitsSoftware(uint64_t bitmap, uint64_t select_bitmap) {
return standard::ExtractBitsSoftware(bitmap, select_bitmap);
}
void DefRepLevelsToList(const int16_t* def_levels, const int16_t* rep_levels,
int64_t num_def_levels, LevelInfo level_info,
ValidityBitmapInputOutput* output, int32_t* offsets) {
DefRepLevelsToListInfo<int32_t>(def_levels, rep_levels, num_def_levels, level_info,
output, offsets);
}
void DefRepLevelsToList(const int16_t* def_levels, const int16_t* rep_levels,
int64_t num_def_levels, LevelInfo level_info,
ValidityBitmapInputOutput* output, int64_t* offsets) {
DefRepLevelsToListInfo<int64_t>(def_levels, rep_levels, num_def_levels, level_info,
output, offsets);
}
void DefRepLevelsToBitmap(const int16_t* def_levels, const int16_t* rep_levels,
int64_t num_def_levels, LevelInfo level_info,
ValidityBitmapInputOutput* output) {
  // DefRepLevelsToListInfo assumes the def/rep levels describe the list itself,
  // but this method is for parent structs, so we need to bump the def and rep levels.
level_info.rep_level += 1;
level_info.def_level += 1;
DefRepLevelsToListInfo<int32_t>(def_levels, rep_levels, num_def_levels, level_info,
output, /*offsets=*/nullptr);
}
} // namespace internal
} // namespace parquet
| 3,053 |
877 | // This test covers Issue345 at:
// https://github.com/typetools/checker-framework/issues/345
public class AssignmentDuringInitialization {
String f1;
String f2;
String f3;
String f4;
String f5;
String f6;
{
// :: error: (assignment)
f1 = f2;
f2 = f1;
f2.toString(); // Null pointer exception here
}
public AssignmentDuringInitialization() {
// :: error: (assignment)
f3 = f4;
f4 = f3;
f4.toString(); // Null pointer exception here
f5 = "hello";
f6 = f5;
}
public void goodBehavior() {
// This isn't a constructor or initializer.
// The receiver of this method should already be initialized
    // and therefore f5 and f6 should already be initialized.
f5 = f6;
f6 = f5;
f6.toString(); // No exception here
}
public static void main(String[] args) {
AssignmentDuringInitialization a = new AssignmentDuringInitialization();
}
}
| 331 |
1,720 | #pragma once
#include "Iop_SifMan.h"
namespace Iop
{
class CSifManNull : public CSifMan
{
public:
void RegisterModule(uint32, CSifModule*) override;
bool IsModuleRegistered(uint32) override;
void UnregisterModule(uint32) override;
void SendPacket(void*, uint32) override;
void SetDmaBuffer(uint32, uint32) override;
void SetCmdBuffer(uint32, uint32) override;
void SendCallReply(uint32, const void*) override;
void GetOtherData(uint32, uint32, uint32) override;
void SetModuleResetHandler(const ModuleResetHandler&) override;
void SetCustomCommandHandler(const CustomCommandHandler&) override;
};
}
| 234 |
1,445 | from datetime import datetime
from tapiriik.database import redis
import pickle
class SessionCache:
def __init__(self, scope, lifetime, freshen_on_get=False):
self._lifetime = lifetime
self._autorefresh = freshen_on_get
self._scope = scope
self._cacheKey = "sessioncache:%s:%s" % (self._scope, "%s")
def Get(self, pk, freshen=False):
res = redis.get(self._cacheKey % pk)
if res:
try:
res = pickle.loads(res)
except pickle.UnpicklingError:
self.Delete(pk)
res = None
else:
if self._autorefresh or freshen:
redis.expire(self._cacheKey % pk, self._lifetime)
return res
def Set(self, pk, value, lifetime=None):
lifetime = lifetime or self._lifetime
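        # Note: this argument order (name, value, time) matches the legacy Redis client
        # in redis-py < 3.0; redis-py >= 3.0 expects setex(name, time, value) instead.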
redis.setex(self._cacheKey % pk, pickle.dumps(value), lifetime)
def Delete(self, pk):
redis.delete(self._cacheKey % pk)
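
# Minimal usage sketch (assumption: the scope and key names below are illustrative only,
# and the shared redis connection imported above is reachable); it shows the
# Set/Get/Delete round trip and the TTL refresh enabled by freshen_on_get.
if __name__ == "__main__":
    cache = SessionCache("exampleservice", lifetime=3600, freshen_on_get=True)
    cache.Set("user-123", {"token": "abc"})  # value is pickled and stored with a TTL
    session = cache.Get("user-123")          # returns the unpickled value, refreshing the TTL
    cache.Delete("user-123")                 # removes the cached entry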
| 475 |
777 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_TEST_RUNNER_PIXEL_DUMP_H_
#define COMPONENTS_TEST_RUNNER_PIXEL_DUMP_H_
#include "base/callback_forward.h"
#include "components/test_runner/test_runner_export.h"
class SkBitmap;
namespace blink {
class WebView;
} // namespace blink
namespace test_runner {
class LayoutTestRuntimeFlags;
// Dumps an image snapshot of |web_view|. The exact dump mode depends on
// |layout_test_runtime_flags| (i.e. dump_selection_rect and/or is_printing).
// Caller needs to ensure that |layout_test_runtime_flags| stays alive until
// |callback| gets called.
TEST_RUNNER_EXPORT void DumpPixelsAsync(
blink::WebView* web_view,
const LayoutTestRuntimeFlags& layout_test_runtime_flags,
float device_scale_factor_for_test,
const base::Callback<void(const SkBitmap&)>& callback);
// Copy to clipboard the image present at |x|, |y| coordinates in |web_view|
// and pass the captured image to |callback|.
void CopyImageAtAndCapturePixels(
blink::WebView* web_view,
int x,
int y,
const base::Callback<void(const SkBitmap&)>& callback);
} // namespace test_runner
#endif // COMPONENTS_TEST_RUNNER_PIXEL_DUMP_H_
| 445 |
879 | <gh_stars>100-1000
package org.zstack.sdk;
public class VpcFirewallVRouterRefInventory {
public long id;
public void setId(long id) {
this.id = id;
}
public long getId() {
return this.id;
}
public java.lang.String vpcFirewallUuid;
public void setVpcFirewallUuid(java.lang.String vpcFirewallUuid) {
this.vpcFirewallUuid = vpcFirewallUuid;
}
public java.lang.String getVpcFirewallUuid() {
return this.vpcFirewallUuid;
}
public java.lang.String vRouterUuid;
public void setVRouterUuid(java.lang.String vRouterUuid) {
this.vRouterUuid = vRouterUuid;
}
public java.lang.String getVRouterUuid() {
return this.vRouterUuid;
}
public java.sql.Timestamp createDate;
public void setCreateDate(java.sql.Timestamp createDate) {
this.createDate = createDate;
}
public java.sql.Timestamp getCreateDate() {
return this.createDate;
}
public java.sql.Timestamp lastOpDate;
public void setLastOpDate(java.sql.Timestamp lastOpDate) {
this.lastOpDate = lastOpDate;
}
public java.sql.Timestamp getLastOpDate() {
return this.lastOpDate;
}
}
| 514 |
581 | <gh_stars>100-1000
int g1 = 10;
int g2 = 1;
double g3 = 3.1415;
int main() {
int i = 666;
g1 = 42;
g2 = g1;
return 0;
} | 67 |
478 | <reponame>aerys/minko
/*
Copyright (c) 2014 Aerys
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "minko/Types.hpp"
#include "minko/file/Dependency.hpp"
#include "minko/file/GeometryWriter.hpp"
#include "minko/file/TextureWriter.hpp"
#include "minko/geometry/Geometry.hpp"
#include "minko/file/LinkedAsset.hpp"
#include "minko/file/MaterialWriter.hpp"
#include "minko/file/WriterOptions.hpp"
#include "minko/material/Material.hpp"
#include "minko/serialize/TypeSerializer.hpp"
using namespace minko;
using namespace minko::file;
using namespace minko::serialize;
std::unordered_map<uint, Dependency::GeometryTestFunc> Dependency::_geometryTestFunctions;
std::unordered_map<uint, Dependency::GeometryWriterFunction> Dependency::_geometryWriteFunctions;
Dependency::TextureWriterFunction Dependency::_textureWriteFunction;
Dependency::MaterialWriterFunction Dependency::_materialWriteFunction;
Dependency::Dependency() :
_parent()
{
_currentId = 1;
setGeometryFunction(std::bind(&Dependency::serializeGeometry,
std::placeholders::_1,
std::placeholders::_2,
std::placeholders::_3,
std::placeholders::_4,
std::placeholders::_5,
std::placeholders::_6,
std::placeholders::_7),
[=](std::shared_ptr<geometry::Geometry> geometry) -> bool
{
return true;
},
0);
if (_textureWriteFunction == nullptr)
{
_textureWriteFunction = std::bind(&Dependency::serializeTexture,
std::placeholders::_1,
std::placeholders::_2,
std::placeholders::_3,
std::placeholders::_4,
std::placeholders::_5);
}
if (_materialWriteFunction == nullptr)
{
_materialWriteFunction = std::bind(&Dependency::serializeMaterial,
std::placeholders::_1,
std::placeholders::_2,
std::placeholders::_3,
std::placeholders::_4,
std::placeholders::_5,
std::placeholders::_6);
}
}
bool
Dependency::hasDependency(std::shared_ptr<render::Effect> effect)
{
return _effectDependencies.find(effect) != _effectDependencies.end();
}
DependencyId
Dependency::registerDependency(std::shared_ptr<render::Effect> effect)
{
if (!hasDependency(effect))
_effectDependencies[effect] = _currentId++;
return _effectDependencies[effect];
}
bool
Dependency::hasDependency(std::shared_ptr<geometry::Geometry> geometry)
{
return _geometryDependencies.find(geometry) != _geometryDependencies.end();
}
DependencyId
Dependency::registerDependency(std::shared_ptr<geometry::Geometry> geometry)
{
if (!hasDependency(geometry))
_geometryDependencies[geometry] = _currentId++;
return _geometryDependencies[geometry];
}
bool
Dependency::hasDependency(std::shared_ptr<material::Material> material)
{
return _materialDependencies.find(material) != _materialDependencies.end();
}
DependencyId
Dependency::registerDependency(std::shared_ptr<material::Material> material)
{
if (!hasDependency(material))
_materialDependencies[material] = _currentId++;
return _materialDependencies[material];
}
bool
Dependency::hasDependency(AbsTexturePtr texture)
{
return _textureDependencies.find(texture) != _textureDependencies.end();
}
DependencyId
Dependency::registerDependency(AbsTexturePtr texture, const std::string& textureType)
{
auto dependencyIt = _textureDependencies.find(texture);
if (dependencyIt == _textureDependencies.end())
{
const auto dependencyId = _currentId++;
auto& textureDependency = _textureDependencies.emplace(texture, TextureDependency()).first->second;
textureDependency.dependencyId = dependencyId;
textureDependency.texture = texture;
textureDependency.textureType = textureType;
return textureDependency.dependencyId;
}
return dependencyIt->second.dependencyId;
}
bool
Dependency::hasDependency(std::shared_ptr<scene::Node> subScene)
{
return _subSceneDependencies.find(subScene) != _subSceneDependencies.end();
}
DependencyId
Dependency::registerDependency(std::shared_ptr<scene::Node> subScene)
{
if (!hasDependency(subScene))
_subSceneDependencies[subScene] = _currentId++;
return _subSceneDependencies[subScene];
}
bool
Dependency::hasDependency(std::shared_ptr<LinkedAsset> linkedAsset)
{
return _linkedAssetDependencies.find(linkedAsset) != _linkedAssetDependencies.end();
}
DependencyId
Dependency::registerDependency(std::shared_ptr<LinkedAsset> linkedAsset)
{
if (!hasDependency(linkedAsset))
_linkedAssetDependencies[linkedAsset] = _currentId++;
return _linkedAssetDependencies[linkedAsset];
}
std::shared_ptr<geometry::Geometry>
Dependency::getGeometryReference(DependencyId geometryId)
{
auto referenceIt = _geometryReferences.find(geometryId);
return referenceIt != _geometryReferences.end() ? referenceIt->second :
(_parent.expired() ? nullptr : _parent.lock()->getGeometryReference(geometryId));
}
void
Dependency::registerReference(DependencyId referenceId, std::shared_ptr<geometry::Geometry> geometry)
{
_geometryReferences[referenceId] = geometry;
}
std::shared_ptr<material::Material>
Dependency::getMaterialReference(DependencyId materialId)
{
auto referenceIt = _materialReferences.find(materialId);
return referenceIt != _materialReferences.end() ? referenceIt->second :
(_parent.expired() ? nullptr : _parent.lock()->getMaterialReference(materialId));
}
void
Dependency::registerReference(DependencyId referenceId, std::shared_ptr<material::Material> material)
{
_materialReferences[referenceId] = material;
}
Dependency::TextureReference*
Dependency::getTextureReference(DependencyId textureId)
{
auto referenceIt = _textureReferences.find(textureId);
return referenceIt != _textureReferences.end() ? &referenceIt->second :
(_parent.expired() ? nullptr : _parent.lock()->getTextureReference(textureId));
}
void
Dependency::registerReference(DependencyId referenceId, AbsTexturePtr texture)
{
auto textureReference = _textureReferences.emplace(referenceId, TextureReference());
textureReference.first->second.texture = texture;
}
std::shared_ptr<scene::Node>
Dependency::getSubsceneReference(DependencyId subSceneId)
{
auto referenceIt = _subSceneReferences.find(subSceneId);
return referenceIt != _subSceneReferences.end() ? referenceIt->second :
(_parent.expired() ? nullptr : _parent.lock()->getSubsceneReference(subSceneId));
}
void
Dependency::registerReference(DependencyId referenceId, std::shared_ptr<scene::Node> subScene)
{
_subSceneReferences[referenceId] = subScene;
}
void
Dependency::registerReference(DependencyId referenceId, std::shared_ptr<render::Effect> effect)
{
_effectReferences[referenceId] = effect;
}
void
Dependency::registerReference(DependencyId referenceId, std::shared_ptr<LinkedAsset> linkedAsset)
{
_linkedAssetReferences[referenceId] = linkedAsset;
}
std::shared_ptr<render::Effect>
Dependency::getEffectReference(DependencyId effectId)
{
auto referenceIt = _effectReferences.find(effectId);
return referenceIt != _effectReferences.end() ? referenceIt->second :
(_parent.expired() ? nullptr : _parent.lock()->getEffectReference(effectId));
}
std::shared_ptr<LinkedAsset>
Dependency::getLinkedAssetReference(DependencyId referenceId)
{
auto referenceIt = _linkedAssetReferences.find(referenceId);
return referenceIt != _linkedAssetReferences.end() ? referenceIt->second :
(_parent.expired() ? nullptr : _parent.lock()->getLinkedAssetReference(referenceId));
}
Dependency::SerializedAsset
Dependency::serializeGeometry(std::shared_ptr<Dependency> dependency,
std::shared_ptr<file::AssetLibrary> assetLibrary,
std::shared_ptr<geometry::Geometry> geometry,
DependencyId resourceId,
std::shared_ptr<file::Options> options,
std::shared_ptr<file::WriterOptions> writerOptions,
std::vector<Dependency::SerializedAsset>& userDefinedDependency)
{
GeometryWriter::Ptr geometryWriter = GeometryWriter::create();
serialize::AssetType assetType;
std::string content;
const auto filename = geometry->name();
const auto outputFilename = writerOptions->geometryNameFunction()(filename);
const auto writeFilename = writerOptions->geometryUriFunction()(outputFilename);
auto targetGeometry = writerOptions->geometryFunction()(filename, geometry);
const auto assetIsNull = writerOptions->assetIsNull(targetGeometry->uuid());
geometryWriter->data(writerOptions->geometryFunction()(filename, targetGeometry));
if (!assetIsNull && writerOptions->embedMode() & WriterOptions::EmbedMode::Geometry)
{
assetType = serialize::AssetType::EMBED_GEOMETRY_ASSET;
content = geometryWriter->embedAll(assetLibrary, options, writerOptions, dependency, userDefinedDependency);
}
else
{
assetType = serialize::AssetType::GEOMETRY_ASSET;
if (!assetIsNull)
{
auto embeddedHeaderData = std::vector<unsigned char>();
geometryWriter->write(writeFilename, assetLibrary, options, writerOptions, dependency, userDefinedDependency, embeddedHeaderData);
}
content = outputFilename;
}
SerializedAsset res(assetType, resourceId, content);
return res;
}
Dependency::SerializedAsset
Dependency::serializeTexture(std::shared_ptr<Dependency> dependency,
std::shared_ptr<file::AssetLibrary> assetLibrary,
const TextureDependency& textureDependency,
std::shared_ptr<file::Options> options,
std::shared_ptr<file::WriterOptions> writerOptions)
{
auto writer = TextureWriter::create();
const auto dependencyId = textureDependency.dependencyId;
auto texture = textureDependency.texture;
auto filename = assetLibrary->textureName(texture);
auto assetType = serialize::AssetType();
auto content = std::string();
const auto outputFilename = writerOptions->textureNameFunction()(filename);
const auto writeFilename = writerOptions->textureUriFunction()(outputFilename);
auto targetTexture = writerOptions->textureFunction()(filename, texture);
const auto assetIsNull = writerOptions->assetIsNull(targetTexture->uuid());
auto hasHeaderSize = !assetIsNull;
writer->data(writerOptions->textureFunction()(filename, targetTexture));
writer->textureType(*textureDependency.textureType);
if (!assetIsNull && writerOptions->embedMode() & WriterOptions::EmbedMode::Texture)
{
assetType = serialize::AssetType::EMBED_TEXTURE_PACK_ASSET;
content = writer->embedAll(assetLibrary, options, writerOptions, dependency);
}
else
{
hasHeaderSize = false;
assetType = serialize::AssetType::TEXTURE_PACK_ASSET;
if (!assetIsNull)
{
writer->write(writeFilename, assetLibrary, options, writerOptions, dependency);
}
content = outputFilename;
}
const auto headerSize = writer->headerSize();
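    // Packing sketch: bit 31 flags whether a header size is present, bits 16-27 hold
    // the 12-bit header size, and the low bits carry the asset type; e.g. with
    // hasHeaderSize == true and headerSize == 0x123, metadata == 0x80000000 + 0x01230000 + assetType.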
const auto metadata = static_cast<unsigned int>(hasHeaderSize ? 1u << 31 : 0u) +
static_cast<unsigned int>((headerSize & 0x0fff) << 16) +
static_cast<unsigned int>(assetType);
SerializedAsset res(metadata, dependencyId, content);
return res;
}
Dependency::SerializedAsset
Dependency::serializeMaterial(std::shared_ptr<Dependency> dependency,
std::shared_ptr<file::AssetLibrary> assetLibrary,
std::shared_ptr<material::Material> material,
DependencyId resourceId,
std::shared_ptr<file::Options> options,
std::shared_ptr<file::WriterOptions> writerOptions)
{
auto writer = MaterialWriter::create();
auto filename = assetLibrary->materialName(material);
auto assetType = serialize::AssetType();
auto content = std::string();
const auto outputFilename = writerOptions->materialNameFunction()(filename);
const auto writeFilename = writerOptions->materialUriFunction()(outputFilename);
auto targetMaterial = writerOptions->materialFunction()(filename, material);
const auto assetIsNull = writerOptions->assetIsNull(targetMaterial->uuid());
writer->data(writerOptions->materialFunction()(filename, targetMaterial));
if (!assetIsNull && writerOptions->embedMode() & WriterOptions::EmbedMode::Material)
{
assetType = serialize::AssetType::EMBED_MATERIAL_ASSET;
content = writer->embedAll(assetLibrary, options, writerOptions, dependency);
}
else
{
assetType = serialize::AssetType::MATERIAL_ASSET;
if (!assetIsNull)
{
writer->write(writeFilename, assetLibrary, options, writerOptions, dependency);
}
content = outputFilename;
}
SerializedAsset res(assetType, resourceId, content);
return res;
}
Dependency::SerializedAsset
Dependency::serializeEffect(std::shared_ptr<Dependency> dependency,
std::shared_ptr<file::AssetLibrary> assetLibrary,
std::shared_ptr<render::Effect> effect,
DependencyId resourceId,
std::shared_ptr<file::Options> options,
std::shared_ptr<file::WriterOptions> writerOptions)
{
auto filename = assetLibrary->effectName(effect);
auto assetType = serialize::AssetType();
auto content = std::string();
assetType = serialize::AssetType::EFFECT_ASSET;
content = File::removePrefixPathFromFilename(filename);
SerializedAsset res(assetType, resourceId, content);
return res;
}
std::vector<Dependency::SerializedAsset>
Dependency::serialize(const std::string& parentFilename,
std::shared_ptr<file::AssetLibrary> assetLibrary,
std::shared_ptr<file::Options> options,
std::shared_ptr<file::WriterOptions> writerOptions,
std::vector<std::vector<unsigned char>>& internalLinkedAssets)
{
std::vector<SerializedAsset> serializedAsset;
for (const auto& itGeometry : _geometryDependencies)
{
uint maxPriority = 0;
for (auto testGeomFunc : _geometryTestFunctions)
if (testGeomFunc.second(itGeometry.first) && maxPriority < testGeomFunc.first)
maxPriority = testGeomFunc.first;
std::vector<SerializedAsset> includeDependencies;
auto res = _geometryWriteFunctions[maxPriority](
shared_from_this(),
assetLibrary,
itGeometry.first,
itGeometry.second,
options,
writerOptions,
includeDependencies
);
serializedAsset.push_back(res);
}
for (const auto& itMaterial : _materialDependencies)
{
auto res = _materialWriteFunction(
shared_from_this(),
assetLibrary,
itMaterial.first,
itMaterial.second,
options,
writerOptions
);
serializedAsset.push_back(res);
}
for (const auto& effectDependency : _effectDependencies)
{
auto result = serializeEffect(
shared_from_this(),
assetLibrary,
effectDependency.first,
effectDependency.second,
options,
writerOptions
);
serializedAsset.push_back(result);
}
for (const auto& itTexture : _textureDependencies)
{
auto res = _textureWriteFunction(
shared_from_this(),
assetLibrary,
itTexture.second,
options,
writerOptions
);
serializedAsset.insert(serializedAsset.begin(), res);
}
auto internalLinkedAssetDataOffset = 0;
for (const auto& internalLinkedAsset : internalLinkedAssets)
internalLinkedAssetDataOffset += internalLinkedAsset.size();
for (const auto& linkedAssetToIdPair : _linkedAssetDependencies)
{
const auto& linkedAsset = *linkedAssetToIdPair.first;
const auto id = linkedAssetToIdPair.second;
msgpack::type::tuple<int, int, std::string, std::vector<unsigned char>, int> linkedAssetData(
linkedAsset.offset(),
linkedAsset.length(),
linkedAsset.filename(),
{},
static_cast<int>(linkedAsset.linkType())
);
switch (linkedAsset.linkType())
{
case LinkedAsset::LinkType::Copy:
linkedAssetData.get<3>() = linkedAsset.data();
break;
case LinkedAsset::LinkType::Internal:
{
linkedAssetData.get<0>() = internalLinkedAssetDataOffset;
internalLinkedAssets.emplace_back(linkedAsset.data().begin(), linkedAsset.data().end());
internalLinkedAssetDataOffset += linkedAsset.length();
break;
}
case LinkedAsset::LinkType::External:
{
const auto validFilename = File::removePrefixPathFromFilename(linkedAssetData.get<2>());
linkedAssetData.get<2>() = validFilename;
break;
}
default:
break;
}
std::stringstream linkedAssetSerializedData;
msgpack::pack(linkedAssetSerializedData, linkedAssetData);
const auto serializedLinkedAsset = SerializedAsset(
LINKED_ASSET,
id,
linkedAssetSerializedData.str()
);
serializedAsset.insert(serializedAsset.begin(), serializedLinkedAsset);
}
return serializedAsset;
}
| 6,951 |
7,883 | // Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma
// de Barcelona (UAB).
//
// This work is licensed under the terms of the MIT license.
// For a copy, see <https://opensource.org/licenses/MIT>.
#pragma once
#include <atomic>
#include <chrono>
#include <mutex>
#include <thread>
#include <vector>
#include "carla/client/detail/EpisodeProxy.h"
#include "carla/client/TrafficLight.h"
#include "carla/client/World.h"
#include "carla/Memory.h"
#include "carla/rpc/Command.h"
#include "carla/trafficmanager/AtomicActorSet.h"
#include "carla/trafficmanager/InMemoryMap.h"
#include "carla/trafficmanager/Parameters.h"
#include "carla/trafficmanager/RandomGenerator.h"
#include "carla/trafficmanager/SimulationState.h"
#include "carla/trafficmanager/TrackTraffic.h"
#include "carla/trafficmanager/TrafficManagerBase.h"
#include "carla/trafficmanager/TrafficManagerServer.h"
#include "carla/trafficmanager/ALSM.h"
#include "carla/trafficmanager/LocalizationStage.h"
#include "carla/trafficmanager/CollisionStage.h"
#include "carla/trafficmanager/TrafficLightStage.h"
#include "carla/trafficmanager/MotionPlanStage.h"
namespace carla {
namespace traffic_manager {
namespace chr = std::chrono;
using namespace std::chrono_literals;
using TimePoint = chr::time_point<chr::system_clock, chr::nanoseconds>;
using TLGroup = std::vector<carla::SharedPtr<carla::client::TrafficLight>>;
using LocalMapPtr = std::shared_ptr<InMemoryMap>;
using constants::HybridMode::HYBRID_MODE_DT;
/// The function of this class is to integrate all the various stages of
/// the traffic manager appropriately using messengers.
class TrafficManagerLocal : public TrafficManagerBase {
private:
/// PID controller parameters.
std::vector<float> longitudinal_PID_parameters;
std::vector<float> longitudinal_highway_PID_parameters;
std::vector<float> lateral_PID_parameters;
std::vector<float> lateral_highway_PID_parameters;
/// Carla's client connection object.
carla::client::detail::EpisodeProxy episode_proxy;
/// Carla client and object.
cc::World world;
/// Set of all actors registered with traffic manager.
AtomicActorSet registered_vehicles;
/// State counter to track changes in registered actors.
int registered_vehicles_state;
/// List of vehicles registered with the traffic manager in
/// current update cycle.
std::vector<ActorId> vehicle_id_list;
/// Pointer to local map cache.
LocalMapPtr local_map;
/// Structures to hold waypoint buffers for all vehicles.
BufferMap buffer_map;
/// Object for tracking paths of the traffic vehicles.
TrackTraffic track_traffic;
/// Type containing the current state of all actors involved in the simulation.
SimulationState simulation_state;
/// Time instance used to calculate dt in asynchronous mode.
TimePoint previous_update_instance;
/// Parameterization object.
Parameters parameters;
/// Array to hold output data of localization stage.
LocalizationFrame localization_frame;
/// Array to hold output data of collision avoidance.
CollisionFrame collision_frame;
/// Array to hold output data of traffic light response.
TLFrame tl_frame;
/// Array to hold output data of motion planning.
ControlFrame control_frame;
/// Variable to keep track of currently reserved array space for frames.
uint64_t current_reserved_capacity {0u};
/// Various stages representing core operations of traffic manager.
LocalizationStage localization_stage;
CollisionStage collision_stage;
TrafficLightStage traffic_light_stage;
MotionPlanStage motion_plan_stage;
ALSM alsm;
/// Traffic manager server instance.
TrafficManagerServer server;
/// Switch to turn on / turn off traffic manager.
std::atomic<bool> run_traffic_manger{true};
/// Flags to signal step begin and end.
std::atomic<bool> step_begin{false};
std::atomic<bool> step_end{false};
/// Mutex for progressing synchronous execution.
std::mutex step_execution_mutex;
/// Condition variables for progressing synchronous execution.
std::condition_variable step_begin_trigger;
std::condition_variable step_end_trigger;
/// Single worker thread for sequential execution of sub-components.
std::unique_ptr<std::thread> worker_thread;
/// Structure holding random devices per vehicle.
RandomGeneratorMap random_devices;
/// Randomization seed.
uint64_t seed {static_cast<uint64_t>(time(NULL))};
bool is_custom_seed {false};
std::vector<ActorId> marked_for_removal;
/// Mutex to prevent vehicle registration during frame array re-allocation.
std::mutex registration_mutex;
/// Method to check if all traffic lights are frozen in a group.
bool CheckAllFrozen(TLGroup tl_to_freeze);
public:
/// Private constructor for singleton lifecycle management.
TrafficManagerLocal(std::vector<float> longitudinal_PID_parameters,
std::vector<float> longitudinal_highway_PID_parameters,
std::vector<float> lateral_PID_parameters,
std::vector<float> lateral_highway_PID_parameters,
float perc_decrease_from_limit,
cc::detail::EpisodeProxy &episode_proxy,
uint16_t &RPCportTM);
/// Destructor.
virtual ~TrafficManagerLocal();
/// Method to setup InMemoryMap.
void SetupLocalMap();
/// To start the TrafficManager.
void Start();
/// Initiates thread to run the TrafficManager sequentially.
void Run();
/// To stop the TrafficManager.
void Stop();
/// To release the traffic manager.
void Release();
/// To reset the traffic manager.
void Reset();
/// This method registers a vehicle with the traffic manager.
void RegisterVehicles(const std::vector<ActorPtr> &actor_list);
/// This method unregisters a vehicle from traffic manager.
void UnregisterVehicles(const std::vector<ActorPtr> &actor_list);
/// Method to set a vehicle's % decrease in velocity with respect to the speed limit.
/// If less than 0, it's a % increase.
void SetPercentageSpeedDifference(const ActorPtr &actor, const float percentage);
  /// Method to set a global % decrease in velocity with respect to the speed limit.
/// If less than 0, it's a % increase.
void SetGlobalPercentageSpeedDifference(float const percentage);
/// Method to set collision detection rules between vehicles.
void SetCollisionDetection(const ActorPtr &reference_actor, const ActorPtr &other_actor, const bool detect_collision);
/// Method to force lane change on a vehicle.
/// Direction flag can be set to true for left and false for right.
void SetForceLaneChange(const ActorPtr &actor, const bool direction);
/// Enable/disable automatic lane change on a vehicle.
void SetAutoLaneChange(const ActorPtr &actor, const bool enable);
/// Method to specify how much distance a vehicle should maintain to
/// the leading vehicle.
void SetDistanceToLeadingVehicle(const ActorPtr &actor, const float distance);
/// Method to specify the % chance of ignoring collisions with any walker.
void SetPercentageIgnoreWalkers(const ActorPtr &actor, const float perc);
/// Method to specify the % chance of ignoring collisions with any vehicle.
void SetPercentageIgnoreVehicles(const ActorPtr &actor, const float perc);
/// Method to specify the % chance of running any traffic light.
void SetPercentageRunningLight(const ActorPtr &actor, const float perc);
/// Method to specify the % chance of running any traffic sign.
void SetPercentageRunningSign(const ActorPtr &actor, const float perc);
/// Method to switch traffic manager into synchronous execution.
void SetSynchronousMode(bool mode);
/// Method to set Tick timeout for synchronous execution.
void SetSynchronousModeTimeOutInMiliSecond(double time);
/// Method to provide synchronous tick.
bool SynchronousTick();
/// Get CARLA episode information.
carla::client::detail::EpisodeProxy &GetEpisodeProxy();
/// Get list of all registered vehicles.
std::vector<ActorId> GetRegisteredVehiclesIDs();
/// Method to specify how much distance a vehicle should maintain to
/// the Global leading vehicle.
void SetGlobalDistanceToLeadingVehicle(const float distance);
/// Method to set probabilistic preference to keep on the right lane.
void SetKeepRightPercentage(const ActorPtr &actor, const float percentage);
/// Method to set hybrid physics mode.
void SetHybridPhysicsMode(const bool mode_switch);
/// Method to set hybrid physics radius.
void SetHybridPhysicsRadius(const float radius);
/// Method to set randomization seed.
void SetRandomDeviceSeed(const uint64_t _seed);
/// Method to set Open Street Map mode.
void SetOSMMode(const bool mode_switch);
/// Method to set automatic respawn of dormant vehicles.
void SetRespawnDormantVehicles(const bool mode_switch);
  /// Method to set boundaries for respawning dormant vehicles.
void SetBoundariesRespawnDormantVehicles(const float lower_bound, const float upper_bound);
  /// Method to set limits for boundaries when respawning dormant vehicles.
void SetMaxBoundaries(const float lower, const float upper);
void ShutDown() {};
};
} // namespace traffic_manager
} // namespace carla
| 2,642 |
346 | """add metadata in run table
Revision ID: edcd10edf11d
Revises: <PASSWORD>
Create Date: 2021-09-16 11:37:16.502609
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "edcd10edf11d"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("runs", sa.Column("os", sa.String))
op.add_column("runs", sa.Column("python_version", sa.String))
op.add_column("runs", sa.Column("cpu_count", sa.Integer))
op.add_column("runs", sa.Column("cpu_model", sa.String))
op.add_column("runs", sa.Column("gpu_count", sa.Integer))
op.add_column("runs", sa.Column("gpu_model", sa.String))
op.add_column("runs", sa.Column("longitude", sa.Float))
op.add_column("runs", sa.Column("latitude", sa.Float))
op.add_column("runs", sa.Column("region", sa.String))
op.add_column("runs", sa.Column("provider", sa.String))
op.add_column("runs", sa.Column("ram_total_size", sa.Float))
op.add_column("runs", sa.Column("tracking_mode", sa.String))
def downgrade():
op.drop_column("runs", "os")
op.drop_column("runs", "python_version")
op.drop_column("runs", "cpu_count")
op.drop_column("runs", "cpu_model")
op.drop_column("runs", "gpu_count")
op.drop_column("runs", "gpu_model")
op.drop_column("runs", "longitude")
op.drop_column("runs", "latitude")
op.drop_column("runs", "region")
op.drop_column("runs", "provider")
op.drop_column("runs", "ram_total_size")
op.drop_column("runs", "tracking_mode")
| 607 |
372 | <gh_stars>100-1000
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.containeranalysis.v1alpha1.model;
/**
 * A SourceContext is a reference to a tree of files. A SourceContext together with a path points to
* a unique revision of a single file or directory.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Container Analysis API. For a detailed explanation
* see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class GoogleDevtoolsContaineranalysisV1alpha1SourceContext extends com.google.api.client.json.GenericJson {
/**
* A SourceContext referring to a revision in a Google Cloud Source Repo.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private GoogleDevtoolsContaineranalysisV1alpha1CloudRepoSourceContext cloudRepo;
/**
* A SourceContext referring to a Gerrit project.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private GoogleDevtoolsContaineranalysisV1alpha1GerritSourceContext gerrit;
/**
* A SourceContext referring to any third party Git repo (e.g., GitHub).
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private GoogleDevtoolsContaineranalysisV1alpha1GitSourceContext git;
/**
* Labels with user defined metadata.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.Map<String, java.lang.String> labels;
/**
* A SourceContext referring to a revision in a Google Cloud Source Repo.
* @return value or {@code null} for none
*/
public GoogleDevtoolsContaineranalysisV1alpha1CloudRepoSourceContext getCloudRepo() {
return cloudRepo;
}
/**
* A SourceContext referring to a revision in a Google Cloud Source Repo.
* @param cloudRepo cloudRepo or {@code null} for none
*/
public GoogleDevtoolsContaineranalysisV1alpha1SourceContext setCloudRepo(GoogleDevtoolsContaineranalysisV1alpha1CloudRepoSourceContext cloudRepo) {
this.cloudRepo = cloudRepo;
return this;
}
/**
* A SourceContext referring to a Gerrit project.
* @return value or {@code null} for none
*/
public GoogleDevtoolsContaineranalysisV1alpha1GerritSourceContext getGerrit() {
return gerrit;
}
/**
* A SourceContext referring to a Gerrit project.
* @param gerrit gerrit or {@code null} for none
*/
public GoogleDevtoolsContaineranalysisV1alpha1SourceContext setGerrit(GoogleDevtoolsContaineranalysisV1alpha1GerritSourceContext gerrit) {
this.gerrit = gerrit;
return this;
}
/**
* A SourceContext referring to any third party Git repo (e.g., GitHub).
* @return value or {@code null} for none
*/
public GoogleDevtoolsContaineranalysisV1alpha1GitSourceContext getGit() {
return git;
}
/**
* A SourceContext referring to any third party Git repo (e.g., GitHub).
* @param git git or {@code null} for none
*/
public GoogleDevtoolsContaineranalysisV1alpha1SourceContext setGit(GoogleDevtoolsContaineranalysisV1alpha1GitSourceContext git) {
this.git = git;
return this;
}
/**
* Labels with user defined metadata.
* @return value or {@code null} for none
*/
public java.util.Map<String, java.lang.String> getLabels() {
return labels;
}
/**
* Labels with user defined metadata.
* @param labels labels or {@code null} for none
*/
public GoogleDevtoolsContaineranalysisV1alpha1SourceContext setLabels(java.util.Map<String, java.lang.String> labels) {
this.labels = labels;
return this;
}
@Override
public GoogleDevtoolsContaineranalysisV1alpha1SourceContext set(String fieldName, Object value) {
return (GoogleDevtoolsContaineranalysisV1alpha1SourceContext) super.set(fieldName, value);
}
@Override
public GoogleDevtoolsContaineranalysisV1alpha1SourceContext clone() {
return (GoogleDevtoolsContaineranalysisV1alpha1SourceContext) super.clone();
}
}
| 1,474 |
34,359 | <filename>src/cascadia/Remoting/WindowActivatedArgs.cpp
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
#include "pch.h"
#include "WindowActivatedArgs.h"
#include "WindowActivatedArgs.g.cpp"
| 74 |
2,338 | <reponame>mkinsner/llvm
#include <cstdio>
#include <iostream>
#include <string>
#include <map>
int main (int argc, char const *argv[])
{
std::string hello_world ("Hello World!");
std::cout << hello_world << std::endl;
std::cout << hello_world.length() << std::endl;
std::cout << hello_world[11] << std::endl;
std::map<std::string, int> associative_array;
std::cout << "size of upon construction associative_array: " << associative_array.size() << std::endl;
associative_array[hello_world] = 1;
associative_array["hello"] = 2;
associative_array["world"] = 3;
std::cout << "size of associative_array: " << associative_array.size() << std::endl;
printf("associative_array[\"hello\"]=%d\n", associative_array["hello"]);
printf("before returning....\n"); // Set break point at this line.
}
| 312 |
1,526 | <reponame>Hakunata/servicecomb-pack<gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.pack.alpha.fsm.channel.redis;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import java.lang.invoke.MethodHandles;
import org.apache.servicecomb.pack.alpha.core.NodeStatus;
import org.apache.servicecomb.pack.alpha.core.fsm.event.base.BaseEvent;
import org.apache.servicecomb.pack.alpha.fsm.channel.AbstractEventConsumer;
import org.apache.servicecomb.pack.alpha.fsm.channel.memory.MemoryActorEventChannel;
import org.apache.servicecomb.pack.alpha.fsm.metrics.MetricsService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.redis.connection.Message;
import org.springframework.data.redis.connection.MessageListener;
public class RedisSagaEventConsumer extends AbstractEventConsumer implements MessageListener {
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private NodeStatus nodeStatus;
private MessageSerializer messageSerializer = new MessageSerializer();
public RedisSagaEventConsumer(ActorSystem actorSystem, ActorRef sagaShardRegionActor,
MetricsService metricsService,
NodeStatus nodeStatus) {
super(actorSystem, sagaShardRegionActor, metricsService);
this.nodeStatus = nodeStatus;
}
@Override
public void onMessage(Message message, byte[] pattern) {
if (nodeStatus.isMaster()) {
messageSerializer.deserialize(message.getBody()).ifPresent(data -> {
BaseEvent event = (BaseEvent) data;
if (LOG.isDebugEnabled()) {
LOG.debug("event = [{}]", event);
}
try {
long begin = System.currentTimeMillis();
metricsService.metrics().doActorReceived();
sagaShardRegionActor.tell(event, sagaShardRegionActor);
long end = System.currentTimeMillis();
metricsService.metrics().doActorAccepted();
metricsService.metrics().doActorAvgTime(end - begin);
} catch (Exception e) {
metricsService.metrics().doActorRejected();
LOG.error("subscriber Exception = [{}]", e.getMessage(), e);
}
});
}
}
}
| 951 |
918 | <filename>gobblin-data-management/src/test/java/org/apache/gobblin/data/management/source/LoopingDatasetFinderSourceTest.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.data.management.source;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.gobblin.metastore.DatasetStateStore;
import org.apache.gobblin.source.extractor.WatermarkInterval;
import org.apache.hadoop.conf.Configuration;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.dataset.Dataset;
import org.apache.gobblin.dataset.IterableDatasetFinder;
import org.apache.gobblin.dataset.PartitionableDataset;
import org.apache.gobblin.dataset.test.SimpleDatasetForTesting;
import org.apache.gobblin.dataset.test.SimpleDatasetPartitionForTesting;
import org.apache.gobblin.dataset.test.SimplePartitionableDatasetForTesting;
import org.apache.gobblin.dataset.test.StaticDatasetsFinderForTesting;
import org.apache.gobblin.runtime.FsDatasetStateStore;
import org.apache.gobblin.runtime.JobState;
import org.apache.gobblin.runtime.TaskState;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.extractor.extract.LongWatermark;
import org.apache.gobblin.source.workunit.WorkUnit;
import org.apache.gobblin.source.workunit.WorkUnitStream;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class LoopingDatasetFinderSourceTest {
private static final String TEST_JOB_NAME_1 = "TestJob1";
private static final String TEST_JOB_NAME_2 = "TestJob2";
private static final String TEST_JOB_ID = "TestJob11";
private static final String TEST_TASK_ID_PREFIX = "TestTask-";
private static final String TEST_STATE_STORE_ROOT_DIR = "/tmp/LoopingSourceTest";
private FsDatasetStateStore fsDatasetStateStore;
private long startTime = System.currentTimeMillis();
@BeforeClass
public void setUp()
throws IOException {
this.fsDatasetStateStore = new FsDatasetStateStore(ConfigurationKeys.LOCAL_FS_URI, TEST_STATE_STORE_ROOT_DIR);
// clear data that may have been left behind by a prior test run
this.fsDatasetStateStore.delete(TEST_JOB_NAME_1);
this.fsDatasetStateStore.delete(TEST_JOB_NAME_2);
}
@Test
public void testNonDrilldown() {
Dataset dataset1 = new SimpleDatasetForTesting("dataset1");
Dataset dataset2 = new SimplePartitionableDatasetForTesting("dataset2",
Lists.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2")));
Dataset dataset3 = new SimpleDatasetForTesting("dataset3");
Dataset dataset4 = new SimpleDatasetForTesting("dataset4");
Dataset dataset5 = new SimpleDatasetForTesting("dataset5");
IterableDatasetFinder finder =
new StaticDatasetsFinderForTesting(Lists.newArrayList(dataset5, dataset4, dataset3, dataset2, dataset1));
MySource mySource = new MySource(false, finder);
SourceState sourceState = new SourceState();
sourceState.setProp(LoopingDatasetFinderSource.MAX_WORK_UNITS_PER_RUN_KEY, 3);
WorkUnitStream workUnitStream = mySource.getWorkunitStream(sourceState);
List<WorkUnit> workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
verifyWorkUnitState(workUnits, "dataset3", null, false, false);
// Second run should continue where it left off
List<WorkUnitState> workUnitStates = workUnits.stream().map(WorkUnitState::new).collect(Collectors.toList());
SourceState sourceStateSpy = Mockito.spy(sourceState);
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 3);
verifyWorkUnitState(workUnits, "dataset5", null, true, false);
// Loop around
workUnitStates = workUnits.stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
verifyWorkUnitState(workUnits, "dataset3", null, false, false);
}
@Test
public void testDrilldown() {
// Create three datasets, two of them partitioned
Dataset dataset1 = new SimpleDatasetForTesting("dataset1");
Dataset dataset2 = new SimplePartitionableDatasetForTesting("dataset2", Lists
.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2"),
new SimpleDatasetPartitionForTesting("p3")));
Dataset dataset3 = new SimplePartitionableDatasetForTesting("dataset3", Lists
.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2"),
new SimpleDatasetPartitionForTesting("p3")));
IterableDatasetFinder finder = new StaticDatasetsFinderForTesting(Lists.newArrayList(dataset3, dataset2, dataset1));
MySource mySource = new MySource(true, finder);
// Limit to 3 wunits per run
SourceState sourceState = new SourceState();
sourceState.setProp(LoopingDatasetFinderSource.MAX_WORK_UNITS_PER_RUN_KEY, 3);
// first run, get three first work units
WorkUnitStream workUnitStream = mySource.getWorkunitStream(sourceState);
List<WorkUnit> workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
verifyWorkUnitState(workUnits, "dataset2", "p2", false, false);
// Second run should continue where it left off
List<WorkUnitState> workUnitStates = workUnits.stream().map(WorkUnitState::new).collect(Collectors.toList());
SourceState sourceStateSpy = Mockito.spy(sourceState);
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
verifyWorkUnitState(workUnits, "dataset3", "p2", false, false);
// third run, continue from where it left off
workUnitStates = workUnits.stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 2);
verifyWorkUnitState(workUnits, "dataset3", "p3", true, false);
// fourth run, finished all work units, loop around
workUnitStates = workUnits.stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
verifyWorkUnitState(workUnits, "dataset2", "p2", false, false);
}
@Test
public void testNonDrilldownDatasetState()
throws IOException {
Dataset dataset1 = new SimpleDatasetForTesting("dataset1");
Dataset dataset2 = new SimplePartitionableDatasetForTesting("dataset2",
Lists.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2")));
Dataset dataset3 = new SimpleDatasetForTesting("dataset3");
Dataset dataset4 = new SimpleDatasetForTesting("dataset4");
Dataset dataset5 = new SimpleDatasetForTesting("dataset5");
IterableDatasetFinder finder =
new StaticDatasetsFinderForTesting(Lists.newArrayList(dataset5, dataset4, dataset3, dataset2, dataset1));
MySource mySource = new MySource(false, finder, fsDatasetStateStore, TEST_JOB_NAME_1);
SourceState sourceState = new SourceState();
sourceState.setProp(LoopingDatasetFinderSource.MAX_WORK_UNITS_PER_RUN_KEY, 3);
sourceState.setProp(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, TEST_STATE_STORE_ROOT_DIR);
sourceState.setProp(ConfigurationKeys.JOB_NAME_KEY, TEST_JOB_NAME_1);
WorkUnitStream workUnitStream = mySource.getWorkunitStream(sourceState, true);
List<WorkUnit> workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
List<LongWatermark> watermarks1 = new ArrayList<>();
List<Dataset> datasets1 = new ArrayList<>();
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset1");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(dataset1);
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(dataset2);
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(dataset3);
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY),
ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
Dataset globalWmDataset = new SimpleDatasetForTesting(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets1.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset3", null, false, true);
persistDatasetState(datasets1, watermarks1, TEST_JOB_NAME_1);
testDatasetStates(datasets1, watermarks1, TEST_JOB_NAME_1);
// Second run should continue where it left off
List<LongWatermark> watermarks2 = new ArrayList<>();
List<Dataset> datasets2 = new ArrayList<>();
int workUnitSize = workUnits.size();
List<WorkUnitState> workUnitStates =
workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
SourceState sourceStateSpy = Mockito.spy(sourceState);
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 3);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset4");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(dataset4);
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset5");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(dataset5);
Assert.assertTrue(workUnits.get(2).getPropAsBoolean(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets2.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset5",null,true, true);
persistDatasetState(datasets2, watermarks2, TEST_JOB_NAME_1);
testDatasetStates(datasets2, watermarks2, TEST_JOB_NAME_1);
// Loop around
List<LongWatermark> watermarks3 = new ArrayList<>();
List<Dataset> datasets3 = new ArrayList<>();
workUnitSize = workUnits.size();
workUnitStates = workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset1");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(0).getValue());
watermarks3.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets3.add(dataset1);
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(1).getValue());
watermarks3.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets3.add(dataset2);
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(2).getValue());
watermarks3.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets3.add(dataset3);
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets3.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset3",null,false, true);
persistDatasetState(datasets3, watermarks3, TEST_JOB_NAME_1);
testDatasetStates(datasets3, watermarks3, TEST_JOB_NAME_1);
}
@Test
public void testDrilldownDatasetState()
throws IOException {
// Create three datasets, two of them partitioned
Dataset dataset1 = new SimpleDatasetForTesting("dataset1");
Dataset dataset2 = new SimplePartitionableDatasetForTesting("dataset2", Lists
.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2"),
new SimpleDatasetPartitionForTesting("p3")));
Dataset dataset3 = new SimplePartitionableDatasetForTesting("dataset3", Lists
.newArrayList(new SimpleDatasetPartitionForTesting("p1"), new SimpleDatasetPartitionForTesting("p2"),
new SimpleDatasetPartitionForTesting("p3")));
IterableDatasetFinder finder = new StaticDatasetsFinderForTesting(Lists.newArrayList(dataset3, dataset2, dataset1));
MySource mySource = new MySource(true, finder, fsDatasetStateStore, TEST_JOB_NAME_2);
    // Limit to 3 work units per run
SourceState sourceState = new SourceState();
sourceState.setProp(LoopingDatasetFinderSource.MAX_WORK_UNITS_PER_RUN_KEY, 3);
sourceState.setProp(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, TEST_STATE_STORE_ROOT_DIR);
sourceState.setProp(ConfigurationKeys.JOB_NAME_KEY, TEST_JOB_NAME_2);
    // First run: the first three work units plus the trailing global watermark work unit
WorkUnitStream workUnitStream = mySource.getWorkunitStream(sourceState,true);
List<WorkUnit> workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
List<LongWatermark> watermarks1 = new ArrayList<>();
List<Dataset> datasets1 = new ArrayList<>();
Assert.assertEquals(workUnits.size(), 4);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset1");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(dataset1);
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p1");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(new SimpleDatasetForTesting("dataset2@p1"));
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p2");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks1.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets1.add(new SimpleDatasetForTesting("dataset2@p2"));
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
Assert.assertEquals(workUnits.get(3).getProp(LoopingDatasetFinderSource.DATASET_URN), "dataset2");
Assert.assertEquals(workUnits.get(3).getProp(LoopingDatasetFinderSource.PARTITION_URN), "p2");
Dataset globalWmDataset = new SimpleDatasetForTesting(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets1.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset2","p2",false, true);
persistDatasetState(datasets1, watermarks1, TEST_JOB_NAME_2);
testDatasetStates(datasets1, watermarks1, TEST_JOB_NAME_2);
// Second run should continue where it left off
int workUnitSize = workUnits.size();
List<WorkUnitState> workUnitStates =
workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
List<LongWatermark> watermarks2 = new ArrayList<>();
List<Dataset> datasets2 = new ArrayList<>();
SourceState sourceStateSpy = Mockito.spy(sourceState);
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p3");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(new SimpleDatasetForTesting("dataset2@p3"));
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3@p1");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(new SimpleDatasetForTesting("dataset3@p1"));
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3@p2");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks2.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets2.add(new SimpleDatasetForTesting("dataset3@p2"));
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
Assert.assertEquals(workUnits.get(3).getProp(LoopingDatasetFinderSource.DATASET_URN), "dataset3");
Assert.assertEquals(workUnits.get(3).getProp(LoopingDatasetFinderSource.PARTITION_URN), "p2");
datasets2.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset3","p2",false, true);
persistDatasetState(datasets2, watermarks2, TEST_JOB_NAME_2);
testDatasetStates(datasets2, watermarks2, TEST_JOB_NAME_2);
// third run, continue from where it left off
workUnitSize = workUnits.size();
workUnitStates =
workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
List<LongWatermark> watermarks3 = new ArrayList<>();
List<Dataset> datasets3 = new ArrayList<>();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 2);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset3@p3");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), 0);
watermarks3.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets3.add(new SimpleDatasetForTesting("dataset3@p3"));
Assert.assertTrue(workUnits.get(1).getPropAsBoolean(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
Assert.assertEquals(workUnits.get(1).getProp(LoopingDatasetFinderSource.DATASET_URN), "dataset3");
Assert.assertEquals(workUnits.get(1).getProp(LoopingDatasetFinderSource.PARTITION_URN), "p3");
datasets3.add(globalWmDataset);
verifyWorkUnitState(workUnits,"dataset3","p3",true, true);
persistDatasetState(datasets3, watermarks3, TEST_JOB_NAME_2);
testDatasetStates(datasets3, watermarks3, TEST_JOB_NAME_2);
// fourth run, finished all work units, loop around
workUnitSize = workUnits.size();
workUnitStates =
workUnits.subList(workUnitSize - 1, workUnitSize).stream().map(WorkUnitState::new).collect(Collectors.toList());
Mockito.doReturn(workUnitStates).when(sourceStateSpy).getPreviousWorkUnitStates(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
List<LongWatermark> watermarks4 = new ArrayList<>();
List<Dataset> datasets4 = new ArrayList<>();
workUnitStream = mySource.getWorkunitStream(sourceStateSpy,true);
workUnits = Lists.newArrayList(workUnitStream.getWorkUnits());
Assert.assertEquals(workUnits.size(), 4);
Assert.assertEquals(workUnits.get(0).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset1");
Assert.assertEquals(workUnits.get(0).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(0).getValue());
watermarks4.add(workUnits.get(0).getExpectedHighWatermark(LongWatermark.class));
datasets4.add(new SimpleDatasetForTesting("dataset1"));
Assert.assertEquals(workUnits.get(1).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p1");
Assert.assertEquals(workUnits.get(1).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(1).getValue());
watermarks4.add(workUnits.get(1).getExpectedHighWatermark(LongWatermark.class));
datasets4.add(new SimpleDatasetForTesting("dataset2@p1"));
Assert.assertEquals(workUnits.get(2).getProp(ConfigurationKeys.DATASET_URN_KEY), "dataset2@p2");
Assert.assertEquals(workUnits.get(2).getLowWatermark(LongWatermark.class).getValue(), watermarks1.get(2).getValue());
watermarks4.add(workUnits.get(2).getExpectedHighWatermark(LongWatermark.class));
datasets4.add(new SimpleDatasetForTesting("dataset2@p2"));
Assert.assertEquals(workUnits.get(3).getProp(ConfigurationKeys.DATASET_URN_KEY), ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN);
datasets4.add(new SimpleDatasetForTesting(ConfigurationKeys.GLOBAL_WATERMARK_DATASET_URN));
verifyWorkUnitState(workUnits,"dataset2","p2",false,true);
persistDatasetState(datasets4, watermarks4, TEST_JOB_NAME_2);
testDatasetStates(datasets4, watermarks4, TEST_JOB_NAME_2);
}
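  /**
   * Asserts that only the last work unit in the stream carries the looping-source bookkeeping
   * properties (checkpointed dataset/partition URN, end-of-datasets flag and the global watermark
   * marker), and that it matches the expected dataset and partition URNs.
   */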
public void verifyWorkUnitState(List<WorkUnit> workUnits, String datasetUrn, String partitionUrn,
boolean endOfDatasets, boolean isDatasetStateStoreEnabled) {
int i;
for (i = 0; i < workUnits.size() - 1; i++) {
Assert.assertNull(workUnits.get(i).getProp(LoopingDatasetFinderSource.DATASET_URN));
Assert.assertNull(workUnits.get(i).getProp(LoopingDatasetFinderSource.PARTITION_URN));
if(!isDatasetStateStoreEnabled) {
Assert.assertNull(workUnits.get(i).getProp(ConfigurationKeys.DATASET_URN_KEY));
}
Assert.assertNull(workUnits.get(i).getProp(LoopingDatasetFinderSource.GLOBAL_WATERMARK_DATASET_KEY));
Assert.assertNull(workUnits.get(i).getProp(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
}
Assert.assertEquals(workUnits.get(i).getProp(LoopingDatasetFinderSource.DATASET_URN), datasetUrn);
if (partitionUrn != null) {
Assert.assertEquals(workUnits.get(i).getProp(LoopingDatasetFinderSource.PARTITION_URN), partitionUrn);
} else {
Assert.assertNull(workUnits.get(i).getProp(LoopingDatasetFinderSource.PARTITION_URN));
}
if (!endOfDatasets) {
Assert.assertNull(workUnits.get(i).getProp(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
} else {
Assert.assertTrue(workUnits.get(i).getPropAsBoolean(LoopingDatasetFinderSource.END_OF_DATASETS_KEY));
}
Assert
.assertEquals(workUnits.get(i).getPropAsBoolean(LoopingDatasetFinderSource.GLOBAL_WATERMARK_DATASET_KEY), true);
}
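  /**
   * Persists a COMMITTED dataset state with a single task state for each dataset; the actual high
   * watermark is only recorded for the regular datasets, not for the trailing global watermark
   * dataset.
   */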
public void persistDatasetState(List<Dataset> datasets, List<LongWatermark> watermarks, String jobName)
throws IOException {
Preconditions.checkArgument(datasets.size() >= 2);
for (int i = 0; i < datasets.size(); i++) {
String datasetUrn = datasets.get(i).getUrn();
JobState.DatasetState datasetState = new JobState.DatasetState(jobName, TEST_JOB_ID);
datasetState.setDatasetUrn(datasetUrn);
datasetState.setState(JobState.RunningState.COMMITTED);
datasetState.setId(datasetUrn);
datasetState.setStartTime(this.startTime);
datasetState.setEndTime(this.startTime + 1000);
datasetState.setDuration(1000);
TaskState taskState = new TaskState();
taskState.setJobId(TEST_JOB_ID);
taskState.setTaskId(TEST_TASK_ID_PREFIX + i);
taskState.setId(TEST_TASK_ID_PREFIX + i);
taskState.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
if (i < datasets.size() - 1) {
taskState.setActualHighWatermark(watermarks.get(i));
}
datasetState.addTaskState(taskState);
this.fsDatasetStateStore.persistDatasetState(datasetUrn, datasetState);
}
}
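  /**
   * Reads the latest dataset state for each dataset back from the state store and verifies that
   * the persisted job, task and watermark values match what persistDatasetState wrote.
   */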
private void testDatasetStates(List<Dataset> datasets, List<LongWatermark> watermarks, String jobName)
throws IOException {
Preconditions.checkArgument(datasets.size() >= 2);
for (int i = 0; i < datasets.size(); i++) {
JobState.DatasetState datasetState =
this.fsDatasetStateStore.getLatestDatasetState(jobName, datasets.get(i).getUrn());
Assert.assertEquals(datasetState.getDatasetUrn(), datasets.get(i).getUrn());
Assert.assertEquals(datasetState.getJobName(), jobName);
Assert.assertEquals(datasetState.getJobId(), TEST_JOB_ID);
Assert.assertEquals(datasetState.getState(), JobState.RunningState.COMMITTED);
Assert.assertEquals(datasetState.getStartTime(), this.startTime);
Assert.assertEquals(datasetState.getEndTime(), this.startTime + 1000);
Assert.assertEquals(datasetState.getDuration(), 1000);
Assert.assertEquals(datasetState.getCompletedTasks(), 1);
TaskState taskState = datasetState.getTaskStates().get(0);
Assert.assertEquals(taskState.getJobId(), TEST_JOB_ID);
Assert.assertEquals(taskState.getTaskId(), TEST_TASK_ID_PREFIX + i);
Assert.assertEquals(taskState.getId(), TEST_TASK_ID_PREFIX + i);
Assert.assertEquals(taskState.getWorkingState(), WorkUnitState.WorkingState.COMMITTED);
if (i < datasets.size() - 1) {
Assert.assertEquals(taskState.getActualHighWatermark(LongWatermark.class).getValue(),
watermarks.get(i).getValue());
}
}
}
public static class MySource extends LoopingDatasetFinderSource<String, String> {
private final IterableDatasetFinder datasetsFinder;
private boolean isDatasetStateStoreEnabled;
private DatasetStateStore fsDatasetStateStore;
private String jobName;
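    // Used as the expected high watermark of every work unit produced by this test source.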
private Long LAST_PROCESSED_TS = System.currentTimeMillis();
MySource(boolean drilldownIntoPartitions, IterableDatasetFinder datasetsFinder) {
super(drilldownIntoPartitions);
this.datasetsFinder = datasetsFinder;
this.isDatasetStateStoreEnabled = false;
}
MySource(boolean drilldownIntoPartitions, IterableDatasetFinder datasetsFinder,
FsDatasetStateStore fsDatasetStateStore, String jobName) {
super(drilldownIntoPartitions);
this.datasetsFinder = datasetsFinder;
this.isDatasetStateStoreEnabled = true;
this.fsDatasetStateStore = fsDatasetStateStore;
this.jobName = jobName;
}
@Override
public Extractor<String, String> getExtractor(WorkUnitState state)
throws IOException {
return null;
}
@Override
protected WorkUnit workUnitForDataset(Dataset dataset) {
WorkUnit workUnit = new WorkUnit();
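      // When the dataset state store is enabled, resume from the last committed watermark for
      // this dataset, or start from 0 if no previous state exists.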
if(isDatasetStateStoreEnabled) {
JobState.DatasetState datasetState = null;
try {
datasetState =
(JobState.DatasetState) this.fsDatasetStateStore.getLatestDatasetState(this.jobName, dataset.getUrn());
} catch (IOException e) {
throw new RuntimeException(e);
}
LongWatermark previousWatermark;
if(datasetState != null) {
previousWatermark = datasetState.getTaskStatesAsWorkUnitStates().get(0).getActualHighWatermark(LongWatermark.class);
} else {
previousWatermark = new LongWatermark(0);
}
workUnit.setWatermarkInterval(new WatermarkInterval(previousWatermark, new LongWatermark(LAST_PROCESSED_TS)));
}
return workUnit;
}
@Override
protected WorkUnit workUnitForDatasetPartition(PartitionableDataset.DatasetPartition partition) {
WorkUnit workUnit = new WorkUnit();
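      // Partition state is tracked under a combined "<dataset URN>@<partition URN>" key in the
      // state store.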
if(isDatasetStateStoreEnabled) {
String datasetUrn = partition.getDataset().getUrn()+"@"+partition.getUrn();
JobState.DatasetState datasetState = null;
try {
datasetState =
(JobState.DatasetState) this.fsDatasetStateStore.getLatestDatasetState(this.jobName, datasetUrn);
} catch (IOException e) {
throw new RuntimeException(e);
}
LongWatermark previousWatermark;
if(datasetState != null) {
previousWatermark = datasetState.getTaskStatesAsWorkUnitStates().get(0).getActualHighWatermark(LongWatermark.class);
} else {
previousWatermark = new LongWatermark(0);
}
workUnit.setWatermarkInterval(new WatermarkInterval(previousWatermark, new LongWatermark(LAST_PROCESSED_TS)));
}
return workUnit;
}
@Override
public void shutdown(SourceState state) {
}
@Override
protected IterableDatasetFinder createDatasetsFinder(SourceState state)
throws IOException {
return this.datasetsFinder;
}
}
@AfterClass
public void tearDown()
throws IOException {
FileSystem fs = FileSystem.getLocal(new Configuration(false));
Path rootDir = new Path(TEST_STATE_STORE_ROOT_DIR);
if (fs.exists(rootDir)) {
fs.delete(rootDir, true);
}
}
}
| 12,006 |
763 | <reponame>zabrewer/batfish<filename>projects/batfish/src/main/java/org/batfish/representation/cisco_xr/MatchSemantics.java
package org.batfish.representation.cisco_xr;
public enum MatchSemantics {
MATCH_ALL,
MATCH_ANY
}
| 85 |
4,339 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.yarn;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
/**
* Information about launched task.
*/
public class IgniteContainer {
/** */
public final ContainerId id;
/** */
public final NodeId nodeId;
/** */
public final double cpuCores;
/** */
public final double mem;
/**
* Ignite launched task.
*
* @param id Container ID.
* @param nodeId Node id.
* @param cpuCores Cpu cores count.
* @param mem Memory
*/
public IgniteContainer(ContainerId id, NodeId nodeId, double cpuCores, double mem) {
this.id = id;
this.nodeId = nodeId;
this.cpuCores = cpuCores;
this.mem = mem;
}
/**
* @return Id.
*/
public ContainerId id() {
return id;
}
/**
* @return Host.
*/
public NodeId nodeId() {
return nodeId;
}
/**
* @return Cores count.
*/
public double cpuCores() {
return cpuCores;
}
/**
* @return Memory.
*/
public double mem() {
return mem;
}
/** {@inheritDoc} */
@Override public String toString() {
return "IgniteTask [host=" + nodeId.getHost() + ", cpuCores=" + cpuCores + ", mem=" + mem + ']';
}
}
| 775 |
713 | <filename>core/src/test/java/org/infinispan/functional/distribution/rehash/FunctionalNonTxBackupOwnerBecomingPrimaryOwnerTest.java<gh_stars>100-1000
package org.infinispan.functional.distribution.rehash;
import org.infinispan.distribution.rehash.NonTxBackupOwnerBecomingPrimaryOwnerTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.op.TestFunctionalWriteOperation;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.rehash.FunctionalNonTxBackupOwnerBecomingPrimaryOwnerTest")
@CleanupAfterMethod
public class FunctionalNonTxBackupOwnerBecomingPrimaryOwnerTest extends NonTxBackupOwnerBecomingPrimaryOwnerTest {
// TODO: Add more tests, e.g. read-write key operation
public void testPrimaryOwnerChangingDuringReplaceBasedOnMeta() throws Exception {
doTest(TestFunctionalWriteOperation.REPLACE_META_FUNCTIONAL);
}
@Override
public void testPrimaryOwnerChangingDuringPut() throws Exception {
doTest(TestFunctionalWriteOperation.PUT_CREATE_FUNCTIONAL);
}
@Override
public void testPrimaryOwnerChangingDuringPutOverwrite() throws Exception {
doTest(TestFunctionalWriteOperation.PUT_OVERWRITE_FUNCTIONAL);
}
@Override
public void testPrimaryOwnerChangingDuringPutIfAbsent() throws Exception {
doTest(TestFunctionalWriteOperation.PUT_IF_ABSENT_FUNCTIONAL);
}
@Override
public void testPrimaryOwnerChangingDuringReplace() throws Exception {
doTest(TestFunctionalWriteOperation.REPLACE_FUNCTIONAL);
}
@Override
public void testPrimaryOwnerChangingDuringRemove() throws Exception {
doTest(TestFunctionalWriteOperation.REMOVE_FUNCTIONAL);
}
@Override
public void testPrimaryOwnerChangingDuringReplaceExact() throws Exception {
doTest(TestFunctionalWriteOperation.REPLACE_EXACT_FUNCTIONAL);
}
@Override
public void testPrimaryOwnerChangingDuringRemoveExact() throws Exception {
doTest(TestFunctionalWriteOperation.REMOVE_EXACT_FUNCTIONAL);
}
}
| 636 |
3,212 | <filename>nifi-nar-bundles/nifi-aws-bundle/nifi-aws-processors/src/main/java/org/apache/nifi/processors/aws/kinesis/stream/record/KinesisRecordProcessorRecord.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.aws.kinesis.stream.record;
import com.amazonaws.services.kinesis.model.Record;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.flowfile.attributes.CoreAttributes;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.ProcessSessionFactory;
import org.apache.nifi.processors.aws.kinesis.stream.ConsumeKinesisStream;
import org.apache.nifi.schema.access.SchemaNotFoundException;
import org.apache.nifi.serialization.MalformedRecordException;
import org.apache.nifi.serialization.RecordReader;
import org.apache.nifi.serialization.RecordReaderFactory;
import org.apache.nifi.serialization.RecordSetWriter;
import org.apache.nifi.serialization.RecordSetWriterFactory;
import org.apache.nifi.serialization.WriteResult;
import org.apache.nifi.serialization.record.PushBackRecordSet;
import org.apache.nifi.serialization.record.RecordSchema;
import org.apache.nifi.util.StopWatch;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.time.format.DateTimeFormatter;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
public class KinesisRecordProcessorRecord extends AbstractKinesisRecordProcessor {
final RecordReaderFactory readerFactory;
final RecordSetWriterFactory writerFactory;
final Map<String, String> schemaRetrievalVariables;
private RecordSetWriter writer;
private OutputStream outputStream;
public KinesisRecordProcessorRecord(final ProcessSessionFactory sessionFactory, final ComponentLog log, final String streamName,
final String endpointPrefix, final String kinesisEndpoint,
final long checkpointIntervalMillis, final long retryWaitMillis,
final int numRetries, final DateTimeFormatter dateTimeFormatter,
final RecordReaderFactory readerFactory, final RecordSetWriterFactory writerFactory) {
super(sessionFactory, log, streamName, endpointPrefix, kinesisEndpoint, checkpointIntervalMillis, retryWaitMillis,
numRetries, dateTimeFormatter);
this.readerFactory = readerFactory;
this.writerFactory = writerFactory;
schemaRetrievalVariables = Collections.singletonMap(KINESIS_RECORD_SCHEMA_KEY, streamName);
}
@Override
void startProcessingRecords() {
super.startProcessingRecords();
outputStream = null;
writer = null;
}
@Override
void processRecord(final List<FlowFile> flowFiles, final Record kinesisRecord, final boolean lastRecord,
final ProcessSession session, final StopWatch stopWatch) {
boolean firstOutputRecord = true;
int recordCount = 0;
final byte[] data = kinesisRecord.getData() != null ? kinesisRecord.getData().array() : new byte[0];
FlowFile flowFile = null;
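        // A single Kinesis Record may contain multiple NiFi records; they are all written to one
        // FlowFile whose record writer is created lazily on the first successfully parsed record.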
try (final InputStream in = new ByteArrayInputStream(data);
final RecordReader reader = readerFactory.createRecordReader(schemaRetrievalVariables, in, data.length, getLogger())
) {
org.apache.nifi.serialization.record.Record outputRecord;
final PushBackRecordSet recordSet = new PushBackRecordSet(reader.createRecordSet());
while ((outputRecord = recordSet.next()) != null) {
if (flowFiles.isEmpty()) {
flowFile = session.create();
flowFiles.add(flowFile);
// initialize the writer when the first record is read.
createWriter(flowFile, session, outputRecord);
}
final WriteResult writeResult = writer.write(outputRecord);
recordCount += writeResult.getRecordCount();
// complete the FlowFile if there are no more incoming Kinesis Records and no more records in this RecordSet
if (lastRecord && !recordSet.isAnotherRecord()) {
completeFlowFile(flowFiles, session, recordCount, writeResult, kinesisRecord, stopWatch);
}
firstOutputRecord = false;
}
} catch (final MalformedRecordException | IOException | SchemaNotFoundException e) {
// write raw Kinesis Record to the parse failure relationship
getLogger().error("Failed to parse message from Kinesis Stream using configured Record Reader and Writer due to {}",
e.getLocalizedMessage(), e);
outputRawRecordOnException(firstOutputRecord, flowFile, flowFiles, session, data, kinesisRecord, e);
}
if (getLogger().isDebugEnabled()) {
getLogger().debug("Sequence No: {}, Partition Key: {}, Data: {}",
kinesisRecord.getSequenceNumber(), kinesisRecord.getPartitionKey(), BASE_64_ENCODER.encodeToString(data));
}
}
private void createWriter(final FlowFile flowFile, final ProcessSession session,
final org.apache.nifi.serialization.record.Record outputRecord)
throws IOException, SchemaNotFoundException {
final RecordSchema readerSchema = outputRecord.getSchema();
final RecordSchema writeSchema = writerFactory.getSchema(schemaRetrievalVariables, readerSchema);
outputStream = session.write(flowFile);
writer = writerFactory.createWriter(getLogger(), writeSchema, outputStream, flowFile);
writer.beginRecordSet();
}
private void completeFlowFile(final List<FlowFile> flowFiles, final ProcessSession session, final int recordCount,
final WriteResult writeResult, final Record lastRecord, final StopWatch stopWatch)
throws IOException {
try {
writer.finishRecordSet();
} catch (IOException e) {
getLogger().error("Failed to finish record output due to {}", e.getLocalizedMessage(), e);
session.remove(flowFiles.get(0));
flowFiles.remove(0);
throw e;
} finally {
try {
writer.close();
outputStream.close();
} catch (final IOException e) {
getLogger().warn("Failed to close Record Writer due to {}", e.getLocalizedMessage(), e);
}
}
reportProvenance(session, flowFiles.get(0), null, null, stopWatch);
final Map<String, String> attributes = getDefaultAttributes(lastRecord);
attributes.put("record.count", String.valueOf(recordCount));
attributes.put(CoreAttributes.MIME_TYPE.key(), writer.getMimeType());
attributes.putAll(writeResult.getAttributes());
flowFiles.set(0, session.putAllAttributes(flowFiles.get(0), attributes));
writer = null;
outputStream = null;
}
private void outputRawRecordOnException(final boolean firstOutputRecord, final FlowFile flowFile,
final List<FlowFile> flowFiles, final ProcessSession session,
final byte[] data, final Record kinesisRecord, final Exception e) {
if (firstOutputRecord && flowFile != null) {
session.remove(flowFile);
flowFiles.remove(0);
if (writer != null) {
try {
writer.close();
outputStream.close();
} catch (IOException ioe) {
getLogger().warn("Failed to close Record Writer due to {}", ioe.getLocalizedMessage(), ioe);
}
}
}
FlowFile failed = session.create();
session.write(failed, o -> o.write(data));
final Map<String, String> attributes = getDefaultAttributes(kinesisRecord);
final Throwable c = e.getCause() != null ? e.getCause() : e;
attributes.put("record.error.message", (c.getLocalizedMessage() != null) ? c.getLocalizedMessage() : c.getClass().getCanonicalName() + " Thrown");
failed = session.putAllAttributes(failed, attributes);
transferTo(ConsumeKinesisStream.REL_PARSE_FAILURE, session, 0, 0, Collections.singletonList(failed));
}
private Map<String, String> getDefaultAttributes(final Record kinesisRecord) {
final String partitionKey = kinesisRecord.getPartitionKey();
final String sequenceNumber = kinesisRecord.getSequenceNumber();
final Date approximateArrivalTimestamp = kinesisRecord.getApproximateArrivalTimestamp();
return getDefaultAttributes(sequenceNumber, partitionKey, approximateArrivalTimestamp);
}
} | 3,697 |
593 | /**
* TLS-Attacker - A Modular Penetration Testing Framework for TLS
*
* Copyright 2014-2022 Ruhr University Bochum, Paderborn University, Hackmanit GmbH
*
* Licensed under Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0.txt
*/
package de.rub.nds.tlsattacker.core.protocol.handler.extension;
import de.rub.nds.tlsattacker.core.exceptions.AdjustmentException;
import de.rub.nds.tlsattacker.core.config.Config;
import de.rub.nds.tlsattacker.core.constants.RunningModeType;
import de.rub.nds.tlsattacker.core.protocol.message.extension.RecordSizeLimitExtensionMessage;
import de.rub.nds.tlsattacker.core.protocol.parser.extension.RecordSizeLimitExtensionParser;
import de.rub.nds.tlsattacker.core.protocol.preparator.extension.RecordSizeLimitExtensionPreparator;
import de.rub.nds.tlsattacker.core.protocol.serializer.extension.RecordSizeLimitExtensionSerializer;
import de.rub.nds.tlsattacker.core.state.TlsContext;
import de.rub.nds.tlsattacker.transport.ConnectionEndType;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
public class RecordSizeLimitExtensionHandlerTest {
private RecordSizeLimitExtensionHandler handler;
private TlsContext context;
@Before
public void setUp() {
Config config = Config.createConfig();
config.setDefaultRunningMode(RunningModeType.SERVER);
context = new TlsContext(config);
handler = new RecordSizeLimitExtensionHandler(context);
}
/**
* Test of adjustTLSContext method, of class RecordSizeLimitExtensionHandler.
*/
@Test
public void testAdjustTLSContextConnectionPeer() {
context.setTalkingConnectionEndType(ConnectionEndType.CLIENT);
RecordSizeLimitExtensionMessage msg = new RecordSizeLimitExtensionMessage();
msg.setRecordSizeLimit(new byte[] { (byte) 0x05, (byte) 0x39 });
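        // 0x0539 == 1337, so adjustTLSContext below is expected to set the outbound limit to 1337.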
assertNull(context.getOutboundRecordSizeLimit());
handler.adjustTLSContext(msg);
assertTrue(context.getOutboundRecordSizeLimit() == 1337);
}
@Test(expected = AdjustmentException.class)
public void testAdjustTLSContextInvalidSize() {
RecordSizeLimitExtensionMessage msg = new RecordSizeLimitExtensionMessage();
msg.setRecordSizeLimit(new byte[] { (byte) 0x05, (byte) 0x39, (byte) 0x00 });
assertNull(context.getOutboundRecordSizeLimit());
handler.adjustTLSContext(msg);
}
    @Test
    public void testAdjustTLSContextSizeTooSmall() {
RecordSizeLimitExtensionMessage msg = new RecordSizeLimitExtensionMessage();
msg.setRecordSizeLimit(new byte[] { (byte) 0x00, (byte) 0x2A });
assertNull(context.getOutboundRecordSizeLimit());
handler.adjustTLSContext(msg);
assertNull(context.getOutboundRecordSizeLimit());
}
/**
* Test of getParser method, of class RecordSizeLimitExtensionHandler.
*/
@Test
public void testGetParser() {
assertTrue(handler.getParser(new byte[] { 0, 1, 2, 3 }, 0,
context.getConfig()) instanceof RecordSizeLimitExtensionParser);
}
/**
* Test of getPreparator method, of class RecordSizeLimitExtensionHandler.
*/
@Test
public void testGetPreparator() {
assertTrue(
handler.getPreparator(new RecordSizeLimitExtensionMessage()) instanceof RecordSizeLimitExtensionPreparator);
}
/**
* Test of getSerializer method, of class RecordSizeLimitExtensionHandler.
*/
@Test
public void testGetSerializer() {
assertTrue(
handler.getSerializer(new RecordSizeLimitExtensionMessage()) instanceof RecordSizeLimitExtensionSerializer);
}
}
| 1,306 |
323 | <filename>examples/solv/mirror.h
char *findmetalinkurl(FILE *fp, unsigned char *chksump, Id *chksumtypep);
char *findmirrorlisturl(FILE *fp);
| 54 |
515 | <gh_stars>100-1000
{
"name": “easyMesh”,
"keywords": “esp8266 mesh network networking”,
"description": “A simple mesh networking library for esp8266. easyMesh is very simple to implement and establishes a simple and highly adaptable mesh network for any distributed application. Network synchronization, network mapping, time synchronization, package delivery, and dynamic network adaptation are all handled behind the scenes in the library. Just run the init() function and set up the callbacks, then you are off to the races with a dynamic, adaptable, masterless mesh network.“,
"repository":
{
"type": "git",
"url": "https://github.com/Coopdis/easyMesh"
},
"version": “1.0.0”,
"frameworks": “*”,
"platforms": "esp8266"
}
| 227 |
317 | // Released under the MIT License. See LICENSE for details.
#include "ballistica/networking/telnet_server.h"
#include "ballistica/app/app_globals.h"
#include "ballistica/core/context.h"
#include "ballistica/game/game.h"
#include "ballistica/networking/networking.h"
#include "ballistica/networking/networking_sys.h"
#include "ballistica/platform/platform.h"
#include "ballistica/python/python_command.h"
#include "ballistica/python/python_sys.h"
namespace ballistica {
TelnetServer::TelnetServer(int port) : port_(port) {
thread_ = new std::thread(RunThreadStatic, this);
assert(g_app_globals->telnet_server == nullptr);
g_app_globals->telnet_server = this;
// NOTE: we consider access implicitly granted on headless builds
// since we can't pop up the request dialog.
// There is still password protection and we now don't even spin
// up the telnet socket by default on servers.
if (HeadlessMode()) {
user_has_granted_access_ = true;
}
}
void TelnetServer::Pause() {
assert(InMainThread());
assert(!paused_);
{
std::unique_lock<std::mutex> lock(paused_mutex_);
paused_ = true;
}
// FIXME - need a way to kill these sockets;
// On iOS they die automagically but not android.
// attempted to force-close at some point but it didn't work (on android at
// least)
}
void TelnetServer::Resume() {
assert(InMainThread());
assert(paused_);
{
std::unique_lock<std::mutex> lock(paused_mutex_);
paused_ = false;
}
// Poke our thread so it can go on its way.
paused_cv_.notify_all();
}
#pragma clang diagnostic push
#pragma ide diagnostic ignored "ConstantFunctionResult"
auto TelnetServer::RunThread() -> int {
// Do this whole thing in a loop.
// If we get put to sleep we just start over.
while (true) {
// Sleep until we're unpaused.
if (paused_) {
std::unique_lock<std::mutex> lock(paused_mutex_);
paused_cv_.wait(lock, [this] { return (!paused_); });
}
sd_ = socket(AF_INET, SOCK_STREAM, 0);
if (sd_ < 0) {
Log("Error: Unable to open host socket; errno " + std::to_string(errno));
return 1;
}
// Make it reusable.
int on = 1;
int status =
setsockopt(sd_, SOL_SOCKET, SO_REUSEADDR, (const char*)&on, sizeof(on));
if (-1 == status) {
Log("Error setting SO_REUSEADDR on telnet server");
}
// Bind to local server port.
struct sockaddr_in serv_addr {};
serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = htonl(INADDR_ANY); // NOLINT
int result;
serv_addr.sin_port = htons(port_); // NOLINT
result = ::bind(sd_, (struct sockaddr*)&serv_addr, sizeof(serv_addr));
if (result != 0) {
return 1;
}
char buffer[10000];
const char* prompt = "ballisticacore> ";
const char* password_prompt = "password:";
// Now just listen and forward msg along to people.
while (true) {
struct sockaddr_storage from {};
socklen_t from_size = sizeof(from);
if (listen(sd_, 0) == 0) {
client_sd_ = accept(sd_, (struct sockaddr*)&from, &from_size);
if (client_sd_ < 0) {
break;
}
        // If we don't have access and haven't asked the user for it yet, ask them.
if (!user_has_granted_access_ && g_game
&& !have_asked_user_for_access_) {
g_game->PushAskUserForTelnetAccessCall();
have_asked_user_for_access_ = true;
}
// Require password for each connection if we have one
reading_password_ = require_password_;
if (g_game) {
if (reading_password_) {
PushPrint(password_prompt);
} else {
PushPrint(prompt);
}
}
while (true) {
result =
static_cast<int>(recv(client_sd_, buffer, sizeof(buffer) - 1, 0));
// Socket closed/disconnected.
if (result == 0 || result == -1) {
// We got closed for whatever reason.
if (client_sd_ != -1) {
g_platform->CloseSocket(client_sd_);
}
client_sd_ = -1;
break;
} else {
buffer[result] = 0;
            // Looks like these come in with '\r\n' at the end; let's strip that.
if (result > 0 && (buffer[result - 1] == '\n')) {
buffer[result - 1] = 0;
if (result > 1 && (buffer[result - 2] == '\r'))
buffer[result - 2] = 0;
}
if (g_game) {
if (user_has_granted_access_) {
if (reading_password_) {
if (GetRealTime() - last_try_time_ < 2000) {
PushPrint(
std::string("retried too soon; please wait a moment "
"and try again.\n")
+ password_prompt);
} else if (buffer == password_) {
reading_password_ = false;
PushPrint(prompt);
} else {
last_try_time_ = GetRealTime();
PushPrint(std::string("incorrect.\n") + password_prompt);
}
} else {
PushTelnetScriptCommand(buffer);
}
} else {
PushPrint(g_game->GetResourceString("telnetAccessDeniedText"));
}
}
}
}
} else {
// Listening failed; abort.
if (sd_ != -1) {
g_platform->CloseSocket(sd_);
}
break;
}
}
// Sleep for a moment to keep us from running wild if we're unable to block.
Platform::SleepMS(1000);
}
}
#pragma clang diagnostic pop
void TelnetServer::PushTelnetScriptCommand(const std::string& command) {
assert(g_game);
if (g_game == nullptr) {
return;
}
g_game->PushCall([this, command] {
// These are always run in whichever context is 'visible'.
ScopedSetContext cp(g_game->GetForegroundContext());
if (!g_app_globals->user_ran_commands) {
g_app_globals->user_ran_commands = true;
}
PythonCommand cmd(command, "<telnet>");
if (cmd.CanEval()) {
PyObject* obj = cmd.RunReturnObj(true, nullptr);
if (obj && obj != Py_None) {
PyObject* s = PyObject_Repr(obj);
if (s) {
const char* c = PyUnicode_AsUTF8(s);
PushPrint(std::string(c) + "\n");
Py_DECREF(s);
}
Py_DECREF(obj);
}
} else {
// Not eval-able; just run it.
cmd.Run();
}
PushPrint("ballisticacore> ");
});
}
void TelnetServer::PushPrint(const std::string& s) {
assert(g_game);
g_game->PushCall([this, s] { Print(s); });
}
void TelnetServer::Print(const std::string& s) {
// Currently we make the assumption that *only* the game thread writes to our
// socket.
assert(InGameThread());
if (client_sd_ != -1) {
send(client_sd_, s.c_str(),
static_cast_check_fit<socket_send_length_t>(s.size()), 0);
}
}
TelnetServer::~TelnetServer() = default;
void TelnetServer::SetAccessEnabled(bool v) { user_has_granted_access_ = v; }
void TelnetServer::SetPassword(const char* password) {
if (password != nullptr) {
password_ = password;
require_password_ = true;
} else {
require_password_ = false;
}
}
} // namespace ballistica
| 3,325 |
1,801 | <gh_stars>1000+
#include "c4-pedestrian-detector.h"
/*****************************************/
// Pedestrian_ICRA.cpp
/*****************************************/
// ---------------------------------------------------------------------
// Helper functions
// compute the Census Transform (CT) image "ct" from "original"
void ComputeCT(IntImage<double>& original,IntImage<int>& ct)
{
ct.Create(original.nrow,original.ncol);
for(int i=2; i<original.nrow-2; i++)
{
double* p1 = original.p[i-1];
double* p2 = original.p[i];
double* p3 = original.p[i+1];
int* ctp = ct.p[i];
for(int j=2; j<original.ncol-2; j++)
{
int index = 0;
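            // Build an 8-bit census descriptor: each bit is set when the centre pixel is <= one
            // of its eight neighbours.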
if(p2[j]<=p1[j-1]) index += 0x80;
if(p2[j]<=p1[j]) index += 0x40;
if(p2[j]<=p1[j+1]) index += 0x20;
if(p2[j]<=p2[j-1]) index += 0x10;
if(p2[j]<=p2[j+1]) index += 0x08;
if(p2[j]<=p3[j-1]) index += 0x04;
if(p2[j]<=p3[j]) index += 0x02;
if(p2[j]<=p3[j+1]) index ++;
ctp[j] = index;
}
}
}
// Load SVM models -- linear SVM trained using LIBLINEAR
double UseSVM_CD_FastEvaluationStructure(const char* modelfile, const int m, Array2dC<double>& result)
{
std::ifstream in(modelfile);
if(in.good()==false)
{
std::cout<<"SVM model "<<modelfile<<" can not be loaded."<<std::endl;
exit(-1);
}
std::string buffer;
std::getline(in,buffer); // first line
std::getline(in,buffer); // second line
std::getline(in,buffer); // third line
in>>buffer;
assert(buffer=="nr_feature");
int num_dim = m;
in>>num_dim;
assert(num_dim>0 && num_dim==m);
std::getline(in,buffer); // end of line 4
in>>buffer;
assert(buffer=="bias");
int bias;
in>>bias;
std::getline(in,buffer); //end of line 5;
in>>buffer;
assert(buffer=="w");
std::getline(in,buffer); //end of line 6
result.Create(1,num_dim);
for(int i=0; i<num_dim; i++) in>>result.buf[i];
double rho = 0;
if(bias>=0) in>>rho;
in.close();
return rho;
}
// Load SVM models -- Histogram Intersection Kernel SVM trained by libHIK
double UseSVM_CD_FastEvaluationStructure(const char* modelfile, const int m, const int upper_bound, Array2dC<double>& result)
{
std::ifstream fs(modelfile, std::fstream::binary);
if( !fs.is_open() )
{
std::cout << "SVM model " << modelfile << " can not be loaded." << std::endl;
exit(-1);
}
// Header
int rows, cols, type, channels;
fs.read((char*)&rows, sizeof(int)); // rows
fs.read((char*)&cols, sizeof(int)); // cols
fs.read((char*)&type, sizeof(int)); // type
fs.read((char*)&channels, sizeof(int)); // channels
// Data
cv::Mat mat(rows, cols, type);
fs.read((char*)mat.data, CV_ELEM_SIZE(type) * static_cast<size_t>(rows) * static_cast<size_t>(cols));
int num_dim = m;
result.Create(num_dim, upper_bound);
for(int i=0; i<num_dim; i++)
for (int j = 0; j < upper_bound; j++)
{
result.p[i][j]= mat.at<double>(i, j);
}
return -0.00455891;
}
// End of Helper functions
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Functions that load the two classifiers
void LoadCascade(std::string cascade1, std::string cascade2, DetectionScanner& ds)
{
std::vector<NodeDetector::NodeType> types;
std::vector<int> upper_bounds;
std::vector<std::string> filenames;
types.push_back(NodeDetector::CD_LIN); // first node
upper_bounds.push_back(100);
filenames.push_back(cascade1);
types.push_back(NodeDetector::CD_HIK); // second node
upper_bounds.push_back(353);
filenames.push_back(cascade2);
ds.LoadDetector(types,upper_bounds,filenames);
// You can adjust these parameters for different speed, accuracy etc
ds.cascade->nodes[0]->thresh += 0.8;
ds.cascade->nodes[1]->thresh -= 0.095;
}
void DetectionScanner::LoadDetector(std::vector<NodeDetector::NodeType>& types,std::vector<int>& upper_bounds,std::vector<std::string>& filenames)
{
size_t depth = types.size();
assert(depth>0 && depth==upper_bounds.size() && depth==filenames.size());
if(cascade)
delete cascade;
cascade = new CascadeDetector;
assert(xdiv>0 && ydiv>0);
for(size_t i=0; i<depth; i++)
cascade->AddNode(types[i],(xdiv-EXT)*(ydiv-EXT)*baseflength,upper_bounds[i],filenames[i].c_str());
hist.Create(1,baseflength*(xdiv-EXT)*(ydiv-EXT));
}
void NodeDetector::Load(const NodeType _type,const int _featurelength,const int _upper_bound,const int _index,const char* _filename)
{
type = _type;
index = _index;
filename = _filename;
featurelength = _featurelength;
upper_bound = _upper_bound;
if(type==CD_LIN)
thresh = UseSVM_CD_FastEvaluationStructure(_filename,_featurelength,classifier);
else if(type==CD_HIK)
thresh = UseSVM_CD_FastEvaluationStructure(_filename,_featurelength,upper_bound,classifier);
if(type==CD_LIN) type = LINEAR;
if(type==CD_HIK) type = HISTOGRAM;
}
void CascadeDetector::AddNode(const NodeDetector::NodeType _type,const int _featurelength,const int _upper_bound,const char* _filename)
{
if(length==size)
{
int newsize = size * 2;
NodeDetector** p = new NodeDetector*[newsize];
assert(p!=NULL);
std::copy(nodes,nodes+size,p);
size = newsize;
delete[] nodes;
nodes = p;
}
nodes[length] = new NodeDetector(_type,_featurelength,_upper_bound,length,_filename);
length++;
}
// End of functions that load the two classifiers
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Detection functions
// initialization -- compute the Census Transform image for CENTRIST
void DetectionScanner::InitImage(IntImage<double>& original)
{
image = original;
image.Sobel(sobel,false,false);
ComputeCT(sobel,ct);
}
// combine the (xdiv-1)*(ydiv-1) integral images into a single one
void DetectionScanner::InitIntegralImages(const int stepsize)
{
if(cascade->nodes[0]->type!=NodeDetector::LINEAR)
return; // No need to prepare integral images
const int hd = height/xdiv*2-2;
const int wd = width/ydiv*2-2;
scores.Create(ct.nrow,ct.ncol);
scores.Zero(cascade->nodes[0]->thresh/hd/wd);
double* linearweights = cascade->nodes[0]->classifier.buf;
for(int i=0; i<xdiv-EXT; i++)
{
const int xoffset = height/xdiv*i;
for(int j=0; j<ydiv-EXT; j++)
{
const int yoffset = width/ydiv*j;
for(int x=2; x<ct.nrow-2-xoffset; x++)
{
int* ctp = ct.p[x+xoffset]+yoffset;
double* tempp = scores.p[x];
for(int y=2; y<ct.ncol-2-yoffset; y++)
tempp[y] += linearweights[ctp[y]];
}
linearweights += baseflength;
}
}
scores.CalcIntegralImageInPlace();
for(int i=2; i<ct.nrow-2-height; i+=stepsize)
{
double* p1 = scores.p[i];
double* p2 = scores.p[i+hd];
for(int j=2; j<ct.ncol-2-width; j+=stepsize)
p1[j] += (p2[j+wd] - p2[j] - p1[j+wd]);
}
}
// Resize the input image and then re-compute Sobel image etc
void DetectionScanner::ResizeImage()
{
image.Resize(sobel,ratio);
image.Swap(sobel);
image.Sobel(sobel,false,false);
ComputeCT(sobel,ct);
}
// The function that does the real detection
int DetectionScanner::FastScan(IntImage<double>& original,std::vector<cv::Rect>& results,const int stepsize)
{
if(original.nrow<height+5 || original.ncol<width+5) return 0;
const int hd = height/xdiv;
const int wd = width/ydiv;
InitImage(original);
results.clear();
hist.Create(1,baseflength*(xdiv-EXT)*(ydiv-EXT));
NodeDetector* node = cascade->nodes[1];
double** pc = node->classifier.p;
int oheight = original.nrow, owidth = original.ncol;
cv::Rect rect;
while(image.nrow>=height && image.ncol>=width)
{
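        // Stage 1 (linear SVM) scores are accumulated into an integral image; windows whose
        // stage-1 score is not positive are skipped before the HIK stage evaluates the CENTRIST
        // histogram below.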
InitIntegralImages(stepsize);
for(int i=2; i+height<image.nrow-2; i+=stepsize)
{
const double* sp = scores.p[i];
for(int j=2; j+width<image.ncol-2; j+=stepsize)
{
if(sp[j]<=0) continue;
int* p = hist.buf;
hist.Zero();
for(int k=0; k<xdiv-EXT; k++)
{
for(int t=0; t<ydiv-EXT; t++)
{
for(int x=i+k*hd+1; x<i+(k+1+EXT)*hd-1; x++)
{
int* ctp = ct.p[x];
for(int y=j+t*wd+1; y<j+(t+1+EXT)*wd-1; y++)
p[ctp[y]]++;
}
p += baseflength;
}
}
double score = node->thresh;
for(int k=0; k<node->classifier.nrow; k++) score += pc[k][hist.buf[k]];
if(score>0)
{
rect.y = i * oheight / image.nrow;
rect.height = (oheight * height) / image.nrow + 1;
rect.x = j * owidth / image.ncol;
rect.width = (width * owidth) /image.ncol + 1;
results.push_back(rect);
}
}
}
ResizeImage();
}
return 0;
}
// End of Detection functions
// ---------------------------------------------------------------------
| 4,531 |
1,126 | /*
* first --> First number
* second --> Second number
* There are two implementations:
* Recursive(euclideanGCDRecursive) and Non-Recursive(euclideanGCD)
*/
public class EuclideanGCD {
static int euclideanGCD(int first, int second) {
while(second != 0) { // Iterate till second becomes zero
int temp = second; // Temporary variable to hold value of second
second = first % second;
first = temp;
}
return first; // When second becomes 0, first becomes gcd of both
}
static int euclideanGCDRecursive(int first, int second) {
return (second == 0) ? first : euclideanGCDRecursive(second, (first % second));
// First becomes GCD when second becomes zero
}
public static void main(String[] args) {
int first = 25;
int second = 5;
int answerIterative = EuclideanGCD.euclideanGCD(first, second);
int answerRecursive = EuclideanGCD.euclideanGCDRecursive(first, second);
System.out.printf("GCD of %d and %d is : %d by recursive algo.\n", first,
second, answerRecursive);
System.out.printf("GCD of %d and %d is : %d by iterative algo.\n", first,
second, answerIterative);
}
}
| 639 |
373 | package com.dianrong.common.uniauth.server.service;
import com.dianrong.common.uniauth.common.bean.dto.AttributeExtendDto;
import com.dianrong.common.uniauth.common.bean.dto.PageDto;
import com.dianrong.common.uniauth.server.data.entity.AttributeExtend;
import com.dianrong.common.uniauth.server.data.entity.AttributeExtendExample;
import com.dianrong.common.uniauth.server.data.mapper.AttributeExtendMapper;
import com.dianrong.common.uniauth.server.datafilter.DataFilter;
import com.dianrong.common.uniauth.server.service.cache.AttributeExtendCache;
import com.dianrong.common.uniauth.server.service.common.TenancyBasedService;
import com.dianrong.common.uniauth.server.service.inner.AttributeExtendInnerService;
import com.dianrong.common.uniauth.server.util.BeanConverter;
import com.dianrong.common.uniauth.server.util.CheckEmpty;
import com.dianrong.common.uniauth.server.util.ParamCheck;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.Resource;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@Service
public class AttributeExtendService extends TenancyBasedService {
@Autowired
private AttributeExtendMapper attributeExtendMapper;
@Autowired
private AttributeExtendCache attributeExtendCache;
@Autowired
private AttributeExtendInnerService attributeExtendInnerService;
@Resource(name = "attributeExtendDataFilter")
private DataFilter dataFilter;
/**
   * Adds a new attribute extend record.
*/
public AttributeExtendDto add(String code, String category, String subcategory,
String description) {
return attributeExtendInnerService.add(code, category, subcategory, description);
}
/**
   * Gets an attribute extend record by id.
*/
public AttributeExtendDto getById(Long id) {
return attributeExtendCache.getById(id);
}
/**
   * Updates an attribute extend record by its id.
*/
public int updateByKey(Long id, String code, String category, String subcategory,
String description) {
CheckEmpty.checkEmpty(id, "id");
return attributeExtendCache.updateByKey(id, code, category, subcategory, description);
}
/**
   * Pages through attribute extend records matching the given code by fuzzy (LIKE) search.
*/
public PageDto<AttributeExtendDto> search(String code, Integer pageNumber, Integer pageSize) {
CheckEmpty.checkEmpty(pageNumber, "pageNumber");
CheckEmpty.checkEmpty(pageSize, "pageSize");
AttributeExtendExample example = new AttributeExtendExample();
example.setPageOffSet(pageNumber * pageSize);
example.setPageSize(pageSize);
example.setOrderByClause("id desc");
AttributeExtendExample.Criteria criteria = example.createCriteria();
if (StringUtils.isNotBlank(code)) {
criteria.andCodeLike('%' + code + '%');
}
criteria.andTenancyIdEqualTo(tenancyService.getTenancyIdWithCheck());
    // Query
int count = attributeExtendMapper.countByExample(example);
ParamCheck.checkPageParams(pageNumber, pageSize, count);
List<AttributeExtend> attributeExtends = attributeExtendMapper.selectByExample(example);
    // Convert entities to DTOs
List<AttributeExtendDto> attributeExtendDtos = new ArrayList<AttributeExtendDto>();
for (AttributeExtend attributeExtend : attributeExtends) {
attributeExtendDtos.add(BeanConverter.convert(attributeExtend, AttributeExtendDto.class));
}
    // Build the pagination object
PageDto<AttributeExtendDto> pageDto =
new PageDto<AttributeExtendDto>(pageNumber, pageSize, count, attributeExtendDtos);
return pageDto;
}
/**
   * Gets the extended attributes associated with the given profile id.
*/
public List<AttributeExtend> getAttributesByProfileId(Long profileId) {
return attributeExtendInnerService.getAttributesByProfileId(profileId);
}
}
| 1,358 |
556 | <reponame>mmaaz60/DCL<filename>models/Asoftmax_linear.py
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn import Parameter
import math
def myphi(x,m):
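    # Polynomial approximation of cos(m*x); used by AngleLinear.forward when phiflag is False.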
x = x * m
return 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6) + \
x**8/math.factorial(8) - x**9/math.factorial(9)
class AngleLinear(nn.Module):
def __init__(self, in_features, out_features, m = 4, phiflag=True):
super(AngleLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features,out_features))
self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
self.phiflag = phiflag
self.m = m
self.mlambda = [
lambda x: x**0,
lambda x: x**1,
lambda x: 2*x**2-1,
lambda x: 4*x**3-3*x,
lambda x: 8*x**4-8*x**2+1,
lambda x: 16*x**5-20*x**3+5*x
]
def forward(self, input):
x = input # size=(B,F) F is feature len
w = self.weight # size=(F,Classnum) F=in_features Classnum=out_features
ww = w.renorm(2,1,1e-5).mul(1e5)
xlen = x.pow(2).sum(1).pow(0.5) # size=B
wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
cos_theta = x.mm(ww) # size=(B,Classnum)
cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
cos_theta = cos_theta.clamp(-1,1)
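        # A-Softmax margin: cos(m*theta) comes from the mlambda polynomial expansion and is
        # unfolded into the monotonic phi(theta) = (-1)^k * cos(m*theta) - 2k.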
if self.phiflag:
cos_m_theta = self.mlambda[self.m](cos_theta)
theta = Variable(cos_theta.data.acos())
k = (self.m*theta/3.14159265).floor()
n_one = k*0.0 - 1
phi_theta = (n_one**k) * cos_m_theta - 2*k
else:
theta = cos_theta.acos()
phi_theta = myphi(theta,self.m)
phi_theta = phi_theta.clamp(-1*self.m,1)
cos_theta = cos_theta * xlen.view(-1,1)
phi_theta = phi_theta * xlen.view(-1,1)
output = (cos_theta,phi_theta)
return output # size=(B,Classnum,2)
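# Minimal usage sketch (hypothetical shapes; the angular-margin loss that
# consumes the (cos_theta, phi_theta) pair is not part of this file):
#   layer = AngleLinear(in_features=512, out_features=10, m=4)
#   feats = torch.randn(8, 512)
#   cos_theta, phi_theta = layer(feats)  # each tensor has shape (8, 10)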
| 1,140 |
2,663 | {
"type": "Profit Loss",
"definition": {
"text": "Profit loss is the difference between the balance at the end and the balance at the beginning of a certain period, given by the context."
},
"paragraphs": [
{
"style": "Block",
"text": "Content"
},
{
"style": "Text",
"text": "In the context of the base asset or the quoted asset, the calculation is done by subtracting the balances in the corresponding assets, using the variable appropriate to the larger context (i.e.: episode, position, etc.)."
},
{
"style": "Text",
"text": "In general terms:"
},
{
"style": "Javascript",
"text": "base asset profit loss = base asset end balance - base asset begin balance"
},
{
"style": "Javascript",
"text": "quoted asset profit loss = quoted asset end balance - quoted asset begin balance"
},
{
"style": "Text",
"text": "In the case of the Episode Base Asset and Episode Quoted Asset:",
"updated": 1609880629828
},
{
"style": "Javascript",
"text": "tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value =\n tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.balance.value -\n sessionParameters.sessionBaseAsset.config.initialBalance\n \ntradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value =\n tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.balance.value -\n sessionParameters.sessionQuotedAsset.config.initialBalance",
"updated": 1609880593226
},
{
"style": "Text",
"text": " "
},
{
"style": "Text",
"text": "In the case of the Position Base Asset and Position Quoted Asset:",
"updated": 1609880656759
},
{
"style": "Javascript",
"text": "tradingEngine.tradingCurrent.position.positionBaseAsset.profitLoss.value =\n tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.balance.value -\n tradingEngine.tradingCurrent.position.positionBaseAsset.beginBalance\n \ntradingEngine.tradingCurrent.position.positionQuotedAsset.profitLoss.value =\n tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.balance.value -\n tradingEngine.tradingCurrent.position.positionQuotedAsset.beginBalance",
"updated": 1609880668611
},
{
"style": "Text",
"text": " "
},
{
"style": "Text",
"text": "In the context of the episode statistics or the position statistics, the calculation is done consolidating the profits of both assets."
},
{
"style": "Note",
"text": "When the context does not refer to either of the assets in particular, then both assets are taken into account in the calculation."
},
{
"style": "Text",
"text": "In the context of the episode statistics:"
},
{
"style": "Javascript",
"text": "tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.profitLoss.value =\n tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value * \n tradingEngine.tradingCurrent.tradingEpisode.candle.close.value +\n tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value",
"updated": 1609880717964
},
{
"style": "Text",
"text": "In the context of the position statistics:"
},
{
"style": "Javascript",
"text": "tradingEngine.tradingCurrent.position.positionStatistics.profitLoss.value =\n tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value * \n tradingEngine.tradingCurrent.position.endRate.value +\n tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value",
"updated": 1609880790493
},
{
"style": "Block",
"text": "Configuring",
"updated": 1609880799536
},
{
"style": "Title",
"text": "Profit Loss Configuration",
"updated": 1609880814504
},
{
"style": "Subtitle",
"text": "Properties",
"updated": 1609880843742
},
{
"style": "List",
"text": "initialValue allows resetting the initial state of the node to an arbitrary value.",
"updated": 1609880851919
}
]
} | 2,063 |
538 | // Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// Full license terms provided in LICENSE.md file.
#ifndef CAFFE_ROS_CAFFE_ROS_H
#define CAFFE_ROS_CAFFE_ROS_H
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include "caffe_ros/tensor_net.h"
namespace caffe_ros
{
// Implements caffe_ros node.
class CaffeRos
{
public:
CaffeRos();
~CaffeRos() = default;
void spin();
private:
// Specifies whether to apply any post processing to the output of the DNN.
enum class PostProc
{
None = 0,
YOLO // Compute object boxes from the output of YOLO DNN.
};
    // Default camera queue size. The recommended value is one, so as to make
    // sure we process the most recent image from the camera.
const int DEFAULT_CAMERA_QUEUE_SIZE = 1;
    // DNN output (publisher) queue. A value of 1 makes sure only the most
    // recent output gets published.
const int DEFAULT_DNN_QUEUE_SIZE = 1;
// Current image being worked on.
sensor_msgs::Image::ConstPtr cur_img_;
// Publisher for the DNN output.
ros::Publisher output_pub_;
// Subscriber to camera capture topic (gscam).
ros::Subscriber image_sub_;
// DNN predictor.
TensorNet net_;
bool debug_mode_;
std::string debug_dir_;
PostProc post_proc_;
// Probability and IOU thresholds used in object detection net (YOLO).
float obj_det_threshold_;
float iou_threshold_;
// Max rate to run the node at (in Hz).
float max_rate_hz_;
private:
sensor_msgs::Image::ConstPtr computeOutputs();
void imageCallback(const sensor_msgs::Image::ConstPtr& msg);
void setPostProcessing(const std::string& postProc)
{
if (postProc.size() == 0)
post_proc_ = PostProc::None;
else if (postProc == "YOLO")
post_proc_ = PostProc::YOLO;
else
{
ROS_FATAL("Post processing %s is not supported. Supported: YOLO", postProc.c_str());
ros::shutdown();
}
}
};
}
#endif | 851 |
473 | <reponame>zheli-1/crete-dev<gh_stars>100-1000
#include "io.h"
int main(void)
{
long long rd, rs, rt, result, dsp, dspresult;
rt = 0x80003698CE8F9201;
rs = 0x800034634BCDE321;
result = 0x7fff16587a530313;
dspresult = 0x01;
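    /* mulq_rs.qh multiplies the four Q15 halfword pairs of rt and rs with
       rounding and saturation (a best-effort reading of the MIPS DSP ASE, not
       taken from this test itself). The DSPControl flag checked below is
       expected to be raised by a saturating product. */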
__asm
("mulq_rs.qh %0, %2, %3\n\t"
"rddsp %1\n\t"
: "=r"(rd), "=r"(dsp)
: "r"(rt), "r"(rs)
);
if (rd != result) {
printf("mulq_rs.qh error\n");
return -1;
}
dsp = (dsp >> 21) & 0x01;
if (dsp != dspresult) {
printf("mulq_rs.qh DSPControl Reg ouflag error\n");
return -1;
}
return 0;
}
| 370 |
5,852 | <filename>proxygen/lib/http/codec/HQStreamCodec.cpp
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <proxygen/lib/http/codec/HQStreamCodec.h>
#include <folly/Format.h>
#include <folly/ScopeGuard.h>
#include <folly/SingletonThreadLocal.h>
#include <folly/io/Cursor.h>
#include <proxygen/lib/http/HTTP3ErrorCode.h>
#include <proxygen/lib/http/codec/HQUtils.h>
#include <proxygen/lib/http/codec/compress/QPACKCodec.h>
namespace {
using namespace proxygen;
void logIfFieldSectionExceedsPeerMax(const HTTPHeaderSize& encodedSize,
uint32_t maxHeaderListSize,
const HTTPHeaders& fields) {
if (encodedSize.uncompressed > maxHeaderListSize) {
// The remote side told us they don't want headers this large, but try
// anyways
std::string serializedFields;
fields.forEach(
[&serializedFields](const std::string& name, const std::string& value) {
serializedFields =
folly::to<std::string>(serializedFields, "\\n", name, ":", value);
});
LOG(ERROR) << "generating HEADERS frame larger than peer maximum nHeaders="
<< fields.size() << " all headers=" << serializedFields;
}
}
} // namespace
namespace proxygen { namespace hq {
using namespace folly;
using namespace folly::io;
HQStreamCodec::HQStreamCodec(StreamID streamId,
TransportDirection direction,
QPACKCodec& headerCodec,
folly::IOBufQueue& encoderWriteBuf,
folly::IOBufQueue& decoderWriteBuf,
folly::Function<uint64_t()> qpackEncoderMaxData,
HTTPSettings& ingressSettings)
: HQFramedCodec(streamId, direction),
headerCodec_(headerCodec),
qpackEncoderWriteBuf_(encoderWriteBuf),
qpackDecoderWriteBuf_(decoderWriteBuf),
qpackEncoderMaxDataFn_(std::move(qpackEncoderMaxData)),
ingressSettings_(ingressSettings) {
VLOG(4) << "creating " << getTransportDirectionString(direction)
<< " HQ stream codec for stream " << streamId_;
}
HQStreamCodec::~HQStreamCodec() {
}
ParseResult HQStreamCodec::checkFrameAllowed(FrameType type) {
if (isConnect_ && type != hq::FrameType::DATA) {
return HTTP3::ErrorCode::HTTP_FRAME_UNEXPECTED;
}
switch (type) {
case hq::FrameType::SETTINGS:
case hq::FrameType::GOAWAY:
case hq::FrameType::MAX_PUSH_ID:
case hq::FrameType::CANCEL_PUSH:
case hq::FrameType::PRIORITY_UPDATE:
case hq::FrameType::PUSH_PRIORITY_UPDATE:
return HTTP3::ErrorCode::HTTP_FRAME_UNEXPECTED;
case hq::FrameType::PUSH_PROMISE:
if (transportDirection_ == TransportDirection::DOWNSTREAM) {
return HTTP3::ErrorCode::HTTP_FRAME_UNEXPECTED;
}
default:
break;
}
return folly::none;
}
ParseResult HQStreamCodec::parseData(Cursor& cursor,
const FrameHeader& header) {
// NOTE: If an error path is added to this method, it needs to setParserPaused
// It's possible the data is in the wrong place per HTTP semantics, but it
// will be caught by HTTPTransaction
std::unique_ptr<IOBuf> outData;
VLOG(10) << "parsing all frame DATA bytes for stream=" << streamId_
<< " length=" << header.length;
auto res = hq::parseData(cursor, header, outData);
CHECK(!res);
// no need to do deliverCallbackIfAllowed
// the HQSession can trap this and stop reading.
// i.e we can immediately reset in onNewStream if we get a stream id
// higher than MAXID advertised in the goaway
if (callback_ && (outData && !outData->empty())) {
callback_->onBody(streamId_, std::move(outData), 0);
}
return res;
}
ParseResult HQStreamCodec::parseHeaders(Cursor& cursor,
const FrameHeader& header) {
setParserPaused(true);
if (finalIngressHeadersSeen_) {
if (parsingTrailers_) {
VLOG(4) << "Unexpected HEADERS frame for stream=" << streamId_;
if (callback_) {
HTTPException ex(HTTPException::Direction::INGRESS_AND_EGRESS,
"Invalid HEADERS frame");
ex.setHttp3ErrorCode(HTTP3::ErrorCode::HTTP_FRAME_UNEXPECTED);
callback_->onError(streamId_, ex, false);
}
return folly::none;
} else {
parsingTrailers_ = true;
}
}
std::unique_ptr<IOBuf> outHeaderData;
auto res = hq::parseHeaders(cursor, header, outHeaderData);
if (res) {
VLOG(4) << "Invalid HEADERS frame for stream=" << streamId_;
return res;
}
VLOG(4) << "Parsing HEADERS frame for stream=" << streamId_
<< " length=" << outHeaderData->computeChainDataLength();
if (callback_ && !parsingTrailers_) {
// H2 performs the decompression/semantic validation first. Also, this
// should really only be called once per this whole codec, not per header
// block -- think info status. This behavior mirrors HTTP2Codec at present.
callback_->onMessageBegin(streamId_, nullptr);
}
decodeInfo_.init(transportDirection_ == TransportDirection::DOWNSTREAM,
parsingTrailers_,
/*validate=*/true,
strictValidation_,
/*allowEmptyPath=*/false);
headerCodec_.decodeStreaming(
streamId_, std::move(outHeaderData), header.length, this);
// decodeInfo_.msg gets moved in onHeadersComplete. If it is still around,
// parsing is incomplete, leave the parser paused.
if (!decodeInfo_.msg) {
setParserPaused(false);
}
return res;
}
ParseResult HQStreamCodec::parsePushPromise(Cursor& cursor,
const FrameHeader& header) {
setParserPaused(true);
PushId outPushId;
std::unique_ptr<IOBuf> outHeaderData;
auto res = hq::parsePushPromise(cursor, header, outPushId, outHeaderData);
if (res) {
return res;
}
// Notify the callback on beginning of a push promise.
// The callback will be further notified when the header block
// is fully parsed, via a call to `onHeadersComplete`.
// It is up to the callback to match the push promise
// with the headers block, via using same stream id
if (callback_) {
callback_->onPushMessageBegin(outPushId, streamId_, nullptr);
}
decodeInfo_.init(true /* isReq */,
false /* isRequestTrailers */,
/*validate=*/true,
strictValidation_,
/*allowEmptyPath=*/false);
auto headerDataLength = outHeaderData->computeChainDataLength();
headerCodec_.decodeStreaming(
streamId_, std::move(outHeaderData), headerDataLength, this);
if (!decodeInfo_.msg) {
setParserPaused(false);
} // else parsing incomplete, see comment in parseHeaders
return res;
}
void HQStreamCodec::onHeader(const HPACKHeaderName& name,
const folly::fbstring& value) {
if (decodeInfo_.onHeader(name, value)) {
if (userAgent_.empty() && name.getHeaderCode() == HTTP_HEADER_USER_AGENT) {
userAgent_ = value.toStdString();
}
} else {
VLOG(4) << "dir=" << uint32_t(transportDirection_)
<< decodeInfo_.parsingError << " codec=" << headerCodec_;
}
}
void HQStreamCodec::onHeadersComplete(HTTPHeaderSize decodedSize,
bool acknowledge) {
CHECK(parserPaused_);
decodeInfo_.onHeadersComplete(decodedSize);
auto resumeParser = folly::makeGuard([this] { setParserPaused(false); });
auto g2 = folly::makeGuard(activationHook_());
// Check parsing error
DCHECK_EQ(decodeInfo_.decodeError, HPACK::DecodeError::NONE);
// Leave msg in decodeInfo_ for now, to keep the parser paused
if (!decodeInfo_.parsingError.empty()) {
LOG(ERROR) << "Failed parsing header list for stream=" << streamId_
<< ", error=" << decodeInfo_.parsingError;
if (!decodeInfo_.headerErrorValue.empty()) {
std::cerr << " value=" << decodeInfo_.headerErrorValue << std::endl;
}
HTTPException err(
HTTPException::Direction::INGRESS,
folly::format(
"HQStreamCodec stream error: stream={} status={} error:{}",
streamId_,
400,
decodeInfo_.parsingError)
.str());
if (parsingTrailers_) {
err.setHttp3ErrorCode(HTTP3::ErrorCode::HTTP_MESSAGE_ERROR);
} else {
err.setHttpStatusCode(400);
}
callback_->onError(streamId_, err, true);
resumeParser.dismiss();
return;
}
std::unique_ptr<HTTPMessage> msg = std::move(decodeInfo_.msg);
msg->setAdvancedProtocolString(getCodecProtocolString(CodecProtocol::HQ));
if (curHeader_.type == hq::FrameType::HEADERS) {
if (!finalIngressHeadersSeen_ &&
(msg->isRequest() || !msg->is1xxResponse())) {
finalIngressHeadersSeen_ = true;
}
}
if (transportDirection_ == TransportDirection::DOWNSTREAM &&
msg->getMethod() == HTTPMethod::CONNECT) {
isConnect_ = true;
}
if (acknowledge) {
qpackDecoderWriteBuf_.append(headerCodec_.encodeHeaderAck(streamId_));
}
// Report back what we've parsed
if (callback_) {
if (parsingTrailers_) {
auto trailerHeaders =
std::make_unique<HTTPHeaders>(msg->extractHeaders());
callback_->onTrailersComplete(streamId_, std::move(trailerHeaders));
} else {
// TODO: should we treat msg as chunked like H2?
callback_->onHeadersComplete(streamId_, std::move(msg));
}
}
}
void HQStreamCodec::onDecodeError(HPACK::DecodeError decodeError) {
// the parser may be paused, but this codec is dead.
CHECK(parserPaused_);
decodeInfo_.decodeError = decodeError;
DCHECK_NE(decodeInfo_.decodeError, HPACK::DecodeError::NONE);
LOG(ERROR) << "Failed decoding header block for stream=" << streamId_
<< " decodeError=" << uint32_t(decodeError);
if (decodeInfo_.msg) {
// print the partial message
decodeInfo_.msg->dumpMessage(3);
}
if (callback_) {
auto g = folly::makeGuard(activationHook_());
HTTPException ex(
HTTPException::Direction::INGRESS,
folly::to<std::string>("Stream headers decompression error=",
uint32_t(decodeError)));
ex.setHttp3ErrorCode(HTTP3::ErrorCode::HTTP_QPACK_DECOMPRESSION_FAILED);
// HEADERS_TOO_LARGE is a stream error, everything else is a session error
callback_->onError(decodeError == HPACK::DecodeError::HEADERS_TOO_LARGE
? streamId_
: kSessionStreamId,
ex,
false);
}
// leave the partial msg in decodeInfo, it keeps the parser paused
}
void HQStreamCodec::generateHeader(
folly::IOBufQueue& writeBuf,
StreamID stream,
const HTTPMessage& msg,
bool /*eom*/,
HTTPHeaderSize* size,
const folly::Optional<HTTPHeaders>& extraHeaders) {
DCHECK_EQ(stream, streamId_);
generateHeaderImpl(writeBuf, msg, folly::none, size, extraHeaders);
// For requests, set final header seen flag right away.
// For responses, header is final only if response code is >= 200.
if (msg.isRequest() || (msg.isResponse() && msg.getStatusCode() >= 200)) {
finalEgressHeadersSeen_ = true;
}
}
void HQStreamCodec::generatePushPromise(folly::IOBufQueue& writeBuf,
StreamID stream,
const HTTPMessage& msg,
StreamID pushId,
bool /*eom*/,
HTTPHeaderSize* size) {
DCHECK_EQ(stream, streamId_);
DCHECK(transportDirection_ == TransportDirection::DOWNSTREAM);
generateHeaderImpl(
writeBuf, msg, pushId, size, folly::none /* extraHeaders */);
}
void HQStreamCodec::generateHeaderImpl(
folly::IOBufQueue& writeBuf,
const HTTPMessage& msg,
folly::Optional<StreamID> pushId,
HTTPHeaderSize* size,
const folly::Optional<HTTPHeaders>& extraHeaders) {
auto result = headerCodec_.encodeHTTP(qpackEncoderWriteBuf_,
msg,
true,
streamId_,
maxEncoderStreamData(),
extraHeaders);
if (size) {
*size = headerCodec_.getEncodedSize();
}
logIfFieldSectionExceedsPeerMax(
headerCodec_.getEncodedSize(),
ingressSettings_.getSetting(SettingsId::MAX_HEADER_LIST_SIZE,
std::numeric_limits<uint32_t>::max()),
msg.getHeaders());
// HTTP/2 serializes priority here, but HQ priorities need to go on the
// control stream
WriteResult res;
if (pushId) {
res = hq::writePushPromise(writeBuf, *pushId, std::move(result));
} else {
res = hq::writeHeaders(writeBuf, std::move(result));
}
if (res.hasError()) {
LOG(ERROR) << __func__ << ": failed to write "
<< ((pushId) ? "push promise: " : "headers: ") << res.error();
}
}
size_t HQStreamCodec::generateBodyImpl(folly::IOBufQueue& writeBuf,
std::unique_ptr<folly::IOBuf> chain) {
auto result = hq::writeData(writeBuf, std::move(chain));
if (result) {
return *result;
}
LOG(FATAL) << "frame exceeded 2^62-1 limit";
return 0;
}
size_t HQStreamCodec::generateBody(folly::IOBufQueue& writeBuf,
StreamID stream,
std::unique_ptr<folly::IOBuf> chain,
folly::Optional<uint8_t> /*padding*/,
bool /*eom*/) {
DCHECK_EQ(stream, streamId_);
size_t bytesWritten = generateBodyImpl(writeBuf, std::move(chain));
return bytesWritten;
}
size_t HQStreamCodec::generateTrailers(folly::IOBufQueue& writeBuf,
StreamID stream,
const HTTPHeaders& trailers) {
DCHECK_EQ(stream, streamId_);
std::vector<compress::Header> allTrailers;
CodecUtil::appendHeaders(trailers, allTrailers, HTTP_HEADER_NONE);
auto encodeRes =
headerCodec_.encode(allTrailers, streamId_, maxEncoderStreamData());
qpackEncoderWriteBuf_.append(std::move(encodeRes.control));
logIfFieldSectionExceedsPeerMax(
headerCodec_.getEncodedSize(),
ingressSettings_.getSetting(SettingsId::MAX_HEADER_LIST_SIZE,
std::numeric_limits<uint32_t>::max()),
trailers);
WriteResult res;
res = hq::writeHeaders(writeBuf, std::move(encodeRes.stream));
if (res.hasError()) {
LOG(ERROR) << __func__ << ": failed to write trailers: " << res.error();
return 0;
}
return *res;
}
size_t HQStreamCodec::generateEOM(folly::IOBufQueue& /*writeBuf*/,
StreamID stream) {
// Generate EOM is a no-op
DCHECK_EQ(stream, streamId_);
return 0;
}
CompressionInfo HQStreamCodec::getCompressionInfo() const {
return headerCodec_.getCompressionInfo();
}
}} // namespace proxygen::hq
| 6,552 |
1,621 | <reponame>sigmaister/snakeware_os
import pygame
import pygame_gui
def load(manager, params):
"""
Launch a Colour Picker dialog to change the desktop background color.
"""
# default position
pos = (100, 100)
if params is not None and len(params) > 0:
pos = params[0]
pygame_gui.windows.UIColourPickerDialog(
rect=pygame.Rect(pos, (600, 400)),
manager=manager,
window_title="Set Background Color",
object_id="#desktop_colour_picker",
)
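# Hypothetical invocation, assuming the snakeware launcher passes in a
# pygame_gui.UIManager and an optional list whose first element is the
# desired window position:
#   load(manager, [(200, 150)])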
| 203 |
822 | """
Follows the FIPS PUB 180-4 description for calculating SHA-256 hash function
https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
No one in their right mind should use this for any serious reason. This was written
purely for educational purposes.
"""
import math
from itertools import count, islice
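# Sanity check (added note, not part of the original write-up): the output
# should match hashlib's, e.g. sha256(b"abc").hex() ==
# "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad".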
# -----------------------------------------------------------------------------
# SHA-256 Functions, defined in Section 4
def rotr(x, n, size=32):
return (x >> n) | (x << size - n) & (2**size - 1)
def shr(x, n):
return x >> n
def sig0(x):
return rotr(x, 7) ^ rotr(x, 18) ^ shr(x, 3)
def sig1(x):
return rotr(x, 17) ^ rotr(x, 19) ^ shr(x, 10)
def capsig0(x):
return rotr(x, 2) ^ rotr(x, 13) ^ rotr(x, 22)
def capsig1(x):
return rotr(x, 6) ^ rotr(x, 11) ^ rotr(x, 25)
def ch(x, y, z):
return (x & y)^ (~x & z)
def maj(x, y, z):
return (x & y) ^ (x & z) ^ (y & z)
def b2i(b):
return int.from_bytes(b, 'big')
def i2b(i):
return i.to_bytes(4, 'big')
# -----------------------------------------------------------------------------
# SHA-256 Constants
def is_prime(n):
return not any(f for f in range(2,int(math.sqrt(n))+1) if n%f == 0)
def first_n_primes(n):
return islice(filter(is_prime, count(start=2)), n)
def frac_bin(f, n=32):
""" return the first n bits of fractional part of float f """
f -= math.floor(f) # get only the fractional part
f *= 2**n # shift left
f = int(f) # truncate the rest of the fractional content
return f
def genK():
"""
Follows Section 4.2.2 to generate K
The first 32 bits of the fractional parts of the cube roots of the first
64 prime numbers:
428a2f98 71374491 b5c0fbcf e9b5dba5 3956c25b 59f111f1 923f82a4 ab1c5ed5
d807aa98 12835b01 243185be 550c7dc3 72be5d74 80deb1fe 9bdc06a7 c19bf174
e49b69c1 efbe4786 0fc19dc6 240ca1cc 2de92c6f 4a7484aa 5cb0a9dc 76f988da
983e5152 a831c66d b00327c8 bf597fc7 c6e00bf3 d5a79147 06ca6351 14292967
27b70a85 2e1b2138 4d2c6dfc 53380d13 650a7354 766a0abb 81c2c92e 92722c85
a2bfe8a1 a81a664b c24b8b70 c76c51a3 d192e819 d6990624 f40e3585 106aa070
19a4c116 1e376c08 2748774c 34b0bcb5 391c0cb3 4ed8aa4a 5b9cca4f 682e6ff3
748f82ee 78a5636f 84c87814 8cc70208 90befffa a4506ceb bef9a3f7 c67178f2
"""
return [frac_bin(p ** (1/3.0)) for p in first_n_primes(64)]
def genH():
"""
Follows Section 5.3.3 to generate the initial hash value H^0
The first 32 bits of the fractional parts of the square roots of
the first 8 prime numbers.
6a09e667 bb67ae85 3c6ef372 a54ff53a 9b05688c 510e527f 1f83d9ab 5be0cd19
"""
return [frac_bin(p ** (1/2.0)) for p in first_n_primes(8)]
# -----------------------------------------------------------------------------
def pad(b):
""" Follows Section 5.1: Padding the message """
b = bytearray(b) # convert to a mutable equivalent
l = len(b) * 8 # note: len returns number of bytes not bits
    # append the bit "1" to the end of the message
b.append(0b10000000) # appending 10000000 in binary (=128 in decimal)
# follow by k zero bits, where k is the smallest non-negative solution to
# l + 1 + k = 448 mod 512
# i.e. pad with zeros until we reach 448 (mod 512)
while (len(b)*8) % 512 != 448:
b.append(0x00)
# the last 64-bit block is the length l of the original message
# expressed in binary (big endian)
b.extend(l.to_bytes(8, 'big'))
return b
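# Worked example of the padding rule above: a 3-byte message (24 bits) gets one
# 0x80 byte, then 52 zero bytes to reach 448 mod 512 bits, then the 8-byte
# big-endian length 24, for a total of 64 bytes (one 512-bit block).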
def sha256(b: bytes) -> bytes:
# Section 4.2
K = genK()
# Section 5: Preprocessing
# Section 5.1: Pad the message
b = pad(b)
# Section 5.2: Separate the message into blocks of 512 bits (64 bytes)
blocks = [b[i:i+64] for i in range(0, len(b), 64)]
# for each message block M^1 ... M^N
H = genH() # Section 5.3
# Section 6
for M in blocks: # each block is a 64-entry array of 8-bit bytes
# 1. Prepare the message schedule, a 64-entry array of 32-bit words
W = []
for t in range(64):
if t <= 15:
# the first 16 words are just a copy of the block
W.append(bytes(M[t*4:t*4+4]))
else:
term1 = sig1(b2i(W[t-2]))
term2 = b2i(W[t-7])
term3 = sig0(b2i(W[t-15]))
term4 = b2i(W[t-16])
total = (term1 + term2 + term3 + term4) % 2**32
W.append(i2b(total))
# 2. Initialize the 8 working variables a,b,c,d,e,f,g,h with prev hash value
a, b, c, d, e, f, g, h = H
# 3.
for t in range(64):
T1 = (h + capsig1(e) + ch(e, f, g) + K[t] + b2i(W[t])) % 2**32
T2 = (capsig0(a) + maj(a, b, c)) % 2**32
h = g
g = f
f = e
e = (d + T1) % 2**32
d = c
c = b
b = a
a = (T1 + T2) % 2**32
# 4. Compute the i-th intermediate hash value H^i
delta = [a, b, c, d, e, f, g, h]
H = [(i1 + i2) % 2**32 for i1, i2 in zip(H, delta)]
return b''.join(i2b(i) for i in H)
if __name__ == '__main__':
import sys
assert len(sys.argv) == 2, "Pass in exactly one filename to return checksum of"
with open(sys.argv[1], 'rb') as f:
print(sha256(f.read()).hex())
| 2,466 |
653 | <reponame>mkinsner/llvm
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
vint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
vint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
vint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
vint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
vint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
vint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
vint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
vint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
vint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
vint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
vint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
vint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
vint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
vint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
vint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
vint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
vint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
vint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
vint64m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
vint64m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
vint64m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
vint64m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
vuint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
vuint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
vuint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
vuint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
vuint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
vuint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
vuint8m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
vuint64m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
vuint64m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
vuint64m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
vuint64m1_t scalar, size_t vl) {
return vredsum(dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
vint8mf8_t vector, vint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
vint8mf4_t vector, vint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
vint8mf2_t vector, vint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
vint8m1_t vector, vint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
vint8m2_t vector, vint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
vint8m4_t vector, vint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
vint8m8_t vector, vint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
vint16mf4_t vector, vint16m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
vint16mf2_t vector, vint16m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
vint16m1_t vector, vint16m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
vint16m2_t vector, vint16m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
vint16m4_t vector, vint16m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
vint16m8_t vector, vint16m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
vint32mf2_t vector, vint32m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
vint32m1_t vector, vint32m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
vint32m2_t vector, vint32m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
vint32m4_t vector, vint32m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
vint32m8_t vector, vint32m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
vint64m1_t vector, vint64m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
vint64m2_t vector, vint64m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
vint64m4_t vector, vint64m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
vint64m8_t vector, vint64m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
vuint8mf8_t vector, vuint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
vuint8mf4_t vector, vuint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
vuint8mf2_t vector, vuint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
vuint8m1_t vector, vuint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
vuint8m2_t vector, vuint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
vuint8m4_t vector, vuint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
vuint8m8_t vector, vuint8m1_t scalar,
size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
vuint16mf4_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
vuint16mf2_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
vuint16m1_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
vuint16m2_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
vuint16m4_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
vuint16m8_t vector,
vuint16m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
vuint32mf2_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
vuint32m1_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
vuint32m2_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
vuint32m4_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
vuint32m8_t vector,
vuint32m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
vuint64m1_t vector,
vuint64m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
vuint64m2_t vector,
vuint64m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
vuint64m4_t vector,
vuint64m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
vuint64m8_t vector,
vuint64m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
| 31,949 |
1,338 | <reponame>Kirishikesan/haiku<gh_stars>1000+
/*
* Copyright (c) 2005-2010, Haiku, Inc.
* Distributed under the terms of the MIT license.
*
* Author:
* DarkWyrm <<EMAIL>>
*/
#include "ResourceData.h"
#include "ResFields.h"
#include <stdlib.h>
ResourceData::ResourceData(void)
: fType(0),
fTypeString("Invalid"),
fID(-1),
fIDString("Invalid"),
fName(""),
fData(NULL),
fLength(0),
fAttr(false)
{
}
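// Note: the constructor below takes ownership of the passed 'data' buffer (the
// pointer is stored directly and free()d in the destructor), whereas SetData()
// copies the bytes it is given.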
ResourceData::ResourceData(const type_code &code, const int32 &id,
const char *name, char *data,
const size_t &length)
: fType(code),
fID(id),
fName(name),
fData(data),
fLength(length),
fAttr(false)
{
fIDString = "";
fIDString << fID;
fTypeString = MakeTypeString(code);
}
ResourceData::ResourceData(const ResourceData &data)
	:	fData(NULL),
		fLength(0)
{
	// operator=() calls SetData(), which free()s fData, so start with an empty
	// buffer instead of freeing an uninitialized pointer.
	*this = data;
}
ResourceData::~ResourceData(void)
{
free(fData);
}
ResourceData &
ResourceData::operator=(const ResourceData &data)
{
fType = data.fType;
fTypeString = data.fTypeString;
fID = data.fID;
fIDString = data.fIDString;
fName = data.fName;
fAttr = data.fAttr;
SetData(data.fData, data.fLength);
return *this;
}
bool
ResourceData::SetFromResource(const int32 &index, BResources &res)
{
char *name;
if (!res.GetResourceInfo(index, (type_code*)&fType, &fID,
(const char **)&name, &fLength)) {
*this = ResourceData();
return false;
}
fName = name;
fTypeString = MakeTypeString(fType);
fIDString = "";
fIDString << fID;
fAttr = false;
char *data = (char *)res.LoadResource(fType, fID, &fLength);
SetData(data, fLength);
return true;
}
bool
ResourceData::SetFromAttribute(const char *name, BNode &node)
{
attr_info info;
if (node.GetAttrInfo(name, &info) != B_OK) {
*this = ResourceData();
return false;
}
fType = info.type;
fID = -1;
fIDString = "(attr)";
fName = name;
fLength = info.size;
fAttr = true;
fTypeString = MakeTypeString(fType);
fData = (char *)malloc(fLength);
if (fData) {
ssize_t size = node.ReadAttr(name, info.type, 0, (void*)fData, fLength);
if (size >= 0) {
fLength = (size_t) size;
return true;
}
}
*this = ResourceData();
return false;
}
void
ResourceData::SetTo(const type_code &code, const int32 &id,
const char *name, char *data, const size_t &length)
{
fType = code;
fTypeString = MakeTypeString(code);
fID = id;
fIDString = "";
fIDString << fID;
fName = name;
SetData(data, length);
}
void
ResourceData::SetType(const type_code &code)
{
fType = code;
fTypeString = MakeTypeString(code);
}
void
ResourceData::SetID(const int32 &id)
{
fID = id;
fIDString = "";
fIDString << fID;
}
void
ResourceData::SetData(const char *data, const size_t &size)
{
free(fData);
fLength = size;
if (size > 0) {
fData = (char *)malloc(size);
memcpy(fData, data, size);
}
else
fData = NULL;
}
| 1,194 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000
{"nom":"Saint-Ferréol-des-Côtes","circ":"5ème circonscription","dpt":"Puy-de-Dôme","inscrits":460,"abs":183,"votants":277,"blancs":7,"nuls":5,"exp":265,"res":[{"nuance":"COM","nom":"M. <NAME>","voix":174},{"nuance":"REM","nom":"M. <NAME>","voix":91}]} | 134 |
767 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import json
import random
import time
import hashlib
from . import configs
class QCloudVoiceClient(object):
def __init__(self, http_client):
self.http_client = http_client
def get_random(self):
return random.randint(10000, 99999)
def get_cur_time(self):
return int(time.time())
def generate_sig(self, qcloud_app_key, mobile, random_int, now):
fmt = "appkey={}&random={}&time={}&mobile={}"
return hashlib.sha256(fmt.format(qcloud_app_key, random_int, now, mobile)).hexdigest()
def post(self, path, data):
return self.http_client.post(configs.host, path, data=json.dumps(data))
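# Hypothetical usage sketch (app_key, mobile, the URL path and the payload shape
# are placeholders, not values defined by this module):
#
#   client = QCloudVoiceClient(http_client)
#   now, rnd = client.get_cur_time(), client.get_random()
#   sig = client.generate_sig(app_key, mobile, rnd, now)
#   client.post("/some/voice/api/path", {"sig": sig, "time": now, "mobile": mobile})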
| 457 |
3,402 | <filename>core-metadata/src/main/java/org/apache/kylin/metadata/expression/BinaryTupleExpression.java<gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kylin.metadata.expression;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.kylin.common.util.DecimalUtil;
import org.apache.kylin.exception.QueryOnCubeException;
import org.apache.kylin.metadata.filter.IFilterCodeSystem;
import org.apache.kylin.metadata.tuple.IEvaluatableTuple;
import org.apache.kylin.shaded.com.google.common.collect.Lists;
public class BinaryTupleExpression extends TupleExpression {
public BinaryTupleExpression(ExpressionOperatorEnum op) {
this(op, Lists.<TupleExpression> newArrayListWithExpectedSize(2));
}
public BinaryTupleExpression(ExpressionOperatorEnum op, List<TupleExpression> exprs) {
super(op, exprs);
boolean opGood = (op == ExpressionOperatorEnum.PLUS || op == ExpressionOperatorEnum.MINUS
|| op == ExpressionOperatorEnum.MULTIPLE || op == ExpressionOperatorEnum.DIVIDE);
if (opGood == false)
throw new IllegalArgumentException("Unsupported operator " + op);
}
@Override
public boolean ifForDynamicColumn() {
return ifAbleToPushDown();
}
@Override
public void verify() {
switch (operator) {
case MULTIPLE:
verifyMultiply();
break;
case DIVIDE:
verifyDivide();
break;
default:
}
}
private void verifyMultiply() {
if (ExpressionColCollector.collectMeasureColumns(getLeft()).size() > 0 //
&& ExpressionColCollector.collectMeasureColumns(getRight()).size() > 0) {
throw new QueryOnCubeException(
"That both of the two sides of the BinaryTupleExpression own columns is not supported for "
+ operator.toString());
}
}
private void verifyDivide() {
if (ExpressionColCollector.collectMeasureColumns(getRight()).size() > 0) {
throw new QueryOnCubeException(
"That the right side of the BinaryTupleExpression owns columns is not supported for "
+ operator.toString());
}
}
@Override
public BigDecimal calculate(IEvaluatableTuple tuple, IFilterCodeSystem<?> cs) {
assert children.size() == 2;
BigDecimal left = DecimalUtil.toBigDecimal(getLeft().calculate(tuple, cs));
if (left == null)
return null;
BigDecimal right = DecimalUtil.toBigDecimal(getRight().calculate(tuple, cs));
if (right == null)
return null;
switch (operator) {
case PLUS:
return left.add(right);
case MINUS:
return left.subtract(right);
case MULTIPLE:
return left.multiply(right);
case DIVIDE:
return left.divide(right);
default:
throw new UnsupportedOperationException();
}
}
@Override
public TupleExpression accept(ExpressionVisitor visitor) {
return visitor.visitBinary(this);
}
@Override
public void serialize(IFilterCodeSystem<?> cs, ByteBuffer buffer) {
}
@Override
public void deserialize(IFilterCodeSystem<?> cs, ByteBuffer buffer) {
}
public TupleExpression getLeft() {
return children.get(0);
}
public TupleExpression getRight() {
return children.get(1);
}
public String toString() {
return operator.toString() + "(" + getLeft().toString() + "," + getRight().toString() + ")";
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
BinaryTupleExpression that = (BinaryTupleExpression) o;
if (operator != that.operator)
return false;
return children.equals(that.children);
}
@Override
public int hashCode() {
int result = operator != null ? operator.hashCode() : 0;
result = 31 * result + (children != null ? children.hashCode() : 0);
return result;
}
}
| 1,974 |
303 | <filename>dbaas/api/environment.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from rest_framework import viewsets, serializers
from physical.models import Environment
class EnvironmentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Environment
fields = ('url', 'id', 'name', 'stage', 'provisioner')
class EnvironmentAPI(viewsets.ReadOnlyModelViewSet):
"""
Environment API
"""
model = Environment
serializer_class = EnvironmentSerializer
queryset = Environment.objects.all()
filter_fields = (
'id',
'name',
'stage',
'provisioner'
)
def get_queryset(self):
params = self.request.GET.dict()
filter_params = {}
for k, v in params.iteritems():
if k == 'get_provisioner_by_label':
if hasattr(self.model, v.upper()):
label_id = getattr(self.model, v.upper())
filter_params['provisioner'] = label_id
else:
return self.model.objects.none()
elif k.split('__')[0] in self.filter_fields:
filter_params[k] = v
return self.model.objects.filter(**filter_params)
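    # Query sketch: a request such as ?get_provisioner_by_label=cloudstack (the
    # label value is hypothetical) resolves Environment.CLOUDSTACK, if the model
    # defines that constant, and filters on its numeric provisioner id; unknown
    # labels yield an empty queryset, while other recognized filter fields are
    # passed through unchanged.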
| 552 |
763 | package org.batfish.representation.juniper;
import static org.junit.Assert.assertEquals;
import com.google.common.collect.ImmutableList;
import java.util.List;
import org.batfish.datamodel.HeaderSpace;
import org.batfish.datamodel.acl.MatchHeaderSpace;
import org.batfish.datamodel.acl.OrMatchExpr;
import org.junit.Test;
/** Test for {@link ApplicationSet} */
public class ApplicationSetTest {
@Test
public void testToAclLineMatchExpr() {
/*
masterAppSet
- app1
- appSet
- app2
*/
JuniperConfiguration jc = new JuniperConfiguration();
jc.setFilename("host");
jc.getMasterLogicalSystem().getApplications().put("app2", new BaseApplication("app2"));
ApplicationSet appSet = new ApplicationSet("appSet");
appSet.setMembers(ImmutableList.of(new ApplicationReference("app2")));
jc.getMasterLogicalSystem().getApplicationSets().put("appSet", appSet);
jc.getMasterLogicalSystem().getApplications().put("app1", new BaseApplication("app1"));
ApplicationSet masterAppSet = new ApplicationSet("masterAppSet");
List<ApplicationSetMemberReference> members =
ImmutableList.of(new ApplicationReference("app1"), new ApplicationSetReference("appSet"));
masterAppSet.setMembers(members);
assertEquals(
masterAppSet.toAclLineMatchExpr(jc, null),
new OrMatchExpr(
ImmutableList.of(
new MatchHeaderSpace(
HeaderSpace.builder().build(),
ApplicationSetMember.getTraceElementForUserApplication(
"host", JuniperStructureType.APPLICATION, "app1")),
new OrMatchExpr(
ImmutableList.of(
new MatchHeaderSpace(
HeaderSpace.builder().build(),
ApplicationSetMember.getTraceElementForUserApplication(
"host", JuniperStructureType.APPLICATION, "app2"))),
ApplicationSetMember.getTraceElementForUserApplication(
"host", JuniperStructureType.APPLICATION_SET, "appSet"))),
ApplicationSetMember.getTraceElementForUserApplication(
"host", JuniperStructureType.APPLICATION_SET, "masterAppSet")));
}
}
| 946 |
6,989 | <filename>library/digest/lower_case/hash_ops.h
#pragma once
#include <util/generic/strbuf.h>
// can be used for caseless hashes like: THashMap<TStringBuf, T, TCIOps, TCIOps>
struct TCIOps {
size_t operator()(const char* s) const noexcept;
size_t operator()(const TStringBuf& s) const noexcept;
bool operator()(const char* f, const char* s) const noexcept;
bool operator()(const TStringBuf& f, const TStringBuf& s) const noexcept;
};
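// Usage sketch (illustrative; assumes THashMap from util/generic/hash.h):
//
//   THashMap<TStringBuf, int, TCIOps, TCIOps> counts;
//   counts["Foo"] = 1;
//   // a lookup with "FOO" or "foo" finds the same entry, because TCIOps hashes
//   // and compares keys case-insensitively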
| 169 |
1,338 | <gh_stars>1000+
#ifndef _GFX_CONV_MMX_H
#define _GFX_CONV_MMX_H
// BeOS and libavcodec bitmap formats
#include <GraphicsDefs.h>
#include "libavcodec/avcodec.h"
void gfx_conv_null_mmx(AVFrame *in, AVFrame *out, int width, int height);
// Planar
void gfx_conv_yuv420p_rgba32_sse(AVFrame *in, AVFrame *out, int width, int height);
void gfx_conv_yuv420p_rgba32_sse2(AVFrame *in, AVFrame *out, int width, int height);
void gfx_conv_yuv420p_rgba32_ssse3(AVFrame *in, AVFrame *out, int width, int height);
void gfx_conv_yuv422p_rgba32_sse(AVFrame *in, AVFrame *out, int width, int height);
void gfx_conv_yuv422p_rgba32_sse2(AVFrame *in, AVFrame *out, int width, int height);
void gfx_conv_yuv422p_rgba32_ssse3(AVFrame *in, AVFrame *out, int width, int height);
// Packed
void gfx_conv_yuv422_rgba32_sse(AVFrame *in, AVFrame *out, int width, int height);
void gfx_conv_yuv422_rgba32_sse2(AVFrame *in, AVFrame *out, int width, int height);
void gfx_conv_yuv422_rgba32_ssse3(AVFrame *in, AVFrame *out, int width, int height);
#endif
| 437 |
682 | <filename>abc/src/aig/saig/saigIso.c
/**CFile****************************************************************
FileName [saigIso.c]
SystemName [ABC: Logic synthesis and verification system.]
PackageName [Sequential AIG package.]
  Synopsis    [Structural isomorphism detection for sequential AIGs.]
Author [<NAME>]
Affiliation [UC Berkeley]
Date [Ver. 1.0. Started - June 20, 2005.]
Revision [$Id: saigIso.c,v 1.00 2005/06/20 00:00:00 alanmi Exp $]
***********************************************************************/
#include "aig/ioa/ioa.h"
#include "saig.h"
ABC_NAMESPACE_IMPL_START
////////////////////////////////////////////////////////////////////////
/// DECLARATIONS ///
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
/// FUNCTION DEFINITIONS ///
////////////////////////////////////////////////////////////////////////
/**Function*************************************************************
Synopsis [Find the canonical permutation of the COs.]
Description []
SideEffects []
SeeAlso []
***********************************************************************/
Vec_Int_t * Saig_ManFindIsoPermCos( Aig_Man_t * pAig, Vec_Int_t * vPermCis )
{
extern int Iso_ObjCompareByData( Aig_Obj_t ** pp1, Aig_Obj_t ** pp2 );
Vec_Int_t * vPermCos;
Aig_Obj_t * pObj, * pFanin;
int i, Entry, Diff;
assert( Vec_IntSize(vPermCis) == Aig_ManCiNum(pAig) );
vPermCos = Vec_IntAlloc( Aig_ManCoNum(pAig) );
if ( Saig_ManPoNum(pAig) == 1 )
Vec_IntPush( vPermCos, 0 );
else
{
Vec_Ptr_t * vRoots = Vec_PtrAlloc( Saig_ManPoNum(pAig) );
Saig_ManForEachPo( pAig, pObj, i )
{
pFanin = Aig_ObjFanin0(pObj);
assert( Aig_ObjIsConst1(pFanin) || pFanin->iData > 0 );
pObj->iData = Abc_Var2Lit( pFanin->iData, Aig_ObjFaninC0(pObj) );
Vec_PtrPush( vRoots, pObj );
}
Vec_PtrSort( vRoots, (int (*)(void))Iso_ObjCompareByData );
Vec_PtrForEachEntry( Aig_Obj_t *, vRoots, pObj, i )
Vec_IntPush( vPermCos, Aig_ObjCioId(pObj) );
Vec_PtrFree( vRoots );
}
// add flop outputs
Diff = Saig_ManPoNum(pAig) - Saig_ManPiNum(pAig);
Vec_IntForEachEntryStart( vPermCis, Entry, i, Saig_ManPiNum(pAig) )
Vec_IntPush( vPermCos, Entry + Diff );
return vPermCos;
}
/**Function*************************************************************
Synopsis [Performs canonical duplication of the AIG.]
Description []
SideEffects []
SeeAlso []
***********************************************************************/
void Saig_ManDupIsoCanonical_rec( Aig_Man_t * pNew, Aig_Man_t * pAig, Aig_Obj_t * pObj )
{
if ( Aig_ObjIsTravIdCurrent(pAig, pObj) )
return;
Aig_ObjSetTravIdCurrent(pAig, pObj);
assert( Aig_ObjIsNode(pObj) );
if ( !Aig_ObjIsNode(Aig_ObjFanin0(pObj)) || !Aig_ObjIsNode(Aig_ObjFanin1(pObj)) )
{
Saig_ManDupIsoCanonical_rec( pNew, pAig, Aig_ObjFanin0(pObj) );
Saig_ManDupIsoCanonical_rec( pNew, pAig, Aig_ObjFanin1(pObj) );
}
else
{
assert( Aig_ObjFanin0(pObj)->iData != Aig_ObjFanin1(pObj)->iData );
if ( Aig_ObjFanin0(pObj)->iData < Aig_ObjFanin1(pObj)->iData )
{
Saig_ManDupIsoCanonical_rec( pNew, pAig, Aig_ObjFanin0(pObj) );
Saig_ManDupIsoCanonical_rec( pNew, pAig, Aig_ObjFanin1(pObj) );
}
else
{
Saig_ManDupIsoCanonical_rec( pNew, pAig, Aig_ObjFanin1(pObj) );
Saig_ManDupIsoCanonical_rec( pNew, pAig, Aig_ObjFanin0(pObj) );
}
}
pObj->pData = Aig_And( pNew, Aig_ObjChild0Copy(pObj), Aig_ObjChild1Copy(pObj) );
}
/**Function*************************************************************
Synopsis [Performs canonical duplication of the AIG.]
Description []
SideEffects []
SeeAlso []
***********************************************************************/
Aig_Man_t * Saig_ManDupIsoCanonical( Aig_Man_t * pAig, int fVerbose )
{
Aig_Man_t * pNew;
Aig_Obj_t * pObj;
Vec_Int_t * vPerm, * vPermCo;
int i, Entry;
// derive permutations
vPerm = Saig_ManFindIsoPerm( pAig, fVerbose );
vPermCo = Saig_ManFindIsoPermCos( pAig, vPerm );
// create the new manager
pNew = Aig_ManStart( Aig_ManNodeNum(pAig) );
pNew->pName = Abc_UtilStrsav( pAig->pName );
Aig_ManIncrementTravId( pAig );
// create constant
pObj = Aig_ManConst1(pAig);
pObj->pData = Aig_ManConst1(pNew);
Aig_ObjSetTravIdCurrent( pAig, pObj );
// create PIs
Vec_IntForEachEntry( vPerm, Entry, i )
{
pObj = Aig_ManCi(pAig, Entry);
pObj->pData = Aig_ObjCreateCi(pNew);
Aig_ObjSetTravIdCurrent( pAig, pObj );
}
// traverse from the POs
Vec_IntForEachEntry( vPermCo, Entry, i )
{
pObj = Aig_ManCo(pAig, Entry);
Saig_ManDupIsoCanonical_rec( pNew, pAig, Aig_ObjFanin0(pObj) );
}
// create POs
Vec_IntForEachEntry( vPermCo, Entry, i )
{
pObj = Aig_ManCo(pAig, Entry);
Aig_ObjCreateCo( pNew, Aig_ObjChild0Copy(pObj) );
}
Aig_ManSetRegNum( pNew, Aig_ManRegNum(pAig) );
Vec_IntFreeP( &vPerm );
Vec_IntFreeP( &vPermCo );
return pNew;
}
/**Function*************************************************************
Synopsis [Checks structural equivalence of AIG1 and AIG2.]
Description [Returns 1 if AIG1 and AIG2 are structurally equivalent
under this mapping.]
SideEffects []
SeeAlso []
***********************************************************************/
int Iso_ManCheckMapping( Aig_Man_t * pAig1, Aig_Man_t * pAig2, Vec_Int_t * vMap2to1, int fVerbose )
{
Aig_Obj_t * pObj, * pFanin0, * pFanin1;
int i;
assert( Aig_ManCiNum(pAig1) == Aig_ManCiNum(pAig2) );
assert( Aig_ManCoNum(pAig1) == Aig_ManCoNum(pAig2) );
assert( Aig_ManRegNum(pAig1) == Aig_ManRegNum(pAig2) );
assert( Aig_ManNodeNum(pAig1) == Aig_ManNodeNum(pAig2) );
Aig_ManCleanData( pAig1 );
// map const and PI nodes
Aig_ManConst1(pAig2)->pData = Aig_ManConst1(pAig1);
Aig_ManForEachCi( pAig2, pObj, i )
pObj->pData = Aig_ManCi( pAig1, Vec_IntEntry(vMap2to1, i) );
// try internal nodes
Aig_ManForEachNode( pAig2, pObj, i )
{
pFanin0 = Aig_ObjChild0Copy( pObj );
pFanin1 = Aig_ObjChild1Copy( pObj );
pObj->pData = Aig_TableLookupTwo( pAig1, pFanin0, pFanin1 );
if ( pObj->pData == NULL )
{
if ( fVerbose )
printf( "Structural equivalence failed at node %d.\n", i );
return 0;
}
}
// make sure the first PO points to the same node
if ( Aig_ManCoNum(pAig1)-Aig_ManRegNum(pAig1) == 1 && Aig_ObjChild0Copy(Aig_ManCo(pAig2, 0)) != Aig_ObjChild0(Aig_ManCo(pAig1, 0)) )
{
if ( fVerbose )
printf( "Structural equivalence failed at primary output 0.\n" );
return 0;
}
return 1;
}
//static int s_Counter;
/**Function*************************************************************
Synopsis []
Description []
SideEffects []
SeeAlso []
***********************************************************************/
int Iso_ManNegEdgeNum( Aig_Man_t * pAig )
{
Aig_Obj_t * pObj;
int i, Counter = 0;
if ( pAig->nComplEdges > 0 )
return pAig->nComplEdges;
Aig_ManForEachObj( pAig, pObj, i )
if ( Aig_ObjIsNode(pObj) )
{
Counter += Aig_ObjFaninC0(pObj);
Counter += Aig_ObjFaninC1(pObj);
}
else if ( Aig_ObjIsCo(pObj) )
Counter += Aig_ObjFaninC0(pObj);
return (pAig->nComplEdges = Counter);
}
/**Function*************************************************************
Synopsis [Finds mapping of CIs of AIG2 into those of AIG1.]
Description [Returns the mapping of CIs of the two AIGs, or NULL
if there is no mapping.]
SideEffects []
SeeAlso []
***********************************************************************/
Vec_Int_t * Iso_ManFindMapping( Aig_Man_t * pAig1, Aig_Man_t * pAig2, Vec_Int_t * vPerm1_, Vec_Int_t * vPerm2_, int fVerbose )
{
Vec_Int_t * vPerm1, * vPerm2, * vInvPerm2;
int i, Entry;
if ( Aig_ManCiNum(pAig1) != Aig_ManCiNum(pAig2) )
return NULL;
if ( Aig_ManCoNum(pAig1) != Aig_ManCoNum(pAig2) )
return NULL;
if ( Aig_ManRegNum(pAig1) != Aig_ManRegNum(pAig2) )
return NULL;
if ( Aig_ManNodeNum(pAig1) != Aig_ManNodeNum(pAig2) )
return NULL;
if ( Aig_ManLevelNum(pAig1) != Aig_ManLevelNum(pAig2) )
return NULL;
// if ( Iso_ManNegEdgeNum(pAig1) != Iso_ManNegEdgeNum(pAig2) )
// return NULL;
// s_Counter++;
if ( fVerbose )
printf( "AIG1:\n" );
vPerm1 = vPerm1_ ? vPerm1_ : Saig_ManFindIsoPerm( pAig1, fVerbose );
if ( fVerbose )
printf( "AIG1:\n" );
vPerm2 = vPerm2_ ? vPerm2_ : Saig_ManFindIsoPerm( pAig2, fVerbose );
if ( vPerm1_ )
assert( Vec_IntSize(vPerm1_) == Aig_ManCiNum(pAig1) );
if ( vPerm2_ )
assert( Vec_IntSize(vPerm2_) == Aig_ManCiNum(pAig2) );
// find canonical permutation
// vPerm1/vPerm2 give canonical order of CIs of AIG1/AIG2
vInvPerm2 = Vec_IntInvert( vPerm2, -1 );
Vec_IntForEachEntry( vInvPerm2, Entry, i )
{
assert( Entry >= 0 && Entry < Aig_ManCiNum(pAig1) );
Vec_IntWriteEntry( vInvPerm2, i, Vec_IntEntry(vPerm1, Entry) );
}
if ( vPerm1_ == NULL )
Vec_IntFree( vPerm1 );
if ( vPerm2_ == NULL )
Vec_IntFree( vPerm2 );
// check if they are indeed equivalent
if ( !Iso_ManCheckMapping( pAig1, pAig2, vInvPerm2, fVerbose ) )
Vec_IntFreeP( &vInvPerm2 );
return vInvPerm2;
}
/**Function*************************************************************
Synopsis []
Description []
SideEffects []
SeeAlso []
***********************************************************************/
Aig_Man_t * Iso_ManFilterPos_old( Aig_Man_t * pAig, int fVerbose )
{
int fVeryVerbose = 0;
Vec_Ptr_t * vParts, * vPerms, * vAigs;
Vec_Int_t * vPos, * vMap;
Aig_Man_t * pPart, * pTemp;
int i, k, nPos;
// derive AIG for each PO
nPos = Aig_ManCoNum(pAig) - Aig_ManRegNum(pAig);
vParts = Vec_PtrAlloc( nPos );
vPerms = Vec_PtrAlloc( nPos );
for ( i = 0; i < nPos; i++ )
{
pPart = Saig_ManDupCones( pAig, &i, 1 );
vMap = Saig_ManFindIsoPerm( pPart, fVeryVerbose );
Vec_PtrPush( vParts, pPart );
Vec_PtrPush( vPerms, vMap );
}
// s_Counter = 0;
// check AIGs for each PO
vAigs = Vec_PtrAlloc( 1000 );
vPos = Vec_IntAlloc( 1000 );
Vec_PtrForEachEntry( Aig_Man_t *, vParts, pPart, i )
{
if ( fVeryVerbose )
{
printf( "AIG %4d : ", i );
Aig_ManPrintStats( pPart );
}
Vec_PtrForEachEntry( Aig_Man_t *, vAigs, pTemp, k )
{
if ( fVeryVerbose )
printf( "Comparing AIG %4d and AIG %4d. ", Vec_IntEntry(vPos,k), i );
vMap = Iso_ManFindMapping( pTemp, pPart,
(Vec_Int_t *)Vec_PtrEntry(vPerms, Vec_IntEntry(vPos,k)),
(Vec_Int_t *)Vec_PtrEntry(vPerms, i),
fVeryVerbose );
if ( vMap != NULL )
{
if ( fVeryVerbose )
printf( "Found match\n" );
// if ( fVerbose )
// printf( "Found match for AIG %4d and AIG %4d.\n", Vec_IntEntry(vPos,k), i );
Vec_IntFree( vMap );
break;
}
if ( fVeryVerbose )
printf( "No match.\n" );
}
if ( k == Vec_PtrSize(vAigs) )
{
Vec_PtrPush( vAigs, pPart );
Vec_IntPush( vPos, i );
}
}
// delete AIGs
Vec_PtrForEachEntry( Aig_Man_t *, vParts, pPart, i )
Aig_ManStop( pPart );
Vec_PtrFree( vParts );
Vec_PtrForEachEntry( Vec_Int_t *, vPerms, vMap, i )
Vec_IntFree( vMap );
Vec_PtrFree( vPerms );
// derive the resulting AIG
pPart = Saig_ManDupCones( pAig, Vec_IntArray(vPos), Vec_IntSize(vPos) );
Vec_PtrFree( vAigs );
Vec_IntFree( vPos );
// printf( "The number of all checks %d. Complex checks %d.\n", nPos*(nPos-1)/2, s_Counter );
return pPart;
}
/**Function*************************************************************
Synopsis [Takes multi-output sequential AIG.]
Description [Returns candidate equivalence classes of POs.]
SideEffects []
SeeAlso []
***********************************************************************/
int Iso_StoCompareVecStr( Vec_Str_t ** p1, Vec_Str_t ** p2 )
{
return Vec_StrCompareVec( *p1, *p2 );
}
/**Function*************************************************************
Synopsis []
Description []
SideEffects []
SeeAlso []
***********************************************************************/
Aig_Man_t * Iso_ManFilterPos( Aig_Man_t * pAig, Vec_Ptr_t ** pvPosEquivs, int fVerbose )
{
// int fVeryVerbose = 0;
Aig_Man_t * pPart, * pTemp;
Vec_Ptr_t * vBuffers, * vClasses;
Vec_Int_t * vLevel, * vRemain;
Vec_Str_t * vStr, * vPrev;
int i, nPos;
abctime clk = Abc_Clock();
abctime clkDup = 0, clkAig = 0, clkIso = 0, clk2;
*pvPosEquivs = NULL;
// derive AIG for each PO
nPos = Aig_ManCoNum(pAig) - Aig_ManRegNum(pAig);
vBuffers = Vec_PtrAlloc( nPos );
for ( i = 0; i < nPos; i++ )
{
if ( i % 100 == 0 )
printf( "%6d finished...\r", i );
clk2 = Abc_Clock();
pPart = Saig_ManDupCones( pAig, &i, 1 );
clkDup += Abc_Clock() - clk2;
clk2 = Abc_Clock();
pTemp = Saig_ManDupIsoCanonical( pPart, 0 );
clkIso += Abc_Clock() - clk2;
clk2 = Abc_Clock();
vStr = Ioa_WriteAigerIntoMemoryStr( pTemp );
clkAig += Abc_Clock() - clk2;
Vec_PtrPush( vBuffers, vStr );
Aig_ManStop( pTemp );
Aig_ManStop( pPart );
// remember the output number in nCap (attention: hack!)
vStr->nCap = i;
}
// s_Counter = 0;
if ( fVerbose )
{
Abc_PrintTime( 1, "Duplicate time", clkDup );
Abc_PrintTime( 1, "Isomorph time", clkIso );
Abc_PrintTime( 1, "AIGER time", clkAig );
}
// sort the infos
clk = Abc_Clock();
Vec_PtrSort( vBuffers, (int (*)(void))Iso_StoCompareVecStr );
// create classes
clk = Abc_Clock();
vClasses = Vec_PtrAlloc( Saig_ManPoNum(pAig) );
// start the first class
Vec_PtrPush( vClasses, (vLevel = Vec_IntAlloc(4)) );
vPrev = (Vec_Str_t *)Vec_PtrEntry( vBuffers, 0 );
Vec_IntPush( vLevel, vPrev->nCap );
// consider other classes
Vec_PtrForEachEntryStart( Vec_Str_t *, vBuffers, vStr, i, 1 )
{
if ( Vec_StrCompareVec(vPrev, vStr) )
Vec_PtrPush( vClasses, Vec_IntAlloc(4) );
vLevel = (Vec_Int_t *)Vec_PtrEntryLast( vClasses );
Vec_IntPush( vLevel, vStr->nCap );
vPrev = vStr;
}
Vec_VecFree( (Vec_Vec_t *)vBuffers );
if ( fVerbose )
Abc_PrintTime( 1, "Sorting time", Abc_Clock() - clk );
// Abc_PrintTime( 1, "Traversal time", time_Trav );
// report the results
// Vec_VecPrintInt( (Vec_Vec_t *)vClasses );
// printf( "Devided %d outputs into %d cand equiv classes.\n", Saig_ManPoNum(pAig), Vec_PtrSize(vClasses) );
/*
if ( fVerbose )
{
Vec_PtrForEachEntry( Vec_Int_t *, vClasses, vLevel, i )
if ( Vec_IntSize(vLevel) > 1 )
printf( "%d ", Vec_IntSize(vLevel) );
else
nUnique++;
printf( " Unique = %d\n", nUnique );
}
*/
// canonicize order
Vec_PtrForEachEntry( Vec_Int_t *, vClasses, vLevel, i )
Vec_IntSort( vLevel, 0 );
Vec_VecSortByFirstInt( (Vec_Vec_t *)vClasses, 0 );
// collect the first ones
vRemain = Vec_IntAlloc( 100 );
Vec_PtrForEachEntry( Vec_Int_t *, vClasses, vLevel, i )
Vec_IntPush( vRemain, Vec_IntEntry(vLevel, 0) );
// derive the resulting AIG
pPart = Saig_ManDupCones( pAig, Vec_IntArray(vRemain), Vec_IntSize(vRemain) );
Vec_IntFree( vRemain );
// return (Vec_Vec_t *)vClasses;
// Vec_VecFree( (Vec_Vec_t *)vClasses );
*pvPosEquivs = vClasses;
return pPart;
}
/**Function*************************************************************
Synopsis []
Description []
SideEffects []
SeeAlso []
***********************************************************************/
Aig_Man_t * Iso_ManTest( Aig_Man_t * pAig, int fVerbose )
{
Vec_Int_t * vPerm;
abctime clk = Abc_Clock();
vPerm = Saig_ManFindIsoPerm( pAig, fVerbose );
Vec_IntFree( vPerm );
Abc_PrintTime( 1, "Time", Abc_Clock() - clk );
return NULL;
}
/**Function*************************************************************
Synopsis []
Description []
SideEffects []
SeeAlso []
***********************************************************************/
Aig_Man_t * Saig_ManIsoReduce( Aig_Man_t * pAig, Vec_Ptr_t ** pvPosEquivs, int fVerbose )
{
Aig_Man_t * pPart;
abctime clk = Abc_Clock();
pPart = Iso_ManFilterPos( pAig, pvPosEquivs, fVerbose );
printf( "Reduced %d outputs to %d outputs. ", Saig_ManPoNum(pAig), Saig_ManPoNum(pPart) );
Abc_PrintTime( 1, "Time", Abc_Clock() - clk );
if ( fVerbose && *pvPosEquivs && Saig_ManPoNum(pAig) != Vec_PtrSize(*pvPosEquivs) )
{
printf( "Nontrivial classes:\n" );
Vec_VecPrintInt( (Vec_Vec_t *)*pvPosEquivs, 1 );
}
// Aig_ManStopP( &pPart );
return pPart;
}
ABC_NAMESPACE_IMPL_END
#include "base/abc/abc.h"
ABC_NAMESPACE_IMPL_START
/**Function*************************************************************
Synopsis []
Description []
SideEffects []
SeeAlso []
***********************************************************************/
Aig_Man_t * Iso_ManTest888( Aig_Man_t * pAig1, int fVerbose )
{
extern Aig_Man_t * Abc_NtkToDar( Abc_Ntk_t * pNtk, int fExors, int fRegisters );
extern Abc_Ntk_t * Abc_NtkFromAigPhase( Aig_Man_t * pMan );
Abc_Ntk_t * pNtk;
Aig_Man_t * pAig2;
Vec_Int_t * vMap;
pNtk = Abc_NtkFromAigPhase( pAig1 );
Abc_NtkPermute( pNtk, 1, 0, 1, NULL );
pAig2 = Abc_NtkToDar( pNtk, 0, 1 );
Abc_NtkDelete( pNtk );
vMap = Iso_ManFindMapping( pAig1, pAig2, NULL, NULL, fVerbose );
Aig_ManStop( pAig2 );
if ( vMap != NULL )
{
printf( "Mapping of AIGs is found.\n" );
if ( fVerbose )
Vec_IntPrint( vMap );
}
else
printf( "Mapping of AIGs is NOT found.\n" );
Vec_IntFreeP( &vMap );
return NULL;
}
////////////////////////////////////////////////////////////////////////
/// END OF FILE ///
////////////////////////////////////////////////////////////////////////
ABC_NAMESPACE_IMPL_END
| 9,136 |
2,308 | <filename>saturn-core/src/main/java/com/vip/saturn/job/utils/LogEvents.java
/**
* Copyright 2016 vip.com.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
* </p>
**/
package com.vip.saturn.job.utils;
public class LogEvents {
public static class ExecutorEvent {
public static final String VERSION_UPGRADE = "VERSION_UPGRADE";
public static final String INIT = "EXECUTOR_INIT";
public static final String GRACEFUL_SHUTDOWN = "EXECUTOR_SHUTDOWN_GRACEFULLY";
public static final String SHUTDOWN = "EXECUTOR_SHUTDOWN";
public static final String INIT_OR_SHUTDOWN = "EXECUTOR_INIT_OR_SHUTDOWN";
public static final String REINIT = "EXECUTOR_REINIT";
public static final String RESTART = "EXECUTOR_RESTART";
public static final String DUMP = "EXECUTOR_DUMP";
public static final String COMMON = "COMMON";
public static final String FORCE_STOP = "FORCE_STOP";
}
}
| 441 |
2,288 | #ifndef CCAN_CRYPTO_HKDF_SHA256_H
#define CCAN_CRYPTO_HKDF_SHA256_H
/* BSD-MIT - see LICENSE file for details */
#include "config.h"
#include <stdlib.h>
/**
* hkdf_sha256 - generate a derived key
* @okm: where to output the key
* @okm_size: the number of bytes pointed to by @okm (must be less than 255*32)
* @s: salt
* @ssize: the number of bytes pointed to by @s
* @k: pointer to input key
* @ksize: the number of bytes pointed to by @k
* @info: pointer to info
* @isize: the number of bytes pointed to by @info
*/
void hkdf_sha256(void *okm, size_t okm_size,
const void *s, size_t ssize,
const void *k, size_t ksize,
const void *info, size_t isize);
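/*
 * Usage sketch (illustrative only; the buffer contents and sizes are arbitrary):
 *
 *   unsigned char okm[32];
 *   const char salt[] = "salt", ikm[] = "input keying material", info[] = "ctx";
 *   hkdf_sha256(okm, sizeof(okm),
 *               salt, sizeof(salt) - 1,
 *               ikm, sizeof(ikm) - 1,
 *               info, sizeof(info) - 1);
 *   // okm now holds 32 bytes of key material derived per RFC 5869 with SHA-256
 */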
#endif /* CCAN_CRYPTO_HKDF_SHA256_H */
| 274 |
892 | <gh_stars>100-1000
{
"schema_version": "1.2.0",
"id": "GHSA-2mhq-qg26-p24h",
"modified": "2022-04-29T02:57:09Z",
"published": "2022-04-29T02:57:09Z",
"aliases": [
"CVE-2004-0116"
],
"details": "An Activation function in the RPCSS Service involved with DCOM activation for Microsoft Windows 2000, XP, and 2003 allows remote attackers to cause a denial of service (memory consumption) via an activation request with a large length field.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2004-0116"
},
{
"type": "WEB",
"url": "https://docs.microsoft.com/en-us/security-updates/securitybulletins/2004/ms04-012"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/15708"
},
{
"type": "WEB",
"url": "https://oval.cisecurity.org/repository/search/definition/oval%3Aorg.mitre.oval%3Adef%3A955"
},
{
"type": "WEB",
"url": "https://oval.cisecurity.org/repository/search/definition/oval%3Aorg.mitre.oval%3Adef%3A957"
},
{
"type": "WEB",
"url": "https://oval.cisecurity.org/repository/search/definition/oval%3Aorg.mitre.oval%3Adef%3A958"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/11065/"
},
{
"type": "WEB",
"url": "http://securitytracker.com/alerts/2004/Apr/1009758.html"
},
{
"type": "WEB",
"url": "http://www.ciac.org/ciac/bulletins/o-115.shtml"
},
{
"type": "WEB",
"url": "http://www.eeye.com/html/Research/Advisories/AD20040413A.html"
},
{
"type": "WEB",
"url": "http://www.kb.cert.org/vuls/id/417052"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/10127"
},
{
"type": "WEB",
"url": "http://www.us-cert.gov/cas/techalerts/TA04-104A.html"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 998 |
318 | <reponame>Sunlitspace542/SNESticle
#ifndef _NETSOCKET_H
#define _NETSOCKET_H
#include "llnetsocket.h"
#include "netsys.h"
struct NetSocket_t;
typedef int (*NetSocketFuncT)(struct NetSocket_t *pSocket, void *pUserData);
typedef enum
{
NETSOCKET_STATUS_INVALID,
NETSOCKET_STATUS_LISTENING,
NETSOCKET_STATUS_CONNECTING,
NETSOCKET_STATUS_CONNECTED,
NETSOCKET_STATUS_DISCONNECTING,
} NetSocketStatusE;
typedef enum
{
NETSOCKET_ERROR_NONE = 0 ,
NETSOCKET_ERROR_THREAD = -100,
NETSOCKET_ERROR_ACCEPT,
NETSOCKET_ERROR_BIND,
NETSOCKET_ERROR_SELECT,
NETSOCKET_ERROR_SOCKET,
} NetSocketErrorE;
typedef struct NetSocket_t
{
int status;
SOCKET socket;
NetSysThreadT threadid;
void *pUserData;
NetSocketFuncT pReadFunc;
NetSocketFuncT pWriteFunc;
NetSocketFuncT pAbortFunc;
NetSocketAddrT PeerAddr;
} NetSocketT;
void NetSocketNew(NetSocketT *pSocket, void *pUserData);
void NetSocketDelete(NetSocketT *pSocket);
void NetSocketCopy(NetSocketT *pDest, NetSocketT *pSrc);
int NetSocketListen(NetSocketT *pSocket, int port);
int NetSocketConnect(NetSocketT *pSocket, unsigned ipaddr, int port);
int NetSocketAccept(NetSocketT *pListen, NetSocketT *pAccept);
int NetSocketProcess(NetSocketT *pSocket);
void NetSocketDisconnect(NetSocketT *pSocket);
void NetSocketSetFunc(NetSocketT *pSocket, NetSocketFuncT pReadFunc, NetSocketFuncT pWriteFunc, NetSocketFuncT pAbortFunc);
int NetSocketBindUDP(NetSocketT *pSocket, int port);
int NetSocketRecv(NetSocketT *pSocket, char *pBuffer, int nBytes, int flags);
int NetSocketSend(NetSocketT *pSocket, char *pBuffer, int nBytes, int flags);
int NetSocketRecvFrom(NetSocketT *pSocket, char *pBuffer, int nBytes, NetSocketAddrT *pAddr, int flags);
int NetSocketSendTo(NetSocketT *pSocket, char *pBuffer, int nBytes, NetSocketAddrT *pAddr, int flags);
int NetSocketRecvBytes(NetSocketT *pSocket, char *pBuffer, int nBytes, int flags);
void NetSocketGetLocalAddr(NetSocketT *pSocket, NetSocketAddrT *pAddr);
void NetSocketGetRemoteAddr(NetSocketT *pSocket, NetSocketAddrT *pAddr);
int NetSocketGetSocket(NetSocketT *pSocket);
NetSocketStatusE NetSocketGetStatus(NetSocketT *pSocket);
int NetSocketAddrGetPort(NetSocketAddrT *pAddr);
unsigned int NetSocketIpAddr(int a, int b, int c, int d);
int NetSocketAddrIsEqual(NetSocketAddrT *pAddrA, NetSocketAddrT *pAddrB);
#endif
| 1,036 |
787 | // OJ: https://leetcode.com/problems/design-tic-tac-toe/
// Author: github.com/lzl124631x
// Time: O(1)
// Space: O(N)
class TicTacToe {
private:
vector<int> rows, cols;
int n, diag = 0, anti = 0;
public:
    TicTacToe(int n) : rows(n, 0), cols(n, 0), n(n) {}
int move(int row, int col, int player) {
int d = player == 1 ? 1 : -1;
rows[row] += d;
cols[col] += d;
if (row == col) diag += d;
if (row + col == n - 1) anti += d;
if (abs(rows[row]) == n
|| abs(cols[col]) == n
|| abs(diag) == n
|| abs(anti) == n) return player;
return 0;
}
}; | 343 |
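// How it works: each move bumps per-row, per-column and per-diagonal counters
// (+1 for player 1, -1 for player 2); a counter reaching +/-n means one player
// fills that line. Usage sketch for a hypothetical 3x3 game:
//
//   TicTacToe t(3);
//   t.move(0, 0, 1); // 0 - no winner yet
//   t.move(1, 1, 2); // 0
//   t.move(0, 1, 1); // 0
//   t.move(2, 2, 2); // 0
//   t.move(0, 2, 1); // 1 - player 1 completes row 0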
402 | import requests
import configuration
import performance
from mamba import description, context, it
from expects import expect, be_true, have_length, equal, be_a, have_property, be_none
rule_name="route-rule-http-redirect.yaml"
Rule=configuration.Rule()
with description('Testing HTTP Redirect'):
with before.all:
#Read Config file
configuration.setenv(self)
with context('Set environment'):
with it('Add routing rule'):
Rule.add(rule_name)
with context('Starting test'):
with it('Testing HTTP Redirect'):
while self.total_count < self.request_count:
r = requests.get(self.url,allow_redirects=False)
r.status_code
expect(r.status_code).to(equal(301))
self.total_count += 1
configuration.generate_request(self,rule_name)
with context('Clean environment'):
with it('Delete routing rule'):
Rule.delete(rule_name)
| 402 |
2,151 | <filename>google_apis/gcm/engine/gcm_registration_request_handler.cc
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "google_apis/gcm/engine/gcm_registration_request_handler.h"
#include "base/metrics/histogram_macros.h"
#include "google_apis/gcm/base/gcm_util.h"
namespace gcm {
namespace {
// Request constants.
const char kSenderKey[] = "sender";
} // namespace
GCMRegistrationRequestHandler::GCMRegistrationRequestHandler(
const std::string& senders)
: senders_(senders) {
}
GCMRegistrationRequestHandler::~GCMRegistrationRequestHandler() {}
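// Builds the registration-specific part of the request body as a form-encoded
// "sender=<senders>" pair; the sender IDs arrive already joined in |senders_|.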
void GCMRegistrationRequestHandler::BuildRequestBody(std::string* body) {
BuildFormEncoding(kSenderKey, senders_, body);
}
void GCMRegistrationRequestHandler::ReportUMAs(
RegistrationRequest::Status status,
int retry_count,
base::TimeDelta complete_time) {
UMA_HISTOGRAM_ENUMERATION("GCM.RegistrationRequestStatus",
status,
RegistrationRequest::STATUS_COUNT);
// Other UMAs are only reported when the request succeeds.
if (status != RegistrationRequest::SUCCESS)
return;
UMA_HISTOGRAM_COUNTS("GCM.RegistrationRetryCount", retry_count);
UMA_HISTOGRAM_TIMES("GCM.RegistrationCompleteTime", complete_time);
}
} // namespace gcm
| 494 |
1,056 | <reponame>timfel/netbeans
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.javafx2.editor.completion.impl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.lang.model.type.TypeMirror;
import org.netbeans.api.editor.mimelookup.MimeRegistration;
import org.netbeans.api.java.source.TypeMirrorHandle;
import org.netbeans.modules.javafx2.editor.JavaFXEditorUtils;
import org.netbeans.modules.javafx2.editor.completion.beans.FxBean;
import org.netbeans.modules.javafx2.editor.completion.beans.FxDefinitionKind;
import org.netbeans.modules.javafx2.editor.completion.beans.FxProperty;
import org.netbeans.modules.javafx2.editor.completion.model.FxClassUtils;
import org.netbeans.modules.javafx2.editor.completion.model.FxInstance;
import org.netbeans.modules.javafx2.editor.completion.model.FxNewInstance;
import org.netbeans.modules.javafx2.editor.completion.model.FxXmlSymbols;
import org.netbeans.modules.javafx2.editor.completion.model.PropertyValue;
import org.netbeans.spi.editor.completion.CompletionItem;
import org.netbeans.spi.editor.completion.CompletionProvider;
/**
 * Creates property name completions. Activates in tag names or attribute names.
*
* @author sdedic
*/
@MimeRegistration(mimeType=JavaFXEditorUtils.FXML_MIME_TYPE, service=Completer.Factory.class)
public class PropertyCompleter extends InstanceCompleter {
/**
* Property names which already exist on the tag
*/
private Set<String> existingPropNames = new HashSet<String>();
public PropertyCompleter() {
}
public PropertyCompleter(FxInstance instance, boolean attribute, CompletionContext context) {
super(instance, attribute, context);
}
private List<CompletionItem> resultItems = new ArrayList<CompletionItem>();
private boolean itemsFiltered;
private String namePrefix;
public boolean hasMoreItems() {
return itemsFiltered;
}
/**
     * Adds properties from the given bean info. Does not add properties whose names
     * are in the 'alreadyAdded' set.
*
* @param beanInfo
* @param alreadyAdded
* @param dontMark
*/
private void addPropertiesFrom(FxBean beanInfo, Set<String> alreadyAdded, boolean dontMark) {
if (beanInfo == null) {
return;
}
Collection<String> propNames = filterNames(new ArrayList<String>(attribute ?
beanInfo.getSimplePropertyNames() : beanInfo.getPropertyNames()));
FxBean parentInfo = beanInfo.getSuperclassInfo();
for (String s : propNames) {
if (alreadyAdded.contains(s)) {
continue;
}
FxProperty pi = beanInfo.getProperty(s);
boolean propInherited = parentInfo != null && parentInfo.getProperty(s) != null;
if (existingPropNames.contains(s)) {
// if replacing, leave the property being replaced in the list
if (!s.startsWith(namePrefix) || !ctx.isReplaceExisting()) {
continue;
}
}
if (attribute && !pi.isSimple()) {
continue;
}
PropertyElementItem item = new PropertyElementItem(ctx, s, attribute);
@SuppressWarnings("rawtypes")
TypeMirrorHandle typeH = pi.getType();
if (typeH != null) {
TypeMirror tm = typeH.resolve(ctx.getCompilationInfo());
if (tm != null) {
String typeString = ctx.getCompilationInfo().getTypeUtilities().
getTypeName(tm).toString();
item.setPropertyType(typeString);
item.setPrimitive(FxClassUtils.isSimpleType(tm, ctx.getCompilationInfo()));
item.setInherited(dontMark || propInherited);
alreadyAdded.add(s);
}
}
item.setMap(pi.getKind() == FxDefinitionKind.MAP);
resultItems.add(item);
}
}
private static final int IMPORTANT_PROPERTIES_TRESHOLD = 10;
/**
* Adds up to approx IMPORTANT_PROPERTIES_TRESHOLD from the class and superclasses.
* Stops when # of properties after adding certain beaninfo exceeds the treshold.
*/
private void addImportantProperties() {
FxBean beanInfo = getBeanInfo();
if (beanInfo == null) {
return;
}
HashSet<String> names = new HashSet<String>();
boolean next = false;
do {
addPropertiesFrom(beanInfo.getDeclareadInfo(), names, next);
if (beanInfo.getBuilder() != null) {
addPropertiesFrom(beanInfo.getBuilder().getDeclareadInfo(), names, next);
}
beanInfo = beanInfo.getSuperclassInfo();
next = true;
} while (beanInfo != null && resultItems.size() < IMPORTANT_PROPERTIES_TRESHOLD);
}
private void init() {
namePrefix = ctx.getPrefix();
if (namePrefix.startsWith("<")) {
namePrefix = namePrefix.substring(1);
}
for (PropertyValue pv : (Collection<PropertyValue>)instance.getProperties()) {
existingPropNames.add(pv.getPropertyName());
}
}
@Override
public List<CompletionItem> complete() {
init();
if (getBeanInfo() == null) {
return null;
}
Set<String> names = new HashSet<String>();
if (ctx.getCompletionType() == CompletionProvider.COMPLETION_QUERY_TYPE) {
addImportantProperties();
if (resultItems.isEmpty()) {
addPropertiesFrom(getBeanInfo(), names, false);
addPropertiesFrom(getBeanInfo().getBuilder(), names, false);
}
} else if (ctx.getCompletionType() == CompletionProvider.COMPLETION_ALL_QUERY_TYPE) {
addPropertiesFrom(getBeanInfo(), names, false);
addPropertiesFrom(getBeanInfo().getBuilder(), names, false);
}
if (ctx.getType() == CompletionContext.Type.PROPERTY) {
String ns = ctx.findFxmlNsPrefix();
if (instance.getId() == null) {
if ("id".startsWith(namePrefix) || // NOI18N
(ns != null && (ns + ":id").startsWith(namePrefix))) { // NOI18N
// suggest also fx:id
PropertyElementItem pi = new PropertyElementItem(ctx, "fx:id", // NOI18N
true);
pi.setPrimitive(true);
pi.setInherited(false);
pi.setSystem(true);
pi.setNamespaceCreator(CompletionUtils.makeFxNamespaceCreator(ctx));
pi.setPropertyType("String"); // NOI18N
resultItems.add(pi);
}
}
if (ctx.isRootElement() && ctx.getModel().getController() == null) {
if ("controller".startsWith(namePrefix) || // NOI18N
(ns != null && (ns + ":controller").startsWith(namePrefix))) { // NOI18N
                    // suggest also fx:controller
PropertyElementItem pi = new PropertyElementItem(ctx, "fx:controller", // NOI18N
true);
pi.setPrimitive(true);
pi.setInherited(false);
pi.setSystem(true);
pi.setNamespaceCreator(CompletionUtils.makeFxNamespaceCreator(ctx));
pi.setPropertyType("Class"); // NOI18N
resultItems.add(pi);
}
}
if (instance instanceof FxNewInstance) {
FxNewInstance newInst = (FxNewInstance)instance;
if (newInst.getFactoryMethod() == null &&
newInst.getInitValue() == null) {
// check that the instance's definition has some constants to suggest
if (!newInst.getDefinition().getConstants().isEmpty()) {
// suggest fx:constant
PropertyElementItem pi = new PropertyElementItem(ctx, "fx:constant", // NOI18N
true);
pi.setPrimitive(true);
pi.setInherited(false);
pi.setSystem(true);
pi.setNamespaceCreator(CompletionUtils.makeFxNamespaceCreator(ctx));
pi.setPropertyType(newInst.getDefinition().getClassName()); // NOI18N
resultItems.add(pi);
}
}
}
}
return resultItems;
}
@Override
protected InstanceCompleter createCompleter(FxInstance instance, boolean attribute, CompletionContext ctx) {
return new PropertyCompleter(instance, attribute, ctx);
}
}
| 4,392 |
1,180 | <reponame>cntrump/libtomcrypt<filename>src/modes/ctr/ctr_decrypt.c<gh_stars>1000+
/* LibTomCrypt, modular cryptographic library -- <NAME> */
/* SPDX-License-Identifier: Unlicense */
#include "tomcrypt_private.h"
/**
@file ctr_decrypt.c
CTR implementation, decrypt data, <NAME>
*/
#ifdef LTC_CTR_MODE
/**
CTR decrypt
@param ct Ciphertext
@param pt [out] Plaintext
@param len Length of ciphertext (octets)
@param ctr CTR state
@return CRYPT_OK if successful
*/
int ctr_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, symmetric_CTR *ctr)
{
LTC_ARGCHK(pt != NULL);
LTC_ARGCHK(ct != NULL);
LTC_ARGCHK(ctr != NULL);
return ctr_encrypt(ct, pt, len, ctr);
}
#endif
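/*
   Usage sketch: the usual register/start/decrypt/done sequence around ctr_decrypt().
   AES, the 16-byte key/IV sizes and the ct/pt buffers are illustrative assumptions,
   not requirements of this file.

   register_cipher(&aes_desc);
   symmetric_CTR ctr;
   unsigned char key[16], iv[16], ct[64], pt[64];
   if (ctr_start(find_cipher("aes"), iv, key, 16, 0,
                 CTR_COUNTER_LITTLE_ENDIAN, &ctr) != CRYPT_OK) { return; }
   if (ctr_decrypt(ct, pt, sizeof(ct), &ctr) != CRYPT_OK) { return; }
   ctr_done(&ctr);
*/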
| 300 |
2,338 | <gh_stars>1000+
//===- unittests/AST/AttrTests.cpp --- Attribute tests --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Attr.h"
#include "clang/Basic/AttrKinds.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using namespace clang;
namespace {
TEST(Attr, Doc) {
EXPECT_THAT(Attr::getDocumentation(attr::Used).str(),
testing::HasSubstr("The compiler must emit the definition even "
"if it appears to be unused"));
}
} // namespace
| 281 |
585 | <filename>appindexing/app/src/main/java/com/google/firebase/example/appindexing/MainActivity.java
package com.google.firebase.example.appindexing;
import android.os.Bundle;
import androidx.appcompat.app.AppCompatActivity;
import com.google.android.gms.tasks.Task;
import com.google.firebase.appindexing.Action;
import com.google.firebase.appindexing.FirebaseAppIndex;
import com.google.firebase.appindexing.FirebaseUserActions;
import com.google.firebase.appindexing.Indexable;
import com.google.firebase.appindexing.builders.Indexables;
import com.google.firebase.example.appindexing.model.Note;
import com.google.firebase.example.appindexing.model.Recipe;
public class MainActivity extends AppCompatActivity {
private Note mNote;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
}
// [START appindexing_onstart_onstop]
@Override
protected void onStart() {
super.onStart();
// If you’re logging an action on content that hasn’t been added to the index yet,
// add it first.
        // See https://firebase.google.com/docs/app-indexing/android/personal-content#update-the-index.
FirebaseUserActions.getInstance(this).start(getRecipeViewAction());
}
@Override
protected void onStop() {
FirebaseUserActions.getInstance(this).end(getRecipeViewAction());
super.onStop();
}
// [END appindexing_onstart_onstop]
// [START appindexing_instantaneous]
public void displayNoteDialog(final String positiveText, final String negativeText) {
// ...
// If you’re logging an action on content that hasn’t been added to the index yet,
// add it first.
        // See https://firebase.google.com/docs/app-indexing/android/personal-content#update-the-index.
FirebaseUserActions.getInstance(this).end(getNoteCommentAction());
// ...
}
public Action getNoteCommentAction() {
return new Action.Builder(Action.Builder.COMMENT_ACTION)
.setObject(mNote.getTitle(), mNote.getNoteUrl())
                // Keep action data for personal content on the device
.setMetadata(new Action.Metadata.Builder().setUpload(false))
.build();
}
// [END appindexing_instantaneous]
// [START appindexing_update]
public void indexNote(Recipe recipe) {
Note note = recipe.getNote();
Indexable noteToIndex = Indexables.noteDigitalDocumentBuilder()
.setName(recipe.getTitle())
.setText(note.getText())
.setUrl(recipe.getNoteUrl())
.build();
Task<Void> task = FirebaseAppIndex.getInstance(this).update(noteToIndex);
// ...
}
// [END appindexing_update]
private void removeNote(Recipe recipe) {
// [START appindexing_remove_one]
// Deletes or removes the corresponding notes from index.
String noteUrl = recipe.getNoteUrl();
FirebaseAppIndex.getInstance(this).remove(noteUrl);
// [END appindexing_remove_one]
}
public void removeAll() {
// [START appindexing_remove_all]
FirebaseAppIndex.getInstance(this).removeAll();
// [END appindexing_remove_all]
}
public Action getRecipeViewAction() {
// This is just to make some things compile.
return null;
}
}
| 1,386 |
1,390 |
static int SIZE = 20;
| 11 |
1,405 | <reponame>jarekankowski/pegasus_spyware
package com.lenovo.lps.sus.control;
import android.os.Message;
import android.widget.CompoundButton;
import com.lenovo.lps.sus.b.b;
import com.lenovo.lps.sus.c.a;
/* access modifiers changed from: package-private */
public final class w implements CompoundButton.OnCheckedChangeListener {
private final /* synthetic */ int a;
w(int i) {
this.a = i;
}
public final void onCheckedChanged(CompoundButton compoundButton, boolean z) {
Integer num;
Integer.valueOf(0);
if (z) {
num = 1;
a.d();
} else {
num = 0;
}
if (am.z != null) {
Message message = new Message();
message.what = b.a(b.SUS_USER_CHANGESETTINGS_EVENT);
message.obj = Integer.valueOf(this.a);
message.arg1 = num.intValue();
am.z.sendMessage(message);
}
}
}
| 440 |
1,157 | <reponame>sdrik/MaxScale
/**
* MXS-1743: Maxscale unable to enforce round-robin between read service for Slave
*
* https://jira.mariadb.org/browse/MXS-1743
*/
#include <maxtest/testconnections.hh>
#include <vector>
int main(int argc, char** argv)
{
TestConnections test(argc, argv);
auto do_test = [&]() {
test.reset_timeout();
test.maxscale->connect();
test.try_query(test.maxscale->conn_master, "SELECT 1");
test.maxscale->disconnect();
};
test.tprintf("Testing with both master and slave up");
do_test();
test.tprintf("Testing with only the master");
test.repl->block_node(0);
test.maxscale->wait_for_monitor();
do_test();
test.repl->unblock_node(0);
test.maxscale->wait_for_monitor();
test.tprintf("Testing with only the slave");
test.repl->block_node(1);
test.maxscale->wait_for_monitor();
do_test();
test.repl->unblock_node(1);
test.maxscale->wait_for_monitor();
test.tprintf("Checking that both the master and slave are used");
std::vector<Connection> connections;
test.tprintf("Opening new connections to verify readconnroute works");
for (int i = 0; i < 20; i++)
{
test.reset_timeout();
connections.push_back(test.maxscale->readconn_master());
Connection& c = connections.back();
test.expect(c.connect(), "Connect should work: %s", c.error());
test.expect(c.query("SELECT 1"), "Query should work: %s", c.error());
}
auto s1 = test.maxscale->ssh_output("maxctrl --tsv list servers|grep server1|cut -f 4").output;
auto s2 = test.maxscale->ssh_output("maxctrl --tsv list servers|grep server2|cut -f 4").output;
test.expect(s1 == s2,
"Master and slave shoud have the same amount of connections: %s != %s",
s1.c_str(), s2.c_str());
return test.global_result;
}
| 772 |
494 | <gh_stars>100-1000
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.queue;
import java.util.concurrent.atomic.AtomicLong;
/**
* Track the state of a partition
*
* @author elandau
*/
public class MessageQueueShard implements MessageQueueShardStats {
private volatile int lastCount = 0;
private final String name;
private final int partition;
private final int shard;
private final AtomicLong readCount = new AtomicLong();
private final AtomicLong writeCount = new AtomicLong();
public MessageQueueShard(String name, int partition, int shard) {
this.name = name;
this.partition = partition;
this.shard = shard;
}
public String getName() {
return name;
}
public void setLastCount(int count) {
this.lastCount = count;
this.readCount.addAndGet(count);
}
@Override
public long getReadCount() {
return this.readCount.get();
}
@Override
public long getWriteCount() {
return this.writeCount.get();
}
@Override
public long getLastReadCount() {
return this.lastCount;
}
public void incInsertCount(int count) {
this.writeCount.addAndGet(count);
}
public int getShard() {
return this.shard;
}
public int getPartition() {
return this.partition;
}
@Override
public String toString() {
return "Partition [lastCount=" + lastCount + ", name=" + name + ", partition=" + partition + ", shard=" + shard + "]";
}
} | 795 |
1,444 |
package mage.cards.g;
import java.util.UUID;
import mage.MageInt;
import mage.abilities.common.AttacksTriggeredAbility;
import mage.abilities.effects.common.continuous.BoostControlledEffect;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.SubType;
import mage.constants.Duration;
import mage.filter.StaticFilters;
/**
*
* @author LevelX2
*/
public final class GoblinGeneral extends CardImpl {
public GoblinGeneral(UUID ownerId, CardSetInfo setInfo) {
super(ownerId, setInfo, new CardType[]{CardType.CREATURE}, "{1}{R}{R}");
this.subtype.add(SubType.GOBLIN);
this.subtype.add(SubType.WARRIOR);
this.power = new MageInt(1);
this.toughness = new MageInt(1);
// Whenever Goblin General attacks, Goblin creatures you control get +1/+1 until end of turn.
this.addAbility(new AttacksTriggeredAbility(new BoostControlledEffect(1, 1, Duration.EndOfTurn, StaticFilters.FILTER_PERMANENT_CREATURE_GOBLINS, false), false));
}
private GoblinGeneral(final GoblinGeneral card) {
super(card);
}
@Override
public GoblinGeneral copy() {
return new GoblinGeneral(this);
}
}
| 439 |
14,668 | def main(request, response):
headers = [(b"Content-type", b"text/html;charset=utf-8")]
content = u"<!doctype html><div id=test></div>"
return headers, content
| 66 |
1,168 | <reponame>wcalandro/kythe
// Checks that we can properly handle a test that fails to verify.
//- _NodeA does/not/relate/to _NodeB
int x;
| 48 |
668 | <reponame>jkreindl/sulong<filename>projects/com.oracle.truffle.llvm.parser/src/com/oracle/truffle/llvm/parser/metadata/MDImportedEntity.java
/*
* Copyright (c) 2017, Oracle and/or its affiliates.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.oracle.truffle.llvm.parser.metadata;
public final class MDImportedEntity extends MDName implements MDBaseNode {
private final long tag;
private final long line;
private MDBaseNode scope;
private MDBaseNode entity;
private MDImportedEntity(long tag, long line) {
this.tag = tag;
this.line = line;
this.scope = MDVoidNode.INSTANCE;
this.entity = MDVoidNode.INSTANCE;
}
public long getTag() {
return tag;
}
public MDBaseNode getScope() {
return scope;
}
public MDBaseNode getEntity() {
return entity;
}
public long getLine() {
return line;
}
@Override
public void replace(MDBaseNode oldValue, MDBaseNode newValue) {
super.replace(oldValue, newValue);
if (scope == oldValue) {
scope = newValue;
}
if (entity == oldValue) {
entity = newValue;
}
}
@Override
public void accept(MetadataVisitor visitor) {
visitor.visit(this);
}
private static final int ARGINDEX_TAG = 1;
private static final int ARGINDEX_SCOPE = 2;
private static final int ARGINDEX_ENTITY = 3;
private static final int ARGINDEX_LINE = 4;
private static final int ARGINDEX_NAME = 5;
public static MDImportedEntity create38(long[] args, MetadataValueList md) {
final long tag = args[ARGINDEX_TAG];
final long line = args[ARGINDEX_LINE];
final MDImportedEntity importedEntity = new MDImportedEntity(tag, line);
importedEntity.scope = md.getNullable(args[ARGINDEX_SCOPE], importedEntity);
importedEntity.entity = md.getNullable(args[ARGINDEX_ENTITY], importedEntity);
importedEntity.setName(md.getNullable(args[ARGINDEX_NAME], importedEntity));
return importedEntity;
}
}
| 1,224 |
21,382 | <gh_stars>1000+
// generated automatically, do not modify.
package io.ray.api.function;
/** Functional interface for a remote function that has 6 parameters. */
@FunctionalInterface
public interface RayFuncVoid6<T0, T1, T2, T3, T4, T5> extends RayFuncVoid {
void apply(T0 t0, T1 t1, T2 t2, T3 t3, T4 t4, T5 t5) throws Exception;
}
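// A minimal sketch of how this interface can be used: any 6-argument lambda that may
// throw can be bound to it; the concrete type arguments and values below are
// illustrative assumptions, not part of the generated code.
// RayFuncVoid6<Integer, Integer, Integer, Integer, Integer, Integer> f =
//     (a, b, c, d, e, g) -> System.out.println(a + b + c + d + e + g);
// f.apply(1, 2, 3, 4, 5, 6);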
| 123 |
1,652 | package com.ctrip.xpipe.redis.console.migration.model.impl;
import com.ctrip.xpipe.api.migration.OuterClientService;
import com.ctrip.xpipe.api.observer.Observable;
import com.ctrip.xpipe.api.retry.RetryTemplate;
import com.ctrip.xpipe.command.AbstractCommand;
import com.ctrip.xpipe.observer.AbstractObservable;
import com.ctrip.xpipe.redis.console.annotation.DalTransaction;
import com.ctrip.xpipe.redis.console.job.retry.RetryCondition;
import com.ctrip.xpipe.redis.console.job.retry.RetryNTimesOnCondition;
import com.ctrip.xpipe.redis.console.migration.model.*;
import com.ctrip.xpipe.redis.console.migration.status.*;
import com.ctrip.xpipe.redis.console.model.ClusterTbl;
import com.ctrip.xpipe.redis.console.model.DcTbl;
import com.ctrip.xpipe.redis.console.model.MigrationClusterTbl;
import com.ctrip.xpipe.redis.console.model.ShardTbl;
import com.ctrip.xpipe.redis.console.service.ClusterService;
import com.ctrip.xpipe.redis.console.service.DcService;
import com.ctrip.xpipe.redis.console.service.RedisService;
import com.ctrip.xpipe.redis.console.service.ShardService;
import com.ctrip.xpipe.redis.console.service.migration.MigrationService;
import com.ctrip.xpipe.utils.VisibleForTesting;
import java.util.*;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
/**
* @author shyin
* <p>
* Dec 8, 2016
*/
public class DefaultMigrationCluster extends AbstractObservable implements MigrationCluster {
private volatile MigrationState currentState;
private MigrationEvent event;
private MigrationClusterTbl migrationCluster;
private List<MigrationShard> migrationShards = new LinkedList<>();
private ClusterTbl currentCluster;
private Map<Long, ShardTbl> shards;
private Map<Long, DcTbl> dcs;
private ClusterService clusterService;
private ShardService shardService;
private DcService dcService;
private RedisService redisService;
private MigrationService migrationService;
private Executor executors;
private ScheduledExecutorService scheduled;
private OuterClientService outerClientService = OuterClientService.DEFAULT;
public DefaultMigrationCluster(Executor executors, ScheduledExecutorService scheduled, MigrationEvent event, MigrationClusterTbl migrationCluster, DcService dcService, ClusterService clusterService, ShardService shardService,
RedisService redisService, MigrationService migrationService) {
this.event = event;
this.migrationCluster = migrationCluster;
this.clusterService = clusterService;
this.shardService = shardService;
this.dcService = dcService;
this.redisService = redisService;
this.migrationService = migrationService;
this.executors = executors;
this.scheduled = scheduled;
loadMetaInfo();
setStatus();
}
public ScheduledExecutorService getScheduled() {
return scheduled;
}
@Override
public Executor getMigrationExecutor() {
return executors;
}
@Override
public MigrationEvent getMigrationEvent() {
return this.event;
}
@Override
public String fromDc() {
long fromDcId = migrationCluster.getSourceDcId();
return dcs.get(fromDcId).getDcName();
}
@Override
public String destDc() {
long destDcId = migrationCluster.getDestinationDcId();
return dcs.get(destDcId).getDcName();
}
@Override
public long fromDcId() {
return migrationCluster.getSourceDcId();
}
@Override
public long destDcId() {
return migrationCluster.getDestinationDcId();
}
@Override
public MigrationStatus getStatus() {
return currentState.getStatus();
}
@Override
public MigrationClusterTbl getMigrationCluster() {
return migrationCluster;
}
@Override
public List<MigrationShard> getMigrationShards() {
return migrationShards;
}
@Override
public ClusterTbl getCurrentCluster() {
return currentCluster;
}
@Override
public Map<Long, ShardTbl> getClusterShards() {
return shards;
}
@Override
public Map<Long, DcTbl> getClusterDcs() {
return dcs;
}
@Override
public void addNewMigrationShard(MigrationShard migrationShard) {
migrationShards.add(migrationShard);
}
@Override
public void process() {
logger.info("[process]{}-{}, {}", migrationCluster.getMigrationEventId(), clusterName(), this.currentState.getStatus());
this.currentState.getStateActionState().tryAction();
}
@Override
@DalTransaction
public void updateStat(MigrationState stat) {
logger.info("[updateStat]{}-{}, {} -> {}",
migrationCluster.getMigrationEventId(), clusterName(), this.currentState.getStatus(), stat.getStatus());
this.currentState = stat; // update local state even if update db fail
this.getMigrationService().updateMigrationStatus(this, stat.getStatus());
this.currentState = stat; // avoid local state updating by other thread before real update db with row lock
}
@Override
public void updatePublishInfo(String desc) {
migrationService.updatePublishInfoById(migrationCluster.getId(), desc);
}
@Override
public void updateActiveDcIdToDestDcId() {
long destDcId = destDcId();
ClusterTbl cluster = getCurrentCluster();
cluster.setActivedcId(destDcId);
clusterService.updateActivedcId(clusterId(), destDcId);
}
@Override
public ClusterStepResult stepStatus(ShardMigrationStep shardMigrationStep) {
int finishCount = 0;
int successCount = 0;
List<MigrationShard> migrationShards = getMigrationShards();
int shardSize = migrationShards.size();
for(MigrationShard migrationShard : migrationShards){
ShardMigrationStepResult shardMigrationStepResult = migrationShard.stepResult(shardMigrationStep);
switch (shardMigrationStepResult){
case FAIL:
finishCount++;
break;
case SUCCESS:
finishCount++;
successCount++;
break;
case UNKNOWN:
break;
default:
throw new IllegalStateException("unkonw result:" + shardMigrationStep + "," + this);
}
}
return new ClusterStepResult(shardSize, finishCount, successCount);
}
@Override
public void markCheckFail(String failMessage) {
logger.info("[markCheckFail]{}", clusterName());
for(MigrationShard migrationShard : getMigrationShards()){
migrationShard.markCheckFail(failMessage);
}
}
@Override
public OuterClientService getOuterClientService() {
return outerClientService;
}
//for unit test
public void setOuterClientService(OuterClientService outerClientService) {
this.outerClientService = outerClientService;
}
private long clusterId() {
return getCurrentCluster().getId();
}
@Override
public String clusterName() {
return getCurrentCluster().getClusterName();
}
@Override
public void cancel() {
logger.info("[Cancel]{}-{}, {} -> Cancelled", migrationCluster.getMigrationEventId(), clusterName(), this.currentState.getStatus());
this.currentState.getStateActionState().tryRollback();
}
@Override
public void rollback() {
logger.info("[Rollback]{}-{}, {} -> Rollback", migrationCluster.getMigrationEventId(), clusterName(), this.currentState.getStatus());
this.currentState.getStateActionState().tryRollback();
}
@Override
public void forceProcess() {
logger.info("[ForceProcess]{}-{}, {} -> ForceProcess", migrationCluster.getMigrationEventId(), clusterName(), this.currentState.getStatus());
if (!(currentState instanceof ForceProcessAbleState)) {
throw new IllegalStateException(String.format("cannot cancel while %s", this.currentState.getStatus()));
}
ForceProcessAbleState forceProcessAbleState = (ForceProcessAbleState) this.currentState;
forceProcessAbleState.updateAndForceProcess();
}
@Override
public void forceEnd() {
logger.info("[ForceEnd]{}-{}, {} -> ForceEnd", migrationCluster.getMigrationEventId(), clusterName(), this.currentState.getStatus());
if (!(currentState instanceof PublishState)) {
throw new IllegalStateException(String.format("Cannot force end while %s", this.currentState.getStatus()));
}
PublishState publishState = (PublishState) this.currentState;
publishState.forceEnd();
}
@Override
public void update(Object args, Observable observable) {
logger.info("[update]{}", args);
this.currentState.refresh();
notifyObservers(this);
}
@Override
public ClusterService getClusterService() {
return clusterService;
}
@Override
public ShardService getShardService() {
return shardService;
}
@Override
public DcService getDcService() {
return dcService;
}
@Override
public RedisService getRedisService() {
return redisService;
}
@Override
public MigrationService getMigrationService() {
return migrationService;
}
private void setStatus() {
MigrationStatus status = MigrationStatus.valueOf(migrationCluster.getStatus());
currentState = status.createMigrationState(this);
}
private void loadMetaInfo() {
this.currentCluster = getClusterService().find(migrationCluster.getClusterId());
this.shards = generateShardMap(getShardService().findAllByClusterName(currentCluster.getClusterName()));
this.dcs = generateDcMap(getDcService().findClusterRelatedDc(currentCluster.getClusterName()));
}
private Map<Long, ShardTbl> generateShardMap(List<ShardTbl> shards) {
Map<Long, ShardTbl> result = new HashMap<>();
for (ShardTbl shard : shards) {
result.put(shard.getId(), shard);
}
return result;
}
private Map<Long, DcTbl> generateDcMap(List<DcTbl> dcs) {
Map<Long, DcTbl> result = new HashMap<>();
for (DcTbl dc : dcs) {
result.put(dc.getId(), dc);
}
return result;
}
@Override
public String toString() {
return String.format("[cluster:%s, state:%s]", clusterName(), currentState);
}
@VisibleForTesting
public void setMigrationState(MigrationState state) {
this.currentState = state;
}
}
| 4,168 |
4,538 | /*
* Copyright (C) 2015-2017 Alibaba Group Holding Limited
*/
#ifndef SENSOR_HAL_H
#define SENSOR_HAL_H
#include <aos/init.h>
#include <aos/errno.h>
#include <aos/compiler.h>
#include "ulog/ulog.h"
#include "sensor/sensor.h"
#define SENSOR_DRV_ADD(func) \
int __sensor_##func##_func__(void){ \
return func(); \
}
typedef int (*SENSOR_INIT_FUN)(void);
int sensor_init(void);
int sensor_create_obj(sensor_obj_t *sensor);
#if SENSOR_CONFIG_MODBUS_ENABLE
int modbus_init(void);
#endif
#endif /* SENSOR_HAL_H */
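/*
 * Usage sketch for SENSOR_DRV_ADD: a driver init function is registered through the
 * macro so it can be invoked during sensor_init(). The driver name and the fields
 * filled into sensor_obj_t below are illustrative assumptions only.
 *
 * static int drv_acc_demo_init(void)
 * {
 *     sensor_obj_t sensor = {0};
 *     // fill in sensor tag, path and open/close/read/write hooks here ...
 *     return sensor_create_obj(&sensor);
 * }
 * SENSOR_DRV_ADD(drv_acc_demo_init);
 */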
| 227 |
852 | <filename>CondCore/CondDB/interface/CoralServiceMacros.h
#ifndef CondCore_CondDB_CoralServiceMacros_h
#define CondCore_CondDB_CoralServiceMacros_h
#include "CondCore/CondDB/interface/CoralServiceFactory.h"
#include "CoralKernel/Service.h"
#define DEFINE_CORALSERVICE(type, name) \
DEFINE_EDM_PLUGIN(cond::CoralServicePluginFactory, cond::CoralServiceWrapper<type>, name)
#endif
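// Usage sketch for DEFINE_CORALSERVICE: a concrete coral::Service subclass is exported
// to the plugin factory with the macro; the class and plugin names below are
// hypothetical, not taken from this repository.
//
// DEFINE_CORALSERVICE(cond::MyMonitoringService, "COND/Services/MyMonitoringService");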
| 139 |
2,440 | <gh_stars>1000+
from cyberbrain import Binding, InitialValue, Symbol
def test_unary_operations(tracer, check_golden_file):
a = 1
tracer.start()
b = +a # UNARY_POSITIVE
b = -a # UNARY_NEGATIVE
b = not a # UNARY_NOT
b = ~a # UNARY_INVERT
tracer.stop()
| 126 |
2,219 | /*
* Copyright (c) 2014-2015 <NAME>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation; either version 3, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file
* @brief Event type manipulation.
*
* This header file defines the event types of the `libfswatch` API.
*
* @copyright Copyright (c) 2014-2015 <NAME>
* @license GNU General Public License v. 3.0
* @author <NAME>
* @version 1.8.0
*/
#ifndef FSW__CEVENT_H
# define FSW__CEVENT_H
# include <time.h>
# include <limits.h>
# include "libfswatch_types.h"
# ifdef __cplusplus
extern "C"
{
# endif
/**
* @brief Backend-agnostic change flags.
*
* Each element of this enum represents a backend-agnostic change flag. No
* direct mapping to backend-specific change types is guaranteed to exist: a
* change type may be mapped to multiple `fsw_event_flag` instances included
* the `PlatformSpecific` flag.
*
* The values of event flags are all powers of 2, that is numbers @f$f=2^n@f$
* where @f$n@f$ is an integer. This representation makes it easy to combine
* flags into a bit mask and encode multiple events flags into a single integer.
*
* A monitor implementation is required to map implementation-specific flags
* into API flags. Sometimes, though, a perfect match is not possible and the
* following situation may arise:
*
* - One platform-specific flag must be mapped into multiple API flags.
*
* - Multiple platform-specific flags must be mapped into a single API flag.
*
* - A mapping is not possible for some flags, in which case they should be
* mapped to fsw_event_flag::PlatformSpecific. The API currently offers no
* way to retain a platform-specific event flag value in this case.
*/
enum fsw_event_flag
{
NoOp = 0, /**< No event has occurred. */
PlatformSpecific = (1 << 0), /**< Platform-specific placeholder for event type that cannot currently be mapped. */
Created = (1 << 1), /**< An object was created. */
Updated = (1 << 2), /**< An object was updated. */
Removed = (1 << 3), /**< An object was removed. */
Renamed = (1 << 4), /**< An object was renamed. */
OwnerModified = (1 << 5), /**< The owner of an object was modified. */
AttributeModified = (1 << 6), /**< The attributes of an object were modified. */
MovedFrom = (1 << 7), /**< An object was moved from this location. */
MovedTo = (1 << 8), /**< An object was moved to this location. */
IsFile = (1 << 9), /**< The object is a file. */
IsDir = (1 << 10), /**< The object is a directory. */
IsSymLink = (1 << 11), /**< The object is a symbolic link. */
Link = (1 << 12), /**< The link count of an object has changed. */
Overflow = (1 << 13) /**< The event queue has overflowed. */
};
extern fsw_event_flag FSW_ALL_EVENT_FLAGS[15];
/**
* @brief Get event flag by name.
*
* This function looks for an event flag called @p name and, if it exists, it
* writes its value onto @p flag and @c FSW_OK, otherwise @p flag is not
* modified and @c FSW_ERR_UNKNOWN_VALUE is returned.
*
* @param[in] name The name of the event flag to look for.
* @param[out] flag The output variable where the event flag is returned.
* @return #FSW_OK if the functions succeeds, #FSW_ERR_UNKNOWN_VALUE
* otherwise.
*/
FSW_STATUS fsw_get_event_flag_by_name(const char *name, enum fsw_event_flag *flag);
/**
* @brief Get the name of an event flag.
*
* This function looks for the name of the specified event @p flag. If it
* exists, it returns its name, otherwise @c nullptr is returned.
*
* @param[in] flag The event flag to look for.
* @return The name of @p flag, or @c nullptr if it does not exist.
*/
char *fsw_get_event_flag_name(const enum fsw_event_flag flag);
/**
* A file change event is represented as an instance of this struct where:
* - path is the path where the event was triggered.
* - evt_time the time when the event was triggered.
* - flags is an array of fsw_event_flag of size flags_num.
* - flags_num is the size of the flags array.
*/
typedef struct fsw_cevent
{
char * path;
time_t evt_time;
enum fsw_event_flag * flags;
unsigned int flags_num;
} fsw_cevent;
/**
* A function pointer of type FSW_CEVENT_CALLBACK is used by the API as a
* callback to provide information about received events. The callback is
* passed the following arguments:
* - events, a const pointer to an array of events of type const fsw_cevent.
* - event_num, the size of the *events array.
* - data, optional persisted data for a callback.
*
* The memory used by the fsw_cevent objects will be freed at the end of the
* callback invocation. A callback should copy such data instead of storing
* a pointer to it.
*/
typedef void (*FSW_CEVENT_CALLBACK)(fsw_cevent const *const events,
const unsigned int event_num,
void *data);
# ifdef __cplusplus
}
# endif
#endif /* FSW__CEVENT_H */
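/*
 * Callback sketch: a minimal FSW_CEVENT_CALLBACK that prints each path together with
 * the names of its flags. Only symbols declared above are used; the printf format and
 * the callback name are illustrative choices.
 *
 * void my_callback(fsw_cevent const *const events,
 *                  const unsigned int event_num,
 *                  void *data)
 * {
 *   for (unsigned int i = 0; i < event_num; ++i)
 *   {
 *     for (unsigned int j = 0; j < events[i].flags_num; ++j)
 *       printf("%s: %s\n", events[i].path,
 *              fsw_get_event_flag_name(events[i].flags[j]));
 *   }
 * }
 */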
| 1,962 |
693 | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
import torch.nn.functional as F
def invariance_loss(z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes mse loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: invariance loss (mean squared error).
"""
return F.mse_loss(z1, z2)
def variance_loss(z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes variance loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: variance regularization loss.
"""
eps = 1e-4
std_z1 = torch.sqrt(z1.var(dim=0) + eps)
std_z2 = torch.sqrt(z2.var(dim=0) + eps)
std_loss = torch.mean(F.relu(1 - std_z1)) + torch.mean(F.relu(1 - std_z2))
return std_loss
def covariance_loss(z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes covariance loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: covariance regularization loss.
"""
N, D = z1.size()
z1 = z1 - z1.mean(dim=0)
z2 = z2 - z2.mean(dim=0)
cov_z1 = (z1.T @ z1) / (N - 1)
cov_z2 = (z2.T @ z2) / (N - 1)
diag = torch.eye(D, device=z1.device)
cov_loss = cov_z1[~diag.bool()].pow_(2).sum() / D + cov_z2[~diag.bool()].pow_(2).sum() / D
return cov_loss
def vicreg_loss_func(
z1: torch.Tensor,
z2: torch.Tensor,
sim_loss_weight: float = 25.0,
var_loss_weight: float = 25.0,
cov_loss_weight: float = 1.0,
) -> torch.Tensor:
"""Computes VICReg's loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
sim_loss_weight (float): invariance loss weight.
var_loss_weight (float): variance loss weight.
cov_loss_weight (float): covariance loss weight.
Returns:
torch.Tensor: VICReg loss.
"""
sim_loss = invariance_loss(z1, z2)
var_loss = variance_loss(z1, z2)
cov_loss = covariance_loss(z1, z2)
loss = sim_loss_weight * sim_loss + var_loss_weight * var_loss + cov_loss_weight * cov_loss
return loss
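# Minimal usage sketch: feeds two random NxD batches through vicreg_loss_func with its
# default weights. The shapes are illustrative; any pair of equally shaped projector
# outputs works the same way.
if __name__ == "__main__":
    z1 = torch.randn(8, 128)
    z2 = torch.randn(8, 128)
    print(vicreg_loss_func(z1, z2).item())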
| 1,451 |
347 | package com.cjj.http;
import android.app.Activity;
import android.content.Context;
import android.graphics.drawable.Drawable;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentActivity;
import android.support.v7.app.AppCompatActivity;
import android.widget.ImageView;
import com.bumptech.glide.Glide;
import com.cjj.listener.CallbackListener;
import java.util.Map;
/**
* Http
* @author cjj
* @category A frame for web
* @version 1.0
* @date 2015/8/26
*/
public class Http extends BaseHttp{
/**
     * GET request
* @param url
* @param listener
*/
public static void get(String url,CallbackListener<?> listener)
{
getInstance().baseGet(url, listener);
}
/**
     * POST request
* @param url
* @param params
* @param listener
*/
public static void post(String url,Map<String,String>params,CallbackListener<?> listener)
{
getInstance().basePost(url, params, listener);
}
/**
     * POST request without parameters
* @param url
* @param listener
*/
public static void postNotParams(String url,CallbackListener<?> listener)
{
getInstance().basePost(url, null, listener);
}
/**
     * Download
     * @param url the URL to download from
     * @param savePath the path to save the file to
     * @param listener the callback
*/
public static void download(String url,String savePath,CallbackListener<?> listener)
{
getInstance().baseDownload(url, savePath, listener);
}
/**
     * Cancel the request
*/
public static void cancel(String url)
{
getInstance().getOkHttpClient().cancel(url);
}
}
| 671 |
843 | package org.zalando.nakadi.exceptions.runtime;
public class NoSuchEventTypeException extends NakadiBaseException {
public NoSuchEventTypeException(final String message) {
super(message);
}
public NoSuchEventTypeException(final String msg, final Exception cause) {
super(msg, cause);
}
}
| 106 |
601 | <reponame>timgates42/googleads-python-lib
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new live stream events.
To determine which live stream events exist, run get_all_live_stream_events.py.
To determine which cdn configurations exist, run get_cdn_configurations.py.
"""
import datetime
import uuid
# Import appropriate modules from the client library.
from googleads import ad_manager
import pytz
# Set content urls and adTags to use
CONTENT_URLS = ['INSERT_CONTENT_URLS_HERE']
AD_TAGS = ['INSERT_AD_TAGS_HERE']
def main(client, content_urls, ad_tags):
# Initialize appropriate services.
live_events_service = client.GetService(
'LiveStreamEventService', version='v202105')
# Stream will play for 365 days
start_datetime = datetime.datetime.now(tz=pytz.timezone('America/New_York'))
end_datetime = start_datetime + datetime.timedelta(days=365)
# Create live stream event objects
live_stream_events = [{
'name': 'Live Stream Event #%s' % uuid.uuid4(),
'startDateTime': start_datetime,
'endDateTime': end_datetime,
'contentUrls': content_urls,
'adTags': ad_tags
}]
# Add live stream events.
live_stream_events = live_events_service.createLiveStreamEvents(
live_stream_events)
# Display results.
for live_stream_event in live_stream_events:
print(
'Live stream event with id "%s", named "%s" and status %s was created.'
% (live_stream_event['id'], live_stream_event['name'],
live_stream_event['status']))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, CONTENT_URLS, AD_TAGS)
| 748 |
614 | <reponame>Nagarjunanepl/Spring-3.0
package com.in28minutes.springmvc.web.controller;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.propertyeditors.CustomDateEditor;
import org.springframework.context.MessageSource;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.WebDataBinder;
import org.springframework.web.bind.annotation.InitBinder;
import org.springframework.web.bind.annotation.ModelAttribute;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.servlet.ModelAndView;
import com.in28minutes.domain.Priority;
import com.in28minutes.domain.TodoItem;
import com.in28minutes.domain.TodoItemList;
import com.in28minutes.domain.User;
import com.in28minutes.service.api.TodoService;
import com.in28minutes.springmvc.web.util.SessionData;
import com.in28minutes.springmvc.web.util.TodoPriorityPropertyEditor;
import com.in28minutes.web.common.util.TodoListUtils;
@Controller
public class TodoController extends AbstractController {
private final Logger LOGGER = LoggerFactory.getLogger(this.getClass()
.getName());
@Autowired
private SessionData sessionData;
@Autowired
private MessageSource messageSource;
@Autowired
private TodoService todoService;
@InitBinder
public void initBinder(WebDataBinder binder) {
SimpleDateFormat dateFormat = new SimpleDateFormat(
TodoListUtils.DATE_FORMAT);
binder.registerCustomEditor(Date.class, new CustomDateEditor(
dateFormat, false));
binder.registerCustomEditor(Priority.class,
new TodoPriorityPropertyEditor());
}
@RequestMapping("/user/todos")
public ModelAndView loadTodoList() {
ModelAndView modelAndView = new ModelAndView();
// user login is ensured by the login filter/interceptor
TodoItemList todoList = todoService.getTodoListByUser(sessionData
.getUser().getId());
modelAndView.addObject("todoList", todoList.getItems());
modelAndView.addObject("totalCount", todoList.getCount());
modelAndView.addObject("doneCount", todoList.getDoneCount());
modelAndView.addObject("todoCount", todoList.getTodoCount());
modelAndView.addObject("homeTabStyle", "active");
modelAndView.setViewName("todo/list");
return modelAndView;
}
@RequestMapping(value = "/user/todos/new", method = RequestMethod.GET)
public String showCreateTodo(Model model) {
model.addAttribute("today", new SimpleDateFormat(
TodoListUtils.DATE_FORMAT).format(new Date()));
model.addAttribute("todo", new TodoItem());
return "todo/create";
}
@RequestMapping(value = "/user/todos/new", method = RequestMethod.POST)
public String createNewTodo(@ModelAttribute TodoItem todoItem) {
final User user = sessionData.getUser();
todoItem.setDone(false);
todoItem.setUserId(user.getId());
todoService.create(todoItem);
return REDIRECT_TO_VIEW_TODOS_CONTROLLER;
}
@RequestMapping("/user/todos/{todoId}/update")
public String showUpdateTodo(@PathVariable long todoId, Model model) {
LOGGER.info("Updating TODO");
TodoItem todoItem = todoService.getTodoById(todoId);
model.addAttribute("todo", todoItem);
return "todo/update";
}
@RequestMapping(value = "/user/todos/update", method = RequestMethod.POST)
public String updateTodo(@ModelAttribute TodoItem todoItem) {
LOGGER.info("Updating TODO" + todoItem);
todoService.update(todoItem);
return REDIRECT_TO_VIEW_TODOS_CONTROLLER;
}
@RequestMapping(value = "/user/todos/{todoId}/delete", method = RequestMethod.POST)
public ModelAndView deleteTodo(@PathVariable long todoId) {
TodoItem todoItem = todoService.getTodoById(todoId);
if (todoItem == null) {
String errorMessage = messageSource.getMessage("no.such.todo",
new Object[] { todoId }, sessionData.getLocale());
return redirectToErrorPageWithMessage(errorMessage);
}
todoService.remove(todoItem);
return new ModelAndView(REDIRECT_TO_VIEW_TODOS_CONTROLLER);
}
@RequestMapping(value = "/user/todos/search", method = RequestMethod.GET)
public String searchTodo(@RequestParam String title, Model model) {
TodoItemList todoList = todoService.searchTodoListByTitle(sessionData
.getUser().getId(), title);
model.addAttribute("todoList", todoList.getItems());
model.addAttribute("title", title);
return "todo/search";
}
} | 1,602 |
1,107 | <reponame>eino/pyvista
"""
.. _themes_example:
Control Global and Local Plotting Themes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PyVista allows you to set global and local plotting themes to easily
set default plotting parameters.
"""
import pyvista as pv
from pyvista import examples
###############################################################################
# Define a simple plotting routine for comparing the themes.
mesh = examples.download_st_helens().warp_by_scalar()
def plot_example():
p = pv.Plotter()
p.add_mesh(mesh)
p.add_bounding_box()
p.show()
###############################################################################
# PyVista's default color theme is chosen to be generally easy on your
# eyes and is best used when working long hours on your visualization
# project. The grey background and warm colormaps are chosen to make
# sure 3D renderings do not drastically change the brightness of your
# screen when working in dark environments.
#
# Here's an example of our default plotting theme - this is what you
# would see by default after running any of our examples locally.
pv.set_plot_theme('default')
plot_example()
###############################################################################
# PyVista also ships with a few plotting themes:
#
# * ``'ParaView'``: this is designed to mimic ParaView's default plotting theme.
# * ``'dark'``: this is designed to be night-mode friendly with dark backgrounds and color schemes.
# * ``'document'``: this is built for use in document style plotting and making publication quality figures.
###############################################################################
# Demo the ``'ParaView'`` theme.
pv.set_plot_theme("paraview")
plot_example()
###############################################################################
# Demo the ``'dark'`` theme.
pv.set_plot_theme("dark")
plot_example()
###############################################################################
# Demo the ``'document'`` theme. This theme is used on our online examples.
pv.set_plot_theme("document")
plot_example()
###############################################################################
# Note that you can also use color gradients for the background of the plotting
# window!
plotter = pv.Plotter()
plotter.add_mesh(mesh)
plotter.show_grid()
# Here we set the gradient
plotter.set_background("royalblue", top="aliceblue")
cpos = plotter.show()
###############################################################################
# Modifying the Global Theme
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# You can control how meshes are displayed by setting individual
# parameters when plotting like ``mesh.plot(show_edges=True)``, or by
# setting a global theme. You can also control individual parameters
# how all meshes are displayed by default via ``pyvista.global_theme``.
#
# Here, we print out the current global defaults for all ``pyvista``
# meshes. These values have been changed by the previous "Document"
# theme.
pv.global_theme
###############################################################################
# By default, edges are not shown on meshes unless explicitly
# specified when plotting a mesh via ``show_edges=True``. You can
# change this default behavior globally by changing the default
# parameter.
pv.global_theme.show_edges = True
cpos = pv.Sphere().plot()
###############################################################################
# You can reset pyvista to default behavior with ``restore_defaults``.
# Note that the figure's color was reset to the default "white" color
# rather than the "tan" color default with the document theme. Under
# the hood, each theme applied changes the global plot defaults stored
# within ``pyvista.global_theme.``
pv.global_theme.restore_defaults()
cpos = pv.Sphere().plot()
###############################################################################
# Creating a Custom Theme and Applying it Globally
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# You can create a custom theme by modifying one of the existing
# themes and then loading it into the global plotting defaults.
#
# Here, we create a dark theme that plots meshes red by default while
# showing edges.
from pyvista import themes
my_theme = themes.DarkTheme()
my_theme.color = 'red'
my_theme.lighting = False
my_theme.show_edges = True
my_theme.axes.box = True
pv.global_theme.load_theme(my_theme)
cpos = pv.Sphere().plot()
###############################################################################
# Creating a Custom Theme and Applying it to a Single Plotter
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# In this example, we create a custom theme from the base "default"
# theme and then apply it to a single plotter. Note that this does
# not change the behavior of the global "defaults", which are still
# set to the modified ``DarkTheme``.
#
# This approach carries the advantage that you can maintain several
# themes and apply them to one or more plotters.
from pyvista import themes
my_theme = themes.DefaultTheme()
my_theme.color = 'black'
my_theme.lighting = True
my_theme.show_edges = True
my_theme.edge_color = 'white'
my_theme.background = 'white'
cpos = pv.Sphere().plot(theme=my_theme)
###############################################################################
# Alternatively, set the theme of an instance of ``Plotter``.
pl = pv.Plotter(theme=my_theme)
# pl.theme = my_theme # alternatively use the setter
pl.add_mesh(pv.Cube())
cpos = pl.show()
###############################################################################
# Reset to use the document theme
pv.set_plot_theme("document")
| 1,420 |
360 | from build import models, reader
from build import labels as categories
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import classification_report
docs = reader.fileids(categories=categories)
labels = [reader.categories(fileids=[fid])[0] for fid in docs]
train_docs, test_docs, train_labels, test_labels = tts(docs, labels, test_size=0.2)
def get_docs(fids):
for fid in fids:
yield list(reader.docs(fileids=[fid]))
sgd = models[3]
nby = models[4]
sgd.fit(get_docs(train_docs), train_labels)
y_pred = sgd.predict(get_docs(test_docs))
print(classification_report(test_labels, y_pred, labels=categories))
import nltk
def preprocess(text):
return [
[
list(nltk.pos_tag(nltk.word_tokenize(sent)))
for sent in nltk.sent_tokenize(para)
] for para in text.split("\n\n")
]
doc = preprocess("""
Last summer, two defensemen from opposing conferences with distinct styles of play and contrasting personalities were forever placed in the same breath, their destinies intertwined by a trade.
The Nashville Predators sent Shea Weber, their cornerstone, to the Montreal Canadiens for <NAME>, who had become tremendously popular in Montreal and throughout the league. Subban, 27, won a Norris Trophy as the league’s top defenseman in 2013. Weber, 31, had been a three-time finalist for the award.
“Sometimes you forget that superstars get traded,” Anaheim Ducks defenseman <NAME> said. “Obviously, what <NAME>. meant to Montreal and the impact that he had on that city, it was hard for them to let him go. The same with Shea, who was their captain for years.”
Weber and Subban were together again at last weekend’s All-Star three-on-three tournament. Weber’s 31 points in 50 games for the first-place Canadiens, and his plus-18 rating, made him an obvious selection. Subban was voted in as a team captain by the fans despite a mixed first half of the season. He posted only 18 points and missed 16 games for the Predators, who are in third place in the Central Division.
""")
# print(doc[0][0])
print(sgd.predict(doc[0][0]))
| 660 |
374 | package net.dubboclub.netty4;
import com.alibaba.dubbo.common.Constants;
import com.alibaba.dubbo.common.URL;
import com.alibaba.dubbo.common.Version;
import com.alibaba.dubbo.common.logger.Logger;
import com.alibaba.dubbo.common.logger.LoggerFactory;
import com.alibaba.dubbo.common.utils.NamedThreadFactory;
import com.alibaba.dubbo.common.utils.NetUtils;
import com.alibaba.dubbo.remoting.Channel;
import com.alibaba.dubbo.remoting.ChannelHandler;
import com.alibaba.dubbo.remoting.RemotingException;
import com.alibaba.dubbo.remoting.transport.AbstractClient;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import java.util.concurrent.TimeUnit;
/**
* Created by bieber on 2015/10/8.
*/
public class Netty4Client extends AbstractClient {
private static final Logger logger = LoggerFactory.getLogger(Netty4Client.class);
private Bootstrap bootstrap;
private io.netty.channel.Channel channel;
public Netty4Client(URL url, ChannelHandler handler) throws RemotingException {
super(url, wrapChannelHandler(url, handler));
}
@Override
protected void doOpen() throws Throwable {
EventLoopGroup bossGroup = new NioEventLoopGroup( Constants.DEFAULT_IO_THREADS,new NamedThreadFactory("NettyClientBoss",true));
final Netty4Handler nettyHandler = new Netty4Handler(getUrl(), this);
bootstrap = new Bootstrap();
bootstrap.group(bossGroup).channel(NioSocketChannel.class)
.option(ChannelOption.SO_KEEPALIVE, true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getTimeout())
.option(ChannelOption.TCP_NODELAY,true)
.handler(new ChannelInitializer<NioSocketChannel>() {
@Override
protected void initChannel(NioSocketChannel ch) throws Exception {
Netty4CodecAdapter adapter = new Netty4CodecAdapter(getCodec(), getUrl(), Netty4Client.this);
ch.pipeline().addLast("decoder", adapter.getDecoder())
.addLast("encoder", adapter.getEncoder())
.addLast("handler", nettyHandler);
}
});
}
@Override
protected void doClose() throws Throwable {
//do nothing
}
@Override
protected void doConnect() throws Throwable {
long start = System.currentTimeMillis();
ChannelFuture future = bootstrap.connect(getConnectAddress()).sync();
try{
boolean ret = future.awaitUninterruptibly(getConnectTimeout(), TimeUnit.MILLISECONDS);
if (ret && future.isSuccess()) {
io.netty.channel.Channel newChannel = future.channel();
try {
                    // Close the old connection
io.netty.channel.Channel oldChannel = Netty4Client.this.channel; // copy reference
if (oldChannel != null) {
try {
if (logger.isInfoEnabled()) {
logger.info("Close old netty channel " + oldChannel + " on create new netty channel " + newChannel);
}
oldChannel.close();
} finally {
Netty4Channel.removeChannelIfDisconnected(oldChannel);
}
}
} finally {
if (Netty4Client.this.isClosed()) {
try {
if (logger.isInfoEnabled()) {
logger.info("Close new netty channel " + newChannel + ", because the client closed.");
}
newChannel.close();
} finally {
Netty4Client.this.channel = null;
Netty4Channel.removeChannelIfDisconnected(newChannel);
}
} else {
Netty4Client.this.channel = newChannel;
}
}
} else if (future.cause() != null) {
throw new RemotingException(this, "client(url: " + getUrl() + ") failed to connect to server "
+ getRemoteAddress() + ", error message is:" + future.cause().getMessage(), future.cause());
} else {
throw new RemotingException(this, "client(url: " + getUrl() + ") failed to connect to server "
+ getRemoteAddress() + " client-side timeout "
+ getConnectTimeout() + "ms (elapsed: " + (System.currentTimeMillis() - start) + "ms) from netty client "
+ NetUtils.getLocalHost() + " using dubbo version " + Version.getVersion());
}
}finally{
if (! isConnected()) {
future.cancel(true);
}
}
}
@Override
protected void doDisConnect() throws Throwable {
try {
Netty4Channel.removeChannelIfDisconnected(channel);
} catch (Throwable t) {
logger.warn(t.getMessage());
}
}
@Override
protected Channel getChannel() {
io.netty.channel.Channel c = channel;
if (c == null || ! c.isOpen())
return null;
return Netty4Channel.getOrAddChannel(c, getUrl(), this);
}
}
| 2,697 |
310 | package org.seasar.doma.internal.jdbc.entity;
import static org.seasar.doma.internal.util.AssertionUtil.*;
import java.lang.reflect.Method;
import org.seasar.doma.DomaNullPointerException;
import org.seasar.doma.jdbc.Config;
import org.seasar.doma.jdbc.entity.EntityType;
public abstract class AbstractEntityListenerContext<E> {
protected final EntityType<E> entityType;
protected final Method method;
protected final Config config;
protected E newEntity;
protected AbstractEntityListenerContext(EntityType<E> entityType, Method method, Config config) {
assertNotNull(entityType, method, config);
this.entityType = entityType;
this.method = method;
this.config = config;
}
@SuppressWarnings("BooleanMethodIsAlwaysInverted")
protected boolean isPropertyDefinedInternal(String propertyName) {
assertNotNull(propertyName);
return entityType.getEntityPropertyType(propertyName) != null;
}
public EntityType<E> getEntityType() {
return entityType;
}
public Method getMethod() {
return method;
}
public Config getConfig() {
return config;
}
public E getNewEntity() {
return this.newEntity;
}
public void setNewEntity(E newEntity) {
if (newEntity == null) {
throw new DomaNullPointerException("newEntity");
}
this.newEntity = newEntity;
}
}
| 435 |
1,831 | /**
* Copyright (c) 2019-present, Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include "logdevice/common/AdminCommandTable.h"
#include "logdevice/common/Processor.h"
#include "logdevice/common/Semaphore.h"
#include "logdevice/common/Worker.h"
#include "logdevice/common/configuration/logs/LogsConfigManager.h"
#include "logdevice/common/configuration/logs/LogsConfigStateMachine.h"
#include "logdevice/common/event_log/EventLogStateMachine.h"
#include "logdevice/common/request_util.h"
#include "logdevice/common/util.h"
#include "logdevice/include/Err.h"
#include "logdevice/server/admincommands/AdminCommand.h"
namespace facebook { namespace logdevice { namespace commands {
class RSMTrim : public AdminCommand {
using AdminCommand::AdminCommand;
private:
std::string rsm_type_;
Semaphore semaphore_;
Status st_;
void onTrimmed(Status st) {
st_ = st;
semaphore_.post();
}
public:
void getOptions(
boost::program_options::options_description& out_options) override {
out_options.add_options()(
"rsm_type", boost::program_options::value<std::string>(&rsm_type_));
}
void getPositionalOptions(
boost::program_options::positional_options_description& out_options)
override {
out_options.add("rsm_type", 1);
}
std::string getUsage() override {
return "rsm trim eventlog|logsconfig";
}
void run() override {
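// The trim must run on the worker thread that owns the target state machine, so the
// lambdas below are posted with run_on_worker() while this admin-command thread blocks
// on semaphore_ until onTrimmed() records the resulting status.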
if (rsm_type_.empty()) {
out_.printf("snapshot type is not provided\r\n");
} else if (rsm_type_ == "eventlog") {
auto event_log_owner = EventLogStateMachine::getWorkerIdx(
server_->getProcessor()->getWorkerCount(WorkerType::GENERAL));
auto rc = run_on_worker(
server_->getProcessor(), event_log_owner, WorkerType::GENERAL, [&]() {
Worker* w = Worker::onThisThread();
if (w->event_log_) {
auto cb = [&](Status st) { this->onTrimmed(st); };
w->event_log_->trim(cb);
return true;
} else {
return false;
}
});
if (!rc) {
// callback is skipped so no need to wait for the semaphore
out_.printf(
"This node is not running with an event log state machine\r\n");
} else {
semaphore_.wait();
if (st_ == E::OK) {
out_.printf("Successfully trimmed eventlog\r\n");
} else if (st_ == E::UPTODATE) {
out_.printf("Eventlog is already trimmed up to date.\r\n");
} else {
out_.printf(
"Could not trim eventlog:%s\r\n", error_name(st_));
}
}
} else if (rsm_type_ == "logsconfig") {
auto logsconfig_worker_type =
LogsConfigManager::workerType(server_->getProcessor());
auto logsconfig_owner_worker =
LogsConfigManager::getLogsConfigManagerWorkerIdx(
server_->getProcessor()->getWorkerCount(logsconfig_worker_type));
auto rc =
run_on_worker(server_->getProcessor(),
logsconfig_owner_worker,
logsconfig_worker_type,
[&]() {
Worker* w = Worker::onThisThread();
if (w->logsconfig_manager_ &&
w->logsconfig_manager_->getStateMachine()) {
auto cb = [&](Status st) { this->onTrimmed(st); };
w->logsconfig_manager_->getStateMachine()->trim(cb);
return true;
} else {
return false;
}
});
if (!rc) {
out_.printf(
"This node is not running with a logs config state machine\r\n");
} else {
semaphore_.wait();
if (st_ == E::OK) {
out_.printf("Successfully trimmed logsconfig\r\n");
} else {
out_.printf(
"Could not create logsconfig snapshot:%s\r\n", error_name(st_));
}
}
} else {
out_.printf("Snapshot type '%s' not supported\r\n", rsm_type_.c_str());
}
}
};
}}} // namespace facebook::logdevice::commands
| 1,991 |
303 | r"""Downloads and converts Market1501 data to TFRecords of TF-Example protos.
This module downloads the Market1501 data, uncompresses it, reads the files
that make up the Market1501 data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import tensorflow as tf
try:
import dataset_utils
except ImportError:
from datasets import dataset_utils
import numpy as np
import pickle
import pdb
import glob
import scipy.misc
# Seed for repeatability.
_RANDOM_SEED = 0
random.seed(_RANDOM_SEED)
# The number of shards per dataset split.
_NUM_SHARDS = 1
# _IMG_PATTERN = 'jpg'
_IMG_PATTERN = 'png'
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB data.
## For JPEG
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
## For PNG
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_image(self._decode_png_data, channels=3)
def read_image_dims(self, sess, image_data):
if _IMG_PATTERN=='jpg':
return self.read_image_dims_jpeg(sess, image_data)
elif _IMG_PATTERN=='png':
return self.read_image_dims_png(sess, image_data)
def read_image_dims_jpeg(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def read_image_dims_png(self, sess, image_data):
image = self.decode_png(sess, image_data)
return image.shape[0], image.shape[1]
def decode_png(self, sess, image_data):
image = sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
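# Minimal usage sketch for ImageReader (hypothetical path, assuming a TF1-style session):
#   with tf.Session() as sess:
#       reader = ImageReader()
#       image_data = tf.gfile.FastGFile('/path/to/image.png', 'rb').read()
#       height, width = reader.read_image_dims(sess, image_data)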
def _get_folder_path(dataset_dir, split_name):
if split_name == 'train':
folder_path = os.path.join(dataset_dir, 'train')
elif split_name == 'test':
folder_path = os.path.join(dataset_dir, 'test')
elif split_name == 'test_seq':
folder_path = os.path.join(dataset_dir, 'test')
elif split_name == 'test_seq_other':
folder_path = os.path.join(dataset_dir, 'test')
assert os.path.isdir(folder_path)
return folder_path
def _get_image_file_list(dataset_dir, split_name):
folder_path = _get_folder_path(dataset_dir, split_name)
if split_name in ('train', 'train_flip', 'test_samples', 'test_seq', 'test_seq_other', 'query', 'all'):
filelist = sorted(os.listdir(folder_path))
elif split_name == 'test':
filelist = sorted(os.listdir(folder_path))
# Remove non-jpg files
valid_filelist = []
for i in xrange(0, len(filelist)):
if filelist[i].endswith('.'+_IMG_PATTERN):
valid_filelist.append(filelist[i])
return valid_filelist
def _get_dataset_filename(dataset_dir, out_dir, split_name, shard_id):
output_filename = 'DeepFashion_%s_%05d-of-%05d.tfrecord' % (
split_name.split('_')[0], shard_id, _NUM_SHARDS)
return os.path.join(out_dir, output_filename)
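# For example, shard 0 of the 'train' split with _NUM_SHARDS == 1 is written to
# '<out_dir>/DeepFashion_train_00000-of-00001.tfrecord'.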
def _get_train_all_pn_pairs(dataset_dir, out_dir, split_name='train', augment_ratio=1, mode='same_diff_cam'):
"""Returns a list of pair image filenames.
Args:
dataset_dir: A directory containing person images.
Returns:
p_pairs: A list of positive pairs.
n_pairs: A list of negative pairs.
"""
assert split_name in {'train', 'test', 'test_seq'}
if split_name=='train_flip':
p_pairs_path = os.path.join(out_dir, 'p_pairs_train_flip.p')
n_pairs_path = os.path.join(out_dir, 'n_pairs_train_flip.p')
else:
p_pairs_path = os.path.join(out_dir, 'p_pairs_'+split_name.split('_')[0]+'.p')
n_pairs_path = os.path.join(out_dir, 'n_pairs_'+split_name.split('_')[0]+'.p')
if os.path.exists(p_pairs_path):
with open(p_pairs_path,'r') as f:
p_pairs = pickle.load(f)
with open(n_pairs_path,'r') as f:
n_pairs = pickle.load(f)
else:
filelist = _get_image_file_list(dataset_dir, split_name)
filenames = []
p_pairs = []
n_pairs = []
# pdb.set_trace()
if 'each_img_once'==mode:
for i in xrange(0, len(filelist)):
p_pairs.append([filelist[i],filelist[min(i+100,len(filelist)-1)]])
if len(p_pairs)%100000==0:
print(len(p_pairs))
elif 'each_pair_once'==mode:
for i in xrange(0, len(filelist)):
for j in xrange(0, len(filelist)):
p_pairs.append([filelist[i],filelist[j]])
if len(p_pairs)%100000==0:
print(len(p_pairs))
elif 'one_to_all'==mode:
for i in xrange(1):
for j in xrange(0, len(filelist)):
p_pairs.append([filelist[i],filelist[j]])
if len(p_pairs)%100000==0:
print(len(p_pairs))
elif 'same_diff_cam'==mode:
for i in xrange(0, len(filelist)):
names = filelist[i].split('_')
id_i = names[0]
for j in xrange(i+1, len(filelist)):
names = filelist[j].split('_')
id_j = names[0]
if id_j == id_i:
p_pairs.append([filelist[i],filelist[j]])
p_pairs.append([filelist[j],filelist[i]]) # if two streams share the same weights, no need switch
if len(p_pairs)%100000==0:
print(len(p_pairs))
elif j%2000==0 and id_j != id_i: # subsample negative pairs (only every 2000th j), otherwise it costs too much time
n_pairs.append([filelist[i],filelist[j]])
# n_pairs.append([filelist[j],filelist[i]]) # two streams share the same weights, no need switch
if len(n_pairs)%100000==0:
print(len(n_pairs))
print('repeat positive pairs augment_ratio times and cut down negative pairs to balance data ......')
p_pairs = p_pairs * augment_ratio
random.shuffle(n_pairs)
n_pairs = n_pairs[:len(p_pairs)]
print('p_pairs length:%d' % len(p_pairs))
print('n_pairs length:%d' % len(n_pairs))
print('save p_pairs and n_pairs ......')
with open(p_pairs_path,'w') as f:
pickle.dump(p_pairs,f)
with open(n_pairs_path,'w') as f:
pickle.dump(n_pairs,f)
print('_get_train_all_pn_pairs finish ......')
print('p_pairs length:%d' % len(p_pairs))
print('n_pairs length:%d' % len(n_pairs))
print('save pn_pairs_num ......')
pn_pairs_num = len(p_pairs) + len(n_pairs)
if split_name=='train_flip':
fpath = os.path.join(out_dir, 'pn_pairs_num_train_flip.p')
else:
fpath = os.path.join(out_dir, 'pn_pairs_num_'+split_name.split('_')[0]+'.p')
with open(fpath,'w') as f:
pickle.dump(pn_pairs_num,f)
return p_pairs, n_pairs
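# Each pair is a two-element list of image filenames, e.g. under 'same_diff_cam' a positive
# pair looks like ['0001_c1_01.png', '0001_c2_03.png'] (same leading identity) and a negative
# pair mixes two different identities; the exact filename pattern here is only illustrative.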
def _get_train_all_pn_pairs_other(dataset_dir, dataset_dir_other, out_dir, split_name='train', augment_ratio=1, mode='one_to_all'):
"""Returns a list of pair image filenames.
Args:
dataset_dir: A directory containing person images.
Returns:
p_pairs: A list of positive pairs.
n_pairs: A list of negative pairs.
"""
assert split_name in {'test_seq_other'}
# if split_name=='train_flip':
# p_pairs_path = os.path.join(out_dir, 'p_pairs_train_flip.p')
# n_pairs_path = os.path.join(out_dir, 'n_pairs_train_flip.p')
# else:
p_pairs_path = os.path.join(out_dir, 'p_pairs_'+split_name.split('_')[0]+'.p')
n_pairs_path = os.path.join(out_dir, 'n_pairs_'+split_name.split('_')[0]+'.p')
if os.path.exists(p_pairs_path):
with open(p_pairs_path,'r') as f:
p_pairs = pickle.load(f)
with open(n_pairs_path,'r') as f:
n_pairs = pickle.load(f)
else:
filelist = _get_image_file_list(dataset_dir, 'test_seq')
filelist_other = _get_image_file_list(dataset_dir_other, 'test_seq')
filenames = []
p_pairs = []
n_pairs = []
# pdb.set_trace()
if 'one_to_all'==mode:
for i in xrange(1):
for j in xrange(0, len(filelist_other)):
p_pairs.append([filelist[i],filelist_other[j]])
if len(p_pairs)%100000==0:
print(len(p_pairs))
print('repeat positive pairs augment_ratio times and cut down negative pairs to balance data ......')
p_pairs = p_pairs * augment_ratio
random.shuffle(n_pairs)
n_pairs = n_pairs[:len(p_pairs)]
print('p_pairs length:%d' % len(p_pairs))
print('n_pairs length:%d' % len(n_pairs))
print('save p_pairs and n_pairs ......')
with open(p_pairs_path,'w') as f:
pickle.dump(p_pairs,f)
with open(n_pairs_path,'w') as f:
pickle.dump(n_pairs,f)
print('_get_train_all_pn_pairs finish ......')
print('p_pairs length:%d' % len(p_pairs))
print('n_pairs length:%d' % len(n_pairs))
print('save pn_pairs_num ......')
pn_pairs_num = len(p_pairs) + len(n_pairs)
if split_name=='train_flip':
fpath = os.path.join(out_dir, 'pn_pairs_num_train_flip.p')
else:
fpath = os.path.join(out_dir, 'pn_pairs_num_'+split_name.split('_')[0]+'.p')
with open(fpath,'w') as f:
pickle.dump(pn_pairs_num,f)
return p_pairs, n_pairs
##################### one_pair_rec ###############
import scipy.io
import scipy.stats
import skimage.morphology
from skimage.morphology import square, dilation, erosion
def _getPoseMask_COCO(RCV, height, width, radius=4, var=4, mode='Solid'):
## MSCOCO Pose part_str = [nose, neck, Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Rkne, Rank, Lhip, Lkne, Lank, Leye, Reye, Lear, Rear, pt19]
# find connection in the specified sequence, center 29 is in the position 15
# limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
# [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
# [1,16], [16,18], [3,17], [6,18]]
# limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
# [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
# [1,16], [16,18]] # , [9,12]
# limbSeq = [[3,4], [4,5], [6,7], [7,8], [9,10], \
# [10,11], [12,13], [13,14], [2,1], [1,15], [15,17], \
# [1,16], [16,18]] #
limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
[10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
[1,16], [16,18], [2,17], [2,18], [9,12], [12,6], [9,3], [17,18]] #
indices = []
values = []
for limb in limbSeq:
r0,c0,v0 = RCV[limb[0]-1, :]
r1,c1,v1 = RCV[limb[1]-1, :]
if 0!=v0 and 0!=v1:
ind, val = _getSparseKeypoint(r0, c0, 0, height, width, radius, var, mode)
indices.extend(ind)
values.extend(val)
ind, val = _getSparseKeypoint(r1, c1, 0, height, width, radius, var, mode)
indices.extend(ind)
values.extend(val)
distance = np.sqrt((r0-r1)**2 + (c0-c1)**2)
sampleN = int(distance/radius)
if sampleN>1:
for i in xrange(1,sampleN):
r = r0 + (r1-r0)*i/sampleN
c = c0 + (c1-c0)*i/sampleN
ind, val = _getSparseKeypoint(r, c, 0, height, width, radius, var, mode)
indices.extend(ind)
values.extend(val)
shape = [height, width, 1]
## Fill body
dense = np.squeeze(_sparse2dense(indices, values, shape))
dense = dilation(dense, square(5))
dense = erosion(dense, square(5))
return dense
def _get_part_bbox_COCO(RCV, img_path=None, idx=None, img_H=256, img_W=256):
## Generate body region proposals
## MSCOCO Pose part_str = [nose, neck, Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Rkne, Rank, Lhip, Lkne, Lank, Leye, Reye, Lear, Rear, pt19]
## part1: nose, neck, Rsho, Lsho, Leye, Reye, Lear, Rear [0,1,2,5,14,15,16,17]
## part2: Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Lhip [2,3,4,5,6,7,8,11]
## part3: Rhip, Rkne, Rank, Lhip, Lkne, Lank [8,9,10,11,12,13]
## part4: Lsho, Lelb, Lwri [3,6,7]
## part5: Rsho, Relb, Rwri [2,4,5]
## part6: Lhip, Lkne, Lank [11,12,13]
## part7: Rhip, Rkne, Rank [8,9,10]
###################################
## part8: Rsho, Lsho, Rhip, Lhip [2,5,8,11]
## part9: Lsho, Lelb [5,6]
## part10: Lelb, Lwri [6,7]
## part11: Rsho, Relb [2,3]
## part12: Relb, Rwri [3,4]
## part13: Lhip, Lkne [11,12]
## part14: Lkne, Lank [12,13]
## part15: Rhip, Rkne [8,9]
## part16: Rkne, Rank [9,10]
## part17: WholeBody range(0,18)
## part18-36: single key point [0],...,[17]
## part36: Rsho, Relb, Rwri, Rhip, Rkne, Rank [2,3,4,8,9,10]
## part37: Lsho, Lelb, Lwri, Lhip, Lkne, Lank [5,6,7,11,12,13]
part_idx_list_all = [ [0,1,2,5,14,15,16,17], ## part1: nose, neck, Rsho, Lsho, Leye, Reye, Lear, Rear
[2,3,4,5,6,7,8,11], ## part2: Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Lhip
[8,9,10,11,12,13], ## part3: Rhip, Rkne, Rank, Lhip, Lkne, Lank
[5,6,7], ## part4: Lsho, Lelb, Lwri
[2,3,4], ## part5: Rsho, Relb, Rwri
[11,12,13], ## part6: Lhip, Lkne, Lank
[8,9,10], ## part7: Rhip, Rkne, Rank
[2,5,8,11], ## part8: Rsho, Lsho, Rhip, Lhip
[5,6], ## part9: Lsho, Lelb
[6,7], ## part10: Lelb, Lwri
[2,3], ## part11: Rsho, Relb
[3,4], ## part12: Relb, Rwri
[11,12], ## part13: Lhip, Lkne
[12,13], ## part14: Lkne, Lank
[8,9], ## part15: Rhip, Rkne
[9,10], ## part16: Rkne, Rank
range(0,18) ] ## part17: WholeBody
part_idx_list_all.extend([[i] for i in range(0,18)]) ## part18-35: single key point
part_idx_list_all.extend([ [2,3,4,8,9,10], ## part36: Rsho, Relb, Rwri, Rhip, Rkne, Rank
[5,6,7,11,12,13]]) ## part37: Lsho, Lelb, Lwri, Lhip, Lkne, Lank
# part_idx_list = [part_idx_list_all[i] for i in [0,1,2,3,4,5,6,7,8,16]] ## select >3 keypoints
part_idx_list = part_idx_list_all ## select all
part_bbox_list = [] ## bbox: normalized coordinates [y1, x1, y2, x2]
visibility_list = []
## Judge wheather it's whole body or not
for ii in range(len(part_idx_list)):
part_idx = part_idx_list[ii]
xs = []
ys = []
select_rcv_list = [RCV[i,:] for i in part_idx]
for rcv in select_rcv_list:
r,c,v = rcv
if v:
xs.append(c)
ys.append(r)
if len(xs)==0:
visibility_list.append(0)
else:
visibility_list.append(1)
if visibility_list[13] and visibility_list[15]:
## Whole body that includes the following two parts
## part14: Lkne, Lank [12,13]
## part16: Rkne, Rank [9,10]
WholeBody = True
r = 10
r_single = 20
else:
WholeBody = False
r = 20
r_single = 40
for ii in range(len(part_idx_list)):
part_idx = part_idx_list[ii]
xs = []
ys = []
select_rcv_list = [RCV[i,:] for i in part_idx]
for part_id in part_idx:
r,c,v = RCV[part_id,:]
if v:
x = c
y = r
if part_id in [2,5]: ## head+shoulder
pass
if part_id in [0]: ## enlarge the head roi mask
if WholeBody:
y = max(0,y-10)
else:
y = max(0,y-25)
elif part_id in [3,4,6,35,36]: ## enlarge the wrist and ankle roi mask
# if WholeBody:
# y = min(img_H-1,y+5)
# else:
# y = min(img_H-1,y+10)
pass
# if not WholeBody:
# y1_t = max(0,y1_t-5)
# x1_t = max(0,x1_t-5)
# y2_t = min(img_H-1,y2_t+5)
# x2_t = min(img_W-1,x2_t+5)
xs.append(x)
ys.append(y)
if len(xs)==0:
part_bbox_list.append([0,0,1,1])
else:
y1 = np.array(ys).min()
x1 = np.array(xs).min()
y2 = np.array(ys).max()
x2 = np.array(xs).max()
if len(xs)>1:
y1 = max(0,y1-r)
x1 = max(0,x1-r)
y2 = min(img_H-1,y2+r)
x2 = min(img_W-1,x2+r)
else:
y1 = max(0,y1-r_single)
x1 = max(0,x1-r_single)
y2 = min(img_H-1,y2+r_single)
x2 = min(img_W-1,x2+r_single)
part_bbox_list.append([y1, x1, y2, x2])
if idx is not None:
img = scipy.misc.imread(img_path)
scipy.misc.imsave('%04d_part%d.jpg'%(idx,ii+1), img[y1:y2,x1:x2,:])
if idx is not None:
scipy.misc.imsave('%04d_part_whole.jpg'%idx, img)
return part_bbox_list, visibility_list
Ratio_0_4 = 1.0/scipy.stats.norm(0, 4).pdf(0)
Gaussian_0_4 = scipy.stats.norm(0, 4)
def _getSparseKeypoint(r, c, k, height, width, radius=4, var=4, mode='Solid'):
r = int(r)
c = int(c)
k = int(k)
indices = []
values = []
for i in range(-radius, radius+1):
for j in range(-radius, radius+1):
distance = np.sqrt(float(i**2+j**2))
if r+i>=0 and r+i<height and c+j>=0 and c+j<width:
if 'Solid'==mode and distance<=radius:
indices.append([r+i, c+j, k])
values.append(1)
elif 'Gaussian'==mode and distance<=radius:
indices.append([r+i, c+j, k])
if 4==var:
values.append( Gaussian_0_4.pdf(distance) * Ratio_0_4 )
else:
assert False, 'Only define Ratio_0_4 Gaussian_0_4 ...'
return indices, values
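# Example: with mode='Solid' and radius=4, the keypoint at (r, c) becomes a filled disc of
# radius 4 in channel k (every in-bounds pixel within Euclidean distance 4 gets value 1);
# with mode='Gaussian' the values decay with distance from the keypoint instead.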
def _getSparsePose(rcv, height, width, channel, radius=4, var=4, mode='Solid'):
indices = []
values = []
for k in range(rcv.shape[0]):
r,c,v = rcv[k, :]
if v:
ind, val = _getSparseKeypoint(r, c, k, height, width, radius, var, mode)
indices.extend(ind)
values.extend(val)
shape = [height, width, channel]
return indices, values, shape
def _oneDimSparsePose(indices, shape):
ind_onedim = []
for ind in indices:
# idx = ind[2]*shape[0]*shape[1] + ind[1]*shape[0] + ind[0]
idx = ind[0]*shape[2]*shape[1] + ind[1]*shape[2] + ind[2]
ind_onedim.append(idx)
shape = np.prod(shape)
return ind_onedim, shape
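# The [r, c, k] indices are flattened in row-major order: idx = r*width*channel + c*channel + k.
# E.g. for shape [256, 256, 18], the entry (r=1, c=2, k=3) maps to 1*256*18 + 2*18 + 3 = 4647.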
def _sparse2dense(indices, values, shape):
dense = np.zeros(shape)
for i in range(len(indices)):
r = indices[i][0]
c = indices[i][1]
k = indices[i][2]
dense[r,c,k] = values[i]
return dense
def _get_valid_peaks(all_peaks, subsets):
try:
subsets = subsets.tolist()
valid_idx = -1
valid_score = -1
for i, subset in enumerate(subsets):
score = subset[-2]
# for s in subset:
# if s > -1:
# cnt += 1
if score > valid_score:
valid_idx = i
valid_score = score
if valid_idx>=0:
peaks = []
cand_id_list = subsets[valid_idx][:18]
for ap in all_peaks:
valid_p = []
for p in ap:
if p[-1] in cand_id_list:
valid_p = p
peaks.append(valid_p)
# if subsets[valid_idx][i] > -1:
# kk = 0
# for j in xrange(valid_idx):
# if subsets[j][i] > -1:
# kk += 1
# peaks.append(all_peaks[i][kk])
# else:
# peaks.append([])
return all_peaks
else:
return None
except:
# pdb.set_trace()
return None
import matplotlib.pyplot as plt
import scipy.misc
def _visualizePose(pose, img):
# pdb.set_trace()
if 3==len(pose.shape):
pose = pose.max(axis=-1, keepdims=True)
pose = np.tile(pose, (1,1,3))
elif 2==len(pose.shape):
pose = np.expand_dims(pose, -1)
pose = np.tile(pose, (1,1,3))
imgShow = ((pose.astype(np.float)+1)/2.0*img.astype(np.float)).astype(np.uint8)
plt.imshow(imgShow)
plt.show()
def _format_data(sess, image_reader, folder_path, pairs, i, labels, id_map, attr_mat, id_map_attr,
pose_RCV_dic, pose_order='COCO', FiltOutMissRegion=False):
# Read the filename:
img_path_0 = os.path.join(folder_path, pairs[i][0])
img_path_1 = os.path.join(folder_path, pairs[i][1])
id_0 = pairs[i][0].split('_')[0]
id_1 = pairs[i][1].split('_')[0]
image_raw_0 = tf.gfile.FastGFile(img_path_0, 'rb').read()
image_raw_1 = tf.gfile.FastGFile(img_path_1, 'rb').read()
height, width = image_reader.read_image_dims(sess, image_raw_0)
attrs_0 = []
attrs_1 = []
if attr_mat is not None:
idx_0 = id_map_attr[id_0]
idx_1 = id_map_attr[id_1]
for name in attr_mat.dtype.names:
attrs_0.append(attr_mat[(name)][0][0][0][idx_0])
attrs_1.append(attr_mat[(name)][0][0][0][idx_1])
########################## Pose RCV [Row Column Visible] ##########################
## Pose RCV
if 'COCO'==pose_order:
pose_peaks_0_rcv = np.zeros([18,3])
pose_peaks_1_rcv = np.zeros([18,3])
getPoseMask_fn = _getPoseMask_COCO
get_part_bbox_fn = _get_part_bbox_COCO
else:
raise ValueError('Non-valid pose keypoint order! Use \'COCO\'')
#
pose_subs_0 = []
pose_subs_1 = []
# pdb.set_trace()
if (pose_RCV_dic is not None) and (pairs[i][0] in pose_RCV_dic) and (pairs[i][1] in pose_RCV_dic):
## Pose 0
pose_peaks_0_rcv = pose_RCV_dic[pairs[i][0]]
indices_r4_0, values_r4_0, shape = _getSparsePose(pose_peaks_0_rcv, height, width, 18, radius=4, mode='Solid')
indices_r4_0, shape_0 = _oneDimSparsePose(indices_r4_0, shape)
indices_r8_0, values_r8_0, shape = _getSparsePose(pose_peaks_0_rcv, height, width, 18, radius=8, mode='Solid')
indices_r8_0, _ = _oneDimSparsePose(indices_r8_0, shape)
pose_mask_r4_0 = getPoseMask_fn(pose_peaks_0_rcv, height, width, radius=4, mode='Solid')
pose_mask_r8_0 = getPoseMask_fn(pose_peaks_0_rcv, height, width, radius=8, mode='Solid')
pose_mask_r10_0 = getPoseMask_fn(pose_peaks_0_rcv, height, width, radius=10, mode='Solid')
## Generate body region proposals
part_bbox_list_0, visibility_list_0 = get_part_bbox_fn(pose_peaks_0_rcv, img_path_0)
if FiltOutMissRegion and (0 in visibility_list_0):
return None
## Pose 1
pose_peaks_1_rcv = pose_RCV_dic[pairs[i][1]]
indices_r4_1, values_r4_1, shape = _getSparsePose(pose_peaks_1_rcv, height, width, 18, radius=4, mode='Solid')
indices_r4_1, shape_1 = _oneDimSparsePose(indices_r4_1, shape)
indices_r8_1, values_r8_1, shape = _getSparsePose(pose_peaks_1_rcv, height, width, 18, radius=8, mode='Solid')
indices_r8_1, _ = _oneDimSparsePose(indices_r8_1, shape)
pose_mask_r4_1 = getPoseMask_fn(pose_peaks_1_rcv, height, width, radius=4, mode='Solid')
pose_mask_r8_1 = getPoseMask_fn(pose_peaks_1_rcv, height, width, radius=8, mode='Solid')
pose_mask_r10_1 = getPoseMask_fn(pose_peaks_1_rcv, height, width, radius=10, mode='Solid')
## Generate body region proposals
part_bbox_list_1, visibility_list_1 = get_part_bbox_fn(pose_peaks_1_rcv, img_path_1)
if FiltOutMissRegion and (0 in visibility_list_1):
return None
###### Visualize ######
# _visualizePose(pose_mask_r4_0, scipy.misc.imread(img_path_0))
# _visualizePose(pose_mask_r8_0, scipy.misc.imread(img_path_0))
# _visualizePose(pose_mask_r9_0, scipy.misc.imread(img_path_0))
# _visualizePose(pose_mask_r10_0, scipy.misc.imread(img_path_0))
# dense = _sparse2dense(indices_r4_0, values_r4_0, shape)
# if i in [0,5]:
# _visualizePose(roi_mask_list_0[0], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[1], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[2], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[3], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[4], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[5], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[6], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[7], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[8], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[9], scipy.misc.imread(img_path_0))
# pdb.set_trace()
else:
return None
example = tf.train.Example(features=tf.train.Features(feature={
'image_name_0': dataset_utils.bytes_feature(pairs[i][0]),
'image_name_1': dataset_utils.bytes_feature(pairs[i][1]),
'image_raw_0': dataset_utils.bytes_feature(image_raw_0),
'image_raw_1': dataset_utils.bytes_feature(image_raw_1),
'label': dataset_utils.int64_feature(labels[i]),
'id_0': dataset_utils.int64_feature(id_map[id_0]),
'id_1': dataset_utils.int64_feature(id_map[id_1]),
'cam_0': dataset_utils.int64_feature(-1),
'cam_1': dataset_utils.int64_feature(-1),
'image_format': dataset_utils.bytes_feature(_IMG_PATTERN),
'image_height': dataset_utils.int64_feature(height),
'image_width': dataset_utils.int64_feature(width),
'real_data': dataset_utils.int64_feature(1),
'attrs_0': dataset_utils.int64_feature(attrs_0),
'attrs_1': dataset_utils.int64_feature(attrs_1),
'pose_peaks_0_rcv': dataset_utils.float_feature(pose_peaks_0_rcv.flatten().tolist()),
'pose_peaks_1_rcv': dataset_utils.float_feature(pose_peaks_1_rcv.flatten().tolist()),
'pose_mask_r4_0': dataset_utils.int64_feature(pose_mask_r4_0.astype(np.int64).flatten().tolist()),
'pose_mask_r4_1': dataset_utils.int64_feature(pose_mask_r4_1.astype(np.int64).flatten().tolist()),
'pose_mask_r8_0': dataset_utils.int64_feature(pose_mask_r8_0.astype(np.int64).flatten().tolist()),
'pose_mask_r8_1': dataset_utils.int64_feature(pose_mask_r8_1.astype(np.int64).flatten().tolist()),
'pose_mask_r10_0': dataset_utils.int64_feature(pose_mask_r10_0.astype(np.int64).flatten().tolist()),
'pose_mask_r10_1': dataset_utils.int64_feature(pose_mask_r10_1.astype(np.int64).flatten().tolist()),
'shape': dataset_utils.int64_feature(shape_0),
'indices_r4_0': dataset_utils.int64_feature(np.array(indices_r4_0).astype(np.int64).flatten().tolist()),
'values_r4_0': dataset_utils.float_feature(np.array(values_r4_0).astype(np.float).flatten().tolist()),
'indices_r4_1': dataset_utils.int64_feature(np.array(indices_r4_1).astype(np.int64).flatten().tolist()),
'values_r4_1': dataset_utils.float_feature(np.array(values_r4_1).astype(np.float).flatten().tolist()),
'indices_r8_0': dataset_utils.int64_feature(np.array(indices_r8_0).astype(np.int64).flatten().tolist()),
'values_r8_0': dataset_utils.float_feature(np.array(values_r8_0).astype(np.float).flatten().tolist()),
'indices_r8_1': dataset_utils.int64_feature(np.array(indices_r8_1).astype(np.int64).flatten().tolist()),
'values_r8_1': dataset_utils.float_feature(np.array(values_r8_1).astype(np.float).flatten().tolist()),
# 'pose_subs_0': dataset_utils.float_feature(pose_subs_0),
# 'pose_subs_1': dataset_utils.float_feature(pose_subs_1),
'part_bbox_0': dataset_utils.int64_feature(np.array(part_bbox_list_0).astype(np.int64).flatten().tolist()),
'part_bbox_1': dataset_utils.int64_feature(np.array(part_bbox_list_1).astype(np.int64).flatten().tolist()),
'part_vis_0': dataset_utils.int64_feature(np.array(visibility_list_0).astype(np.int64).flatten().tolist()),
'part_vis_1': dataset_utils.int64_feature(np.array(visibility_list_1).astype(np.int64).flatten().tolist()),
}))
return example
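# A record written above can be read back roughly as follows (a sketch using the TF1
# tf.parse_single_example API and only a subset of the keys defined above):
#   keys_to_features = {
#       'image_raw_0': tf.FixedLenFeature([], tf.string),
#       'image_raw_1': tf.FixedLenFeature([], tf.string),
#       'pose_peaks_0_rcv': tf.FixedLenFeature([18*3], tf.float32),
#       'image_height': tf.FixedLenFeature([], tf.int64),
#       'image_width': tf.FixedLenFeature([], tf.int64),
#   }
#   parsed = tf.parse_single_example(serialized_example, keys_to_features)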
def _format_data_other(sess, image_reader, folder_path, folder_path_other, pairs, i, labels, id_map, attr_mat, id_map_attr,
pose_RCV_dic, pose_RCV_dic_other, pose_order='COCO', FiltOutMissRegion=False):
# Read the filename:
img_path_0 = os.path.join(folder_path, pairs[i][0])
img_path_1 = os.path.join(folder_path_other, pairs[i][1])
id_0 = pairs[i][0].split('_')[0]
id_1 = pairs[i][1].split('_')[0]
image_raw_0 = tf.gfile.FastGFile(img_path_0, 'rb').read()
image_raw_1 = tf.gfile.FastGFile(img_path_1, 'rb').read()
height, width = image_reader.read_image_dims(sess, image_raw_0)
attrs_0 = []
attrs_1 = []
if attr_mat is not None:
idx_0 = id_map_attr[id_0]
idx_1 = id_map_attr[id_1]
for name in attr_mat.dtype.names:
attrs_0.append(attr_mat[(name)][0][0][0][idx_0])
attrs_1.append(attr_mat[(name)][0][0][0][idx_1])
########################## Pose RCV [Row Column Visible] ##########################
## Pose RCV
if 'COCO'==pose_order:
pose_peaks_0_rcv = np.zeros([18,3])
pose_peaks_1_rcv = np.zeros([18,3])
getPoseMask_fn = _getPoseMask_COCO
get_part_bbox_fn = _get_part_bbox_COCO
else:
raise ValueError('Non-valid pose keypoint order! Use \'COCO\'')
#
pose_subs_0 = []
pose_subs_1 = []
# pdb.set_trace()
if (pose_RCV_dic is not None) and (pairs[i][0] in pose_RCV_dic) and (pairs[i][1] in pose_RCV_dic_other):
## Pose 0
pose_peaks_0_rcv = pose_RCV_dic[pairs[i][0]]
indices_r4_0, values_r4_0, shape = _getSparsePose(pose_peaks_0_rcv, height, width, 18, radius=4, mode='Solid')
indices_r4_0, shape_0 = _oneDimSparsePose(indices_r4_0, shape)
indices_r8_0, values_r8_0, shape = _getSparsePose(pose_peaks_0_rcv, height, width, 18, radius=8, mode='Solid')
indices_r8_0, _ = _oneDimSparsePose(indices_r8_0, shape)
pose_mask_r4_0 = getPoseMask_fn(pose_peaks_0_rcv, height, width, radius=4, mode='Solid')
pose_mask_r8_0 = getPoseMask_fn(pose_peaks_0_rcv, height, width, radius=8, mode='Solid')
pose_mask_r10_0 = getPoseMask_fn(pose_peaks_0_rcv, height, width, radius=10, mode='Solid')
## Generate body region proposals
part_bbox_list_0, visibility_list_0 = get_part_bbox_fn(pose_peaks_0_rcv, img_path_0)
if FiltOutMissRegion and (0 in visibility_list_0):
return None
## Pose 1
pose_peaks_1_rcv = pose_RCV_dic_other[pairs[i][1]]
indices_r4_1, values_r4_1, shape = _getSparsePose(pose_peaks_1_rcv, height, width, 18, radius=4, mode='Solid')
indices_r4_1, shape_1 = _oneDimSparsePose(indices_r4_1, shape)
indices_r8_1, values_r8_1, shape = _getSparsePose(pose_peaks_1_rcv, height, width, 18, radius=8, mode='Solid')
indices_r8_1, _ = _oneDimSparsePose(indices_r8_1, shape)
pose_mask_r4_1 = getPoseMask_fn(pose_peaks_1_rcv, height, width, radius=4, mode='Solid')
pose_mask_r8_1 = getPoseMask_fn(pose_peaks_1_rcv, height, width, radius=8, mode='Solid')
pose_mask_r10_1 = getPoseMask_fn(pose_peaks_1_rcv, height, width, radius=10, mode='Solid')
## Generate body region proposals
part_bbox_list_1, visibility_list_1 = get_part_bbox_fn(pose_peaks_1_rcv, img_path_1)
if FiltOutMissRegion and (0 in visibility_list_1):
return None
###### Visualize ######
# _visualizePose(pose_mask_r4_0, scipy.misc.imread(img_path_0))
# _visualizePose(pose_mask_r8_0, scipy.misc.imread(img_path_0))
# _visualizePose(pose_mask_r9_0, scipy.misc.imread(img_path_0))
# _visualizePose(pose_mask_r10_0, scipy.misc.imread(img_path_0))
# dense = _sparse2dense(indices_r4_0, values_r4_0, shape)
# if i in [0,5]:
# _visualizePose(roi_mask_list_0[0], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[1], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[2], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[3], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[4], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[5], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[6], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[7], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[8], scipy.misc.imread(img_path_0))
# _visualizePose(roi_mask_list_0[9], scipy.misc.imread(img_path_0))
# pdb.set_trace()
else:
return None
example = tf.train.Example(features=tf.train.Features(feature={
'image_name_0': dataset_utils.bytes_feature(pairs[i][0]),
'image_name_1': dataset_utils.bytes_feature(pairs[i][1]),
'image_raw_0': dataset_utils.bytes_feature(image_raw_0),
'image_raw_1': dataset_utils.bytes_feature(image_raw_1),
'label': dataset_utils.int64_feature(labels[i]),
'id_0': dataset_utils.int64_feature(id_map[id_0]),
'id_1': dataset_utils.int64_feature(id_map[id_1]),
'cam_0': dataset_utils.int64_feature(-1),
'cam_1': dataset_utils.int64_feature(-1),
'image_format': dataset_utils.bytes_feature(_IMG_PATTERN),
'image_height': dataset_utils.int64_feature(height),
'image_width': dataset_utils.int64_feature(width),
'real_data': dataset_utils.int64_feature(1),
'attrs_0': dataset_utils.int64_feature(attrs_0),
'attrs_1': dataset_utils.int64_feature(attrs_1),
'pose_peaks_0_rcv': dataset_utils.float_feature(pose_peaks_0_rcv.flatten().tolist()),
'pose_peaks_1_rcv': dataset_utils.float_feature(pose_peaks_1_rcv.flatten().tolist()),
'pose_mask_r4_0': dataset_utils.int64_feature(pose_mask_r4_0.astype(np.int64).flatten().tolist()),
'pose_mask_r4_1': dataset_utils.int64_feature(pose_mask_r4_1.astype(np.int64).flatten().tolist()),
'pose_mask_r8_0': dataset_utils.int64_feature(pose_mask_r8_0.astype(np.int64).flatten().tolist()),
'pose_mask_r8_1': dataset_utils.int64_feature(pose_mask_r8_1.astype(np.int64).flatten().tolist()),
'pose_mask_r10_0': dataset_utils.int64_feature(pose_mask_r10_0.astype(np.int64).flatten().tolist()),
'pose_mask_r10_1': dataset_utils.int64_feature(pose_mask_r10_1.astype(np.int64).flatten().tolist()),
'shape': dataset_utils.int64_feature(shape_0),
'indices_r4_0': dataset_utils.int64_feature(np.array(indices_r4_0).astype(np.int64).flatten().tolist()),
'values_r4_0': dataset_utils.float_feature(np.array(values_r4_0).astype(np.float).flatten().tolist()),
'indices_r4_1': dataset_utils.int64_feature(np.array(indices_r4_1).astype(np.int64).flatten().tolist()),
'values_r4_1': dataset_utils.float_feature(np.array(values_r4_1).astype(np.float).flatten().tolist()),
'indices_r8_0': dataset_utils.int64_feature(np.array(indices_r8_0).astype(np.int64).flatten().tolist()),
'values_r8_0': dataset_utils.float_feature(np.array(values_r8_0).astype(np.float).flatten().tolist()),
'indices_r8_1': dataset_utils.int64_feature(np.array(indices_r8_1).astype(np.int64).flatten().tolist()),
'values_r8_1': dataset_utils.float_feature(np.array(values_r8_1).astype(np.float).flatten().tolist()),
# 'pose_subs_0': dataset_utils.float_feature(pose_subs_0),
# 'pose_subs_1': dataset_utils.float_feature(pose_subs_1),
'part_bbox_0': dataset_utils.int64_feature(np.array(part_bbox_list_0).astype(np.int64).flatten().tolist()),
'part_bbox_1': dataset_utils.int64_feature(np.array(part_bbox_list_1).astype(np.int64).flatten().tolist()),
'part_vis_0': dataset_utils.int64_feature(np.array(visibility_list_0).astype(np.int64).flatten().tolist()),
'part_vis_1': dataset_utils.int64_feature(np.array(visibility_list_1).astype(np.int64).flatten().tolist()),
}))
return example
def _convert_dataset_one_pair_rec(out_dir, split_name, pairs, labels, dataset_dir,
pose_RCV_path=None, tf_record_pair_num=np.inf, USE_FLIP=False):
"""Converts the given pairs to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
pairs: A list of image name pairs.
labels: label list to indicate positive(1) or negative(0)
dataset_dir: The directory where the converted datasets are stored.
"""
# num_shards = _NUM_SHARDS
num_shards = 1
assert split_name in ['train', 'test', 'test_seq']
num_per_shard = int(math.ceil(len(pairs) / float(num_shards)))
folder_path = _get_folder_path(dataset_dir, split_name)
# Load attr mat file
attr_mat = None
id_map_attr = None
# Load pose pickle file
pose_RCV_dic = None
with open(pose_RCV_path, 'r') as f:
pose_RCV_dic = pickle.load(f)
if USE_FLIP:
## Create pair_flip, label_flip
combined = list(zip(pairs, labels))
random.shuffle(combined)
pairs_flip, labels_flip = zip(*combined)
## Create folder_path_flip
folder_path_flip = folder_path + '_flip'
if not os.path.exists(folder_path_flip):
os.makedirs(folder_path_flip)
for filename in os.listdir(folder_path):
img = scipy.misc.imread(os.path.join(folder_path, filename))
img_flip = img[:,::-1,:]
scipy.misc.imsave(os.path.join(folder_path_flip, filename), img_flip)
## Create pose_RCV_dic_flip
height, width, _ = scipy.misc.imread(os.path.join(folder_path, pairs[0][0])).shape
pose_RCV_dic_flip = {}
for key in pose_RCV_dic:
RCV = pose_RCV_dic[key]
RCV_flip = RCV.copy() # RCV is a numpy array; a plain slice would return a view and corrupt the original
for k in range(RCV_flip.shape[0]):
r,c,v = RCV[k]
if v:
RCV_flip[k,1] = width - c
pose_RCV_dic_flip[key] = RCV_flip
# Transform ids to [0, ..., num_of_ids]
id_cnt = 0
id_map = {}
for i in range(0, len(pairs)):
id_0 = pairs[i][0].split('_')[0]
id_1 = pairs[i][1].split('_')[0]
if not id_map.has_key(id_0):
id_map[id_0] = id_cnt
id_cnt += 1
if not id_map.has_key(id_1):
id_map[id_1] = id_cnt
id_cnt += 1
print('id_map length:%d' % len(id_map))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(num_shards):
output_filename = _get_dataset_filename(
dataset_dir, out_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
cnt = 0
if USE_FLIP:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(pairs_flip))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(pairs_flip), shard_id))
sys.stdout.flush()
example = _format_data(sess, image_reader, folder_path_flip, pairs_flip, i, labels_flip, id_map, attr_mat, id_map_attr, pose_RCV_dic_flip)
if None==example:
continue
tfrecord_writer.write(example.SerializeToString())
cnt += 1
if cnt==tf_record_pair_num:
break
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(pairs))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(pairs), shard_id))
sys.stdout.flush()
example = _format_data(sess, image_reader, folder_path, pairs, i, labels, id_map, attr_mat, id_map_attr, pose_RCV_dic)
if None==example:
continue
tfrecord_writer.write(example.SerializeToString())
cnt += 1
if cnt==tf_record_pair_num:
break
sys.stdout.write('\n')
sys.stdout.flush()
print('cnt:',cnt)
with open(os.path.join(out_dir,'tf_record_pair_num.txt'),'w') as f:
f.write('cnt:%d' % cnt)
def _convert_dataset_one_pair_rec_other(out_dir, split_name, pairs, labels, dataset_dir, dataset_dir_other,
pose_RCV_path=None, pose_RCV_path_other=None, tf_record_pair_num=np.inf):
"""Converts the given pairs to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
pairs: A list of image name pairs.
labels: label list to indicate positive(1) or negative(0)
dataset_dir: The directory where the converted datasets are stored.
"""
# num_shards = _NUM_SHARDS
num_shards = 1
assert split_name in ['test_seq_other']
# pdb.set_trace()
num_per_shard = int(math.ceil(len(pairs) / float(num_shards)))
folder_path = _get_folder_path(dataset_dir, 'test_seq')
folder_path_other = _get_folder_path(dataset_dir_other, 'test_seq_other')
# Load attr mat file
attr_mat = None
id_map_attr = None
# Load pose pickle file
pose_RCV_dic = None
pose_RCV_dic_other = None
with open(pose_RCV_path, 'r') as f:
pose_RCV_dic = pickle.load(f)
with open(pose_RCV_path_other, 'r') as f:
pose_RCV_dic_other = pickle.load(f)
# Transform ids to [0, ..., num_of_ids]
id_cnt = 0
id_map = {}
for i in range(0, len(pairs)):
id_0 = pairs[i][0].split('_')[0]
id_1 = pairs[i][1].split('_')[0]
if not id_map.has_key(id_0):
id_map[id_0] = id_cnt
id_cnt += 1
if not id_map.has_key(id_1):
id_map[id_1] = id_cnt
id_cnt += 1
print('id_map length:%d' % len(id_map))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(num_shards):
output_filename = _get_dataset_filename(
dataset_dir, out_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
cnt = 0
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(pairs))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(pairs), shard_id))
sys.stdout.flush()
example = _format_data_other(sess, image_reader, folder_path, folder_path_other, pairs, i, labels, id_map, \
attr_mat, id_map_attr, pose_RCV_dic, pose_RCV_dic_other)
if None==example:
continue
tfrecord_writer.write(example.SerializeToString())
cnt += 1
if cnt==tf_record_pair_num:
break
sys.stdout.write('\n')
sys.stdout.flush()
print('cnt:',cnt)
with open(os.path.join(out_dir,'tf_record_pair_num.txt'),'w') as f:
f.write('cnt:%d' % cnt)
def run_one_pair_rec(dataset_dir, out_dir, split_name, dataset_dir_other=None):
# if not tf.gfile.Exists(dataset_dir):
# tf.gfile.MakeDirs(dataset_dir)
if split_name.lower()=='train':
pose_RCV_path = os.path.join(dataset_dir,'PoseRCV', split_name+'.pickle')
p_pairs, n_pairs = _get_train_all_pn_pairs(dataset_dir, out_dir,
split_name=split_name,
augment_ratio=1,
mode='each_img_once')
p_labels = [1]*len(p_pairs)
n_labels = [0]*len(n_pairs)
pairs = p_pairs
labels = p_labels
combined = list(zip(pairs, labels))
random.shuffle(combined)
pairs[:], labels[:] = zip(*combined)
_convert_dataset_one_pair_rec(out_dir, split_name, pairs, labels, dataset_dir,
pose_RCV_path=pose_RCV_path, USE_FLIP=False)
print('\nTrain convert Finished !')
elif split_name.lower()=='test':
pose_RCV_path = os.path.join(dataset_dir,'PoseRCV', split_name+'.pickle')
p_pairs, n_pairs = _get_train_all_pn_pairs(dataset_dir, out_dir,
split_name=split_name,
augment_ratio=1,
mode='same_diff_cam')
p_labels = [1]*len(p_pairs)
n_labels = [0]*len(n_pairs)
pairs = p_pairs
labels = p_labels
_convert_dataset_one_pair_rec(out_dir, split_name, pairs, labels, dataset_dir,
pose_RCV_path=pose_RCV_path, USE_FLIP=False)
print('\nTest convert Finished !')
elif split_name.lower()=='test_seq':
pose_RCV_path = os.path.join(dataset_dir,'PoseRCV', 'test.pickle')
p_pairs, n_pairs = _get_train_all_pn_pairs(dataset_dir, out_dir,
split_name=split_name,
augment_ratio=1,
mode='one_to_all')
p_labels = [1]*len(p_pairs)
n_labels = [0]*len(n_pairs)
pairs = p_pairs
labels = p_labels
_convert_dataset_one_pair_rec(out_dir, split_name, pairs, labels, dataset_dir,
pose_RCV_path=pose_RCV_path, USE_FLIP=False)
print('\nTest convert Finished !')
elif split_name.lower()=='test_seq_other':
assert dataset_dir_other is not None
pose_RCV_path = os.path.join(dataset_dir,'PoseRCV', 'test.pickle')
pose_RCV_path_other = os.path.join(dataset_dir_other,'PoseRCV', 'test.pickle')
p_pairs, n_pairs = _get_train_all_pn_pairs_other(dataset_dir, dataset_dir_other, out_dir,
split_name=split_name,
augment_ratio=1,
mode='one_to_all')
p_labels = [1]*len(p_pairs)
n_labels = [0]*len(n_pairs)
pairs = p_pairs
labels = p_labels
_convert_dataset_one_pair_rec_other(out_dir, split_name, pairs, labels, dataset_dir, dataset_dir_other,
pose_RCV_path=pose_RCV_path, pose_RCV_path_other=pose_RCV_path_other)
print('\nTest convert Finished !')
# if split_name.lower()=='test':
# # ================ Prepare test set ================
# pose_RCV_path = os.path.join(dataset_dir,'PoseRCV','pose_RCV_dic_DeepFashion.p')
# pose_sub_path = os.path.join(dataset_dir,'PoseRCV','subsets_dic_DeepFashion.p')
# p_pairs, n_pairs = _get_train_all_pn_pairs(dataset_dir, out_dir,
# split_name=split_name,
# augment_ratio=1,
# mode='same_diff_cam')
# p_labels = [1]*len(p_pairs)
# n_labels = [0]*len(n_pairs)
# pairs = p_pairs
# labels = p_labels
# combined = list(zip(pairs, labels))
# random.shuffle(combined)
# pairs[:], labels[:] = zip(*combined)
# ## Test will not use flip
# split_name_flip = None
# pairs_flip = None
# labels_flip = None
# _convert_dataset_one_pair_rec_withFlip(out_dir, split_name,split_name_flip, pairs, pairs_flip, labels, labels_flip,
# dataset_dir, pose_RCV_path=pose_RCV_path, pose_sub_path=pose_sub_path)
# print('\nTest samples convert Finished !')
if __name__ == '__main__':
dataset_dir = sys.argv[1]
split_name = sys.argv[2] ## 'train', 'test', 'test_seq', 'test_seq_other'
out_dir = os.path.join(dataset_dir, split_name)
dataset_dir_other = sys.argv[3] if len(sys.argv) > 3 else None # only needed for 'test_seq_other'
if not os.path.exists(out_dir):
os.mkdir(out_dir)
run_one_pair_rec(dataset_dir, out_dir, split_name, dataset_dir_other)
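# Example invocations (hypothetical paths; the script name is whatever this file is saved as):
#   python this_script.py /data/DeepFashion train
#   python this_script.py /data/DeepFashion test_seq_other /data/OtherDataset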
| 27,323 |
474 | package org.javacord.core.entity.message;
import com.fasterxml.jackson.databind.JsonNode;
import org.javacord.api.entity.DiscordEntity;
import org.javacord.api.entity.channel.TextChannel;
import org.javacord.api.entity.message.Message;
import org.javacord.api.entity.message.MessageSet;
import org.javacord.core.DiscordApiImpl;
import org.javacord.core.util.rest.RestEndpoint;
import org.javacord.core.util.rest.RestMethod;
import org.javacord.core.util.rest.RestRequest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.Optional;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.TreeSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
/**
* The implementation of {@link MessageSet}.
*/
public class MessageSetImpl implements MessageSet {
/**
* A read-only navigable set with all messages to let the JDK do the dirty work.
*/
private final NavigableSet<Message> messages;
/**
* Creates a new message set.
*
* @param messages The messages to be contained in this set.
*/
public MessageSetImpl(NavigableSet<Message> messages) {
this.messages = Collections.unmodifiableNavigableSet(messages);
}
/**
* Creates a new message set.
*
* @param messages The messages to be contained in this set.
*/
public MessageSetImpl(Collection<Message> messages) {
this(new TreeSet<>(messages));
}
/**
* Creates a new message set.
*
* @param messages The messages to be contained in this set.
*/
public MessageSetImpl(Message... messages) {
this(Arrays.asList(messages));
}
/**
* Gets up to a given amount of messages in the given channel from the newer end.
*
* @param channel The channel of the messages.
* @param limit The limit of messages to get.
* @return The messages.
* @see #getMessagesAsStream(TextChannel)
*/
public static CompletableFuture<MessageSet> getMessages(TextChannel channel, int limit) {
return getMessages(channel, limit, -1, -1);
}
/**
* Gets up to a given amount of messages in the given channel.
*
* @param channel The channel of the messages.
* @param limit The limit of messages to get.
* @param before Get messages before the message with this id.
* @param after Get messages after the message with this id.
*
* @return The messages.
* @see #getMessagesAsStream(TextChannel, long, long)
*/
private static CompletableFuture<MessageSet> getMessages(TextChannel channel, int limit, long before, long after) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
// get the initial batch with the first <= 100 messages
int initialBatchSize = ((limit % 100) == 0) ? 100 : limit % 100;
MessageSet initialMessages = requestAsMessages(channel, initialBatchSize, before, after);
// limit <= 100 => initial request got all messages
// initialMessages is empty => READ_MESSAGE_HISTORY permission is denied or no more messages available
if ((limit <= 100) || initialMessages.isEmpty()) {
future.complete(initialMessages);
return;
}
// calculate the amount and direction of remaining message to get
// this will be a multiple of 100 and at least 100
int remainingMessages = limit - initialBatchSize;
int steps = remainingMessages / 100;
// "before" is set or both are not set
boolean older = (before != -1) || (after == -1);
boolean newer = after != -1;
// get remaining messages
List<MessageSet> messageSets = new ArrayList<>();
MessageSet lastMessages = initialMessages;
messageSets.add(lastMessages);
for (int step = 0; step < steps; ++step) {
lastMessages = requestAsMessages(channel,
100,
lastMessages.getOldestMessage()
.filter(message -> older)
.map(DiscordEntity::getId)
.orElse(-1L),
lastMessages.getNewestMessage()
.filter(message -> newer)
.map(DiscordEntity::getId)
.orElse(-1L));
// no more messages available
if (lastMessages.isEmpty()) {
break;
}
messageSets.add(lastMessages);
}
// combine the message sets
future.complete(new MessageSetImpl(messageSets.stream()
.flatMap(Collection::stream)
.collect(Collectors.toList())));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
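// Example of the batching arithmetic above: for limit = 250 the initial request fetches
// 250 % 100 = 50 messages, leaving 200 to retrieve in steps = 2 further batches of 100,
// each anchored on the oldest (or newest) message id of the previous batch.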
/**
* Gets messages in the given channel from the newer end until one that meets the given condition is found.
* If no message matches the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The abort condition for when to stop retrieving messages.
* @return The messages.
* @see #getMessagesAsStream(TextChannel)
*/
public static CompletableFuture<MessageSet> getMessagesUntil(TextChannel channel, Predicate<Message> condition) {
return getMessagesUntil(channel, condition, -1, -1);
}
/**
* Gets messages in the given channel until one that meets the given condition is found.
* If no message matches the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The abort condition for when to stop retrieving messages.
* @param before Get messages before the message with this id.
* @param after Get messages after the message with this id.
*
* @return The messages.
*/
private static CompletableFuture<MessageSet> getMessagesUntil(
TextChannel channel, Predicate<Message> condition, long before, long after) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
List<Message> messages = new ArrayList<>();
Optional<Message> untilMessage =
getMessagesAsStream(channel, before, after).peek(messages::add).filter(condition).findFirst();
future.complete(new MessageSetImpl(untilMessage
.map(message -> messages)
.orElse(Collections.emptyList())));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
/**
* Gets messages in the given channel from the newer end while they meet the given condition.
* If the first message does not match the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The condition that has to be met.
* @return The messages.
* @see #getMessagesAsStream(TextChannel)
*/
public static CompletableFuture<MessageSet> getMessagesWhile(TextChannel channel, Predicate<Message> condition) {
return getMessagesWhile(channel, condition, -1, -1);
}
/**
* Gets messages in the given channel while they meet the given condition.
* If the first message does not match the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The condition that has to be met.
* @param before Get messages before the message with this id.
* @param after Get messages after the message with this id.
*
* @return The messages.
*/
private static CompletableFuture<MessageSet> getMessagesWhile(
TextChannel channel, Predicate<Message> condition, long before, long after) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
List<Message> messages = new ArrayList<>();
Optional<Message> untilMessage =
getMessagesAsStream(channel, before, after)
.peek(messages::add)
.filter(condition.negate())
.findFirst();
untilMessage.ifPresent(messages::remove);
future.complete(new MessageSetImpl(messages));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
/**
* Gets a stream of messages in the given channel sorted from newest to oldest.
*
* <p>The messages are retrieved in batches synchronously from Discord,
* so consider not using this method from a listener directly.
*
* @param channel The channel of the messages.
* @return The stream.
* @see #getMessages(TextChannel, int)
*/
public static Stream<Message> getMessagesAsStream(TextChannel channel) {
return getMessagesAsStream(channel, -1, -1);
}
/**
* Gets a stream of messages in the given channel sorted from newest to oldest.
*
* <p>The messages are retrieved in batches synchronously from Discord,
* so consider not using this method from a listener directly.
*
* @param channel The channel of the messages.
* @param before Get messages before the message with this id.
* @param after Get messages after the message with this id.
*
* @return The stream.
* @see #getMessages(TextChannel, int, long, long)
*/
private static Stream<Message> getMessagesAsStream(TextChannel channel, long before, long after) {
return StreamSupport.stream(Spliterators.spliteratorUnknownSize(new Iterator<Message>() {
private final DiscordApiImpl api = ((DiscordApiImpl) channel.getApi());
// before was set or both were not set
private final boolean older = (before != -1) || (after == -1);
private final boolean newer = after != -1;
private long referenceMessageId = older ? before : after;
private final List<JsonNode> messageJsons = Collections.synchronizedList(new ArrayList<>());
private void ensureMessagesAvailable() {
if (messageJsons.isEmpty()) {
synchronized (messageJsons) {
if (messageJsons.isEmpty()) {
messageJsons.addAll(requestAsSortedJsonNodes(
channel,
100,
older ? referenceMessageId : -1,
newer ? referenceMessageId : -1,
older
));
if (!messageJsons.isEmpty()) {
referenceMessageId = messageJsons.get(messageJsons.size() - 1).get("id").asLong();
}
}
}
}
}
@Override
public boolean hasNext() {
ensureMessagesAvailable();
return !messageJsons.isEmpty();
}
@Override
public Message next() {
ensureMessagesAvailable();
return api.getOrCreateMessage(channel, messageJsons.remove(0));
}
}, Spliterator.ORDERED | Spliterator.DISTINCT | Spliterator.NONNULL | Spliterator.CONCURRENT), false);
}
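// Note: the iterator above fetches messages lazily in batches of up to 100 per request,
// using the id of the last message in the previous batch as the next "before"/"after"
// anchor, so the stream only issues as many requests as its consumer actually consumes.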
/**
* Gets up to a given amount of messages in the given channel before a given message in any channel.
*
* @param channel The channel of the messages.
* @param limit The limit of messages to get.
* @param before Get messages before the message with this id.
* @return The messages.
* @see #getMessagesBeforeAsStream(TextChannel, long)
*/
public static CompletableFuture<MessageSet> getMessagesBefore(TextChannel channel, int limit, long before) {
return getMessages(channel, limit, before, -1);
}
/**
* Gets messages in the given channel before a given message in any channel until one that meets the given
* condition is found.
* If no message matches the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The abort condition for when to stop retrieving messages.
* @param before Get messages before the message with this id.
* @return The messages.
* @see #getMessagesBeforeAsStream(TextChannel, long)
*/
public static CompletableFuture<MessageSet> getMessagesBeforeUntil(
TextChannel channel, Predicate<Message> condition, long before) {
return getMessagesUntil(channel, condition, before, -1);
}
/**
* Gets messages in the given channel before a given message in any channel while they meet the given condition.
* If the first message does not match the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The condition that has to be met.
* @param before Get messages before the message with this id.
* @return The messages.
* @see #getMessagesBeforeAsStream(TextChannel, long)
*/
public static CompletableFuture<MessageSet> getMessagesBeforeWhile(
TextChannel channel, Predicate<Message> condition, long before) {
return getMessagesWhile(channel, condition, before, -1);
}
/**
* Gets a stream of messages in the given channel before a given message in any channel sorted from newest to
* oldest.
*
* <p>The messages are retrieved in batches synchronously from Discord,
* so consider not using this method from a listener directly.
*
* @param channel The channel of the messages.
* @param before Get messages before the message with this id.
* @return The stream.
* @see #getMessagesBefore(TextChannel, int, long)
*/
public static Stream<Message> getMessagesBeforeAsStream(TextChannel channel, long before) {
return getMessagesAsStream(channel, before, -1);
}
/**
* Gets up to a given amount of messages in the given channel after a given message in any channel.
*
* @param channel The channel of the messages.
* @param limit The limit of messages to get.
* @param after Get messages after the message with this id.
* @return The messages.
* @see #getMessagesAfterAsStream(TextChannel, long)
*/
public static CompletableFuture<MessageSet> getMessagesAfter(TextChannel channel, int limit, long after) {
return getMessages(channel, limit, -1, after);
}
/**
* Gets messages in the given channel after a given message in any channel until one that meets the given condition
* is found.
* If no message matches the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The abort condition for when to stop retrieving messages.
* @param after Get messages after the message with this id.
* @return The messages.
* @see #getMessagesAfterAsStream(TextChannel, long)
*/
public static CompletableFuture<MessageSet> getMessagesAfterUntil(
TextChannel channel, Predicate<Message> condition, long after) {
return getMessagesUntil(channel, condition, -1, after);
}
/**
* Gets messages in the given channel after a given message in any channel while they meet the given condition.
* If the first message does not match the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The condition that has to be met.
* @param after Get messages after the message with this id.
* @return The messages.
* @see #getMessagesAfterAsStream(TextChannel, long)
*/
public static CompletableFuture<MessageSet> getMessagesAfterWhile(
TextChannel channel, Predicate<Message> condition, long after) {
return getMessagesWhile(channel, condition, -1, after);
}
/**
* Gets a stream of messages in the given channel after a given message in any channel sorted from oldest to newest.
*
* <p>The messages are retrieved in batches synchronously from Discord,
* so consider not using this method from a listener directly.
*
* @param channel The channel of the messages.
* @param after Get messages after the message with this id.
* @return The stream.
* @see #getMessagesAfter(TextChannel, int, long)
*/
public static Stream<Message> getMessagesAfterAsStream(TextChannel channel, long after) {
return getMessagesAsStream(channel, -1, after);
}
/**
* Gets up to a given amount of messages in the given channel around a given message in any channel.
* If it was sent in the given channel, the given message is included in the result in addition to the
* surrounding messages and does not count towards the limit.
* Half of the messages will be older than the given message and half of the messages will be newer.
* If there aren't enough older or newer messages, the actual amount of messages will be less than the given limit.
* It's also not guaranteed to be perfectly balanced.
*
* @param channel The channel of the messages.
* @param limit The limit of messages to get.
* @param around Get messages around the message with this id.
* @return The messages.
* @see #getMessagesAroundAsStream(TextChannel, long)
*/
public static CompletableFuture<MessageSet> getMessagesAround(TextChannel channel, int limit, long around) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
// calculate the half limit.
int halfLimit = limit / 2;
// get the newer half
MessageSet newerMessages = getMessagesAfter(channel, halfLimit, around).join();
// get the older half + around message
MessageSet olderMessages = getMessagesBefore(channel, halfLimit + 1, around + 1).join();
// remove the oldest message if the around message is not part of the result while there is a result,
// for example because the around message was from a different channel
if (olderMessages.getNewestMessage()
.map(DiscordEntity::getId)
.map(id -> id != around)
.orElse(false)) {
olderMessages = olderMessages.tailSet(
olderMessages.getOldestMessage().orElseThrow(AssertionError::new),
false);
}
// combine the messages into one collection
Collection<Message> messages = Stream
.of(olderMessages, newerMessages)
.flatMap(Collection::stream)
.collect(Collectors.toList());
// we are done
future.complete(new MessageSetImpl(messages));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
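// Illustrative usage sketch (not part of the original source): assumes a TextChannel
// "channel" and a message id "anchorId" obtained elsewhere. Up to ten messages are
// fetched, split roughly evenly before and after the anchor; the anchor itself is
// included (if it was sent in this channel) without counting towards the limit:
//
//     getMessagesAround(channel, 10, anchorId).thenAccept(messages ->
//             messages.forEach(message -> System.out.println(message.getId())));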
/**
* Gets messages in the given channel around a given message in any channel until one that meets the given
* condition is found. If no message matches the condition, an empty set is returned.
* If it was sent in the given channel, the given message is included in the result in addition to the
* surrounding messages; it is also matched against the condition and can abort retrieval.
* Half of the messages will be older than the given message and half of the messages will be newer.
* If there aren't enough older or newer messages, the halves will not be same-sized.
* It's also not guaranteed to be perfectly balanced.
*
* @param channel The channel of the messages.
* @param condition The abort condition for when to stop retrieving messages.
* @param around Get messages around the message with this id.
*
* @return The messages.
*/
public static CompletableFuture<MessageSet> getMessagesAroundUntil(
TextChannel channel, Predicate<Message> condition, long around) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
List<Message> messages = new ArrayList<>();
Optional<Message> untilMessage =
getMessagesAroundAsStream(channel, around).peek(messages::add).filter(condition).findFirst();
future.complete(new MessageSetImpl(untilMessage
.map(message -> messages)
.orElse(Collections.emptyList())));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
/**
* Gets messages in the given channel around a given message in any channel while they meet the given condition.
* If the first message does not match the condition, an empty set is returned.
* If it was sent in the given channel, the given message is included in the result in addition to the
* surrounding messages; it is also matched against the condition and can abort retrieval.
* Half of the messages will be older than the given message and half of the messages will be newer.
* If there aren't enough older or newer messages, the halves will not be same-sized.
* It's also not guaranteed to be perfectly balanced.
*
* @param channel The channel of the messages.
* @param condition The condition that has to be met.
* @param around Get messages around the message with this id.
*
* @return The messages.
*/
public static CompletableFuture<MessageSet> getMessagesAroundWhile(
TextChannel channel, Predicate<Message> condition, long around) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
List<Message> messages = new ArrayList<>();
Optional<Message> untilMessage =
getMessagesAroundAsStream(channel, around)
.peek(messages::add)
.filter(condition.negate())
.findFirst();
untilMessage.ifPresent(messages::remove);
future.complete(new MessageSetImpl(messages));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
/**
* Gets a stream of messages in the given channel around a given message in any channel.
* The first message in the stream will be the given message if it was sent in the given channel.
* After that, older and newer messages alternate as long as messages are available on both sides.
* If further messages are available on only one side, only those are delivered from then on.
* It's not guaranteed to be perfectly balanced.
*
* <p>The messages are retrieved in batches synchronously from Discord,
* so consider not using this method from a listener directly.
*
* @param channel The channel of the messages.
* @param around Get messages around the message with this id.
* @return The stream.
* @see #getMessagesAround(TextChannel, int, long)
*/
public static Stream<Message> getMessagesAroundAsStream(TextChannel channel, long around) {
return StreamSupport.stream(Spliterators.spliteratorUnknownSize(new Iterator<Message>() {
private final DiscordApiImpl api = ((DiscordApiImpl) channel.getApi());
private final AtomicBoolean firstBatch = new AtomicBoolean(true);
private final AtomicBoolean nextIsOlder = new AtomicBoolean();
private long olderReferenceMessageId = around;
private long newerReferenceMessageId = around - 1;
private final List<JsonNode> olderMessageJsons = Collections.synchronizedList(new ArrayList<>());
private final List<JsonNode> newerMessageJsons = Collections.synchronizedList(new ArrayList<>());
private final AtomicBoolean hasMoreOlderMessages = new AtomicBoolean(true);
private final AtomicBoolean hasMoreNewerMessages = new AtomicBoolean(true);
private void ensureMessagesAvailable() {
if (olderMessageJsons.isEmpty() && hasMoreOlderMessages.get()) {
synchronized (olderMessageJsons) {
if (olderMessageJsons.isEmpty() && hasMoreOlderMessages.get()) {
olderMessageJsons.addAll(requestAsSortedJsonNodes(
channel,
100,
olderReferenceMessageId,
-1,
true
));
if (olderMessageJsons.isEmpty()) {
hasMoreOlderMessages.set(false);
} else {
olderReferenceMessageId =
olderMessageJsons.get(olderMessageJsons.size() - 1).get("id").asLong();
}
}
}
}
if (newerMessageJsons.isEmpty() && hasMoreNewerMessages.get()) {
synchronized (newerMessageJsons) {
if (newerMessageJsons.isEmpty() && hasMoreNewerMessages.get()) {
newerMessageJsons.addAll(requestAsSortedJsonNodes(
channel,
100,
-1,
newerReferenceMessageId,
false
));
if (newerMessageJsons.isEmpty()) {
hasMoreNewerMessages.set(false);
} else {
newerReferenceMessageId =
newerMessageJsons.get(newerMessageJsons.size() - 1).get("id").asLong();
if (firstBatch.getAndSet(false)) {
nextIsOlder.set(newerMessageJsons.get(0).get("id").asLong() != around);
}
}
}
}
}
}
@Override
public boolean hasNext() {
ensureMessagesAvailable();
return !(olderMessageJsons.isEmpty() && newerMessageJsons.isEmpty());
}
@Override
public Message next() {
ensureMessagesAvailable();
boolean nextIsOlder = this.nextIsOlder.get();
this.nextIsOlder.set(!nextIsOlder);
JsonNode messageJson =
((nextIsOlder && !olderMessageJsons.isEmpty()) || newerMessageJsons.isEmpty())
? olderMessageJsons.remove(0)
: newerMessageJsons.remove(0);
return api.getOrCreateMessage(channel, messageJson);
}
}, Spliterator.ORDERED | Spliterator.DISTINCT | Spliterator.NONNULL | Spliterator.CONCURRENT), false);
}
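// Illustrative usage sketch (not part of the original source): assumes a TextChannel
// "channel" and an anchor message id "around". The stream begins with the anchor message
// (if it is in this channel) and then alternates older/newer as described in the javadoc above:
//
//     getMessagesAroundAsStream(channel, around)
//             .limit(7)
//             .forEach(message -> System.out.println(message.getId()));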
/**
* Gets all messages in the given channel between the first given message in any channel and the second given
* message in any channel, excluding the boundaries.
*
* @param channel The channel of the messages.
* @param from The id of the start boundary message.
* @param to The id of the other boundary message.
* @return The messages.
* @see #getMessagesBetweenAsStream(TextChannel, long, long)
*/
public static CompletableFuture<MessageSet> getMessagesBetween(TextChannel channel, long from, long to) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
future.complete(new MessageSetImpl(getMessagesBetweenAsStream(channel, from, to)
.collect(Collectors.toList())));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
/**
* Gets all messages in the given channel between the first given message in any channel and the second given
* message in any channel, excluding the boundaries, until one that meets the given condition is found.
* If no message matches the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The abort condition for when to stop retrieving messages.
* @param from The id of the start boundary message.
* @param to The id of the other boundary message.
* @return The messages.
* @see #getMessagesBetweenAsStream(TextChannel, long, long)
*/
public static CompletableFuture<MessageSet> getMessagesBetweenUntil(
TextChannel channel, Predicate<Message> condition, long from, long to) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
List<Message> messages = new ArrayList<>();
Optional<Message> untilMessage =
getMessagesBetweenAsStream(channel, from, to).peek(messages::add).filter(condition).findFirst();
future.complete(new MessageSetImpl(untilMessage
.map(message -> messages)
.orElse(Collections.emptyList())));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
/**
* Gets all messages in the given channel between the first given message in any channel and the second given
* message in any channel, excluding the boundaries, while they meet the given condition.
* If the first message does not match the condition, an empty set is returned.
*
* @param channel The channel of the messages.
* @param condition The condition that has to be met.
* @param from The id of the start boundary message.
* @param to The id of the other boundary message.
* @return The messages.
* @see #getMessagesBetweenAsStream(TextChannel, long, long)
*/
public static CompletableFuture<MessageSet> getMessagesBetweenWhile(
TextChannel channel, Predicate<Message> condition, long from, long to) {
CompletableFuture<MessageSet> future = new CompletableFuture<>();
channel.getApi().getThreadPool().getExecutorService().submit(() -> {
try {
List<Message> messages = new ArrayList<>();
Optional<Message> untilMessage =
getMessagesBetweenAsStream(channel, from, to)
.peek(messages::add)
.filter(condition.negate())
.findFirst();
untilMessage.ifPresent(messages::remove);
future.complete(new MessageSetImpl(messages));
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
/**
* Gets all messages in the given channel between the first given message in any channel and the second given
* message in any channel, excluding the boundaries, sorted from first given message to the second given message.
*
* <p>The messages are retrieved in batches synchronously from Discord,
* so consider not using this method from a listener directly.
*
* @param channel The channel of the messages.
* @param from The id of the start boundary message.
* @param to The id of the other boundary message.
* @return The stream.
* @see #getMessagesBetween(TextChannel, long, long)
*/
public static Stream<Message> getMessagesBetweenAsStream(TextChannel channel, long from, long to) {
long before = Math.max(from, to);
long after = Math.min(from, to);
Stream<Message> messages = getMessagesAsStream(channel, -1, after).filter(message -> message.getId() < before);
return (from == after) ? messages : messages.sorted(Comparator.reverseOrder());
}
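// Illustrative usage sketch (not part of the original source): assumes two message ids
// "olderId" and "newerId" (olderId < newerId) from this channel. The boundaries are
// excluded and the stream runs from the first argument towards the second, so swapping
// the arguments reverses the order:
//
//     getMessagesBetweenAsStream(channel, olderId, newerId)   // oldest to newest
//             .forEach(message -> System.out.println(message.getId()));
//     getMessagesBetweenAsStream(channel, newerId, olderId)   // newest to oldest
//             .forEach(message -> System.out.println(message.getId()));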
/**
* Requests the messages from Discord.
*
* @param channel The channel of which to get messages from.
* @param limit The limit of messages to get.
* @param before Get messages before the message with this id.
* @param after Get messages after the message with this id.
* @return The messages.
*/
private static MessageSet requestAsMessages(TextChannel channel, int limit, long before, long after) {
DiscordApiImpl api = (DiscordApiImpl) channel.getApi();
return new MessageSetImpl(
requestAsJsonNodes(channel, limit, before, after).stream()
.map(jsonNode -> api.getOrCreateMessage(channel, jsonNode))
.collect(Collectors.toList()));
}
/**
* Requests the messages from Discord, sorted by their id.
*
* @param channel The channel of which to get messages from.
* @param limit The limit of messages to get.
* @param before Get messages before the message with this id.
* @param after Get messages after the message with this id.
* @param reversed If {@code true}, get from oldest to newest, otherwise from newest to oldest.
* @return The JSON nodes.
*/
private static List<JsonNode> requestAsSortedJsonNodes(
TextChannel channel, int limit, long before, long after, boolean reversed) {
List<JsonNode> messageJsonNodes = requestAsJsonNodes(channel, limit, before, after);
Comparator<JsonNode> idComparator = Comparator.comparingLong(jsonNode -> jsonNode.get("id").asLong());
messageJsonNodes.sort(reversed ? idComparator.reversed() : idComparator);
return messageJsonNodes;
}
/**
* Requests the messages from Discord.
*
* @param channel The channel of which to get messages from.
* @param limit The limit of messages to get.
* @param before Get messages before the message with this id.
* @param after Get messages after the message with this id.
* @return The JSON nodes.
*/
private static List<JsonNode> requestAsJsonNodes(TextChannel channel, int limit, long before, long after) {
RestRequest<List<JsonNode>> restRequest =
new RestRequest<List<JsonNode>>(channel.getApi(), RestMethod.GET, RestEndpoint.MESSAGE)
.setUrlParameters(channel.getIdAsString());
if (limit != -1) {
restRequest.addQueryParameter("limit", String.valueOf(limit));
}
if (before != -1) {
restRequest.addQueryParameter("before", Long.toUnsignedString(before));
}
if (after != -1) {
restRequest.addQueryParameter("after", Long.toUnsignedString(after));
}
return restRequest.execute(result -> {
List<JsonNode> messageJsonNodes = new ArrayList<>();
result.getJsonBody().iterator().forEachRemaining(messageJsonNodes::add);
return messageJsonNodes;
}).join();
}
@Override
public Message lower(Message message) {
return messages.lower(message);
}
@Override
public Message floor(Message message) {
return messages.floor(message);
}
@Override
public Message ceiling(Message message) {
return messages.ceiling(message);
}
@Override
public Message higher(Message message) {
return messages.higher(message);
}
@Override
public Message pollFirst() {
return messages.pollFirst();
}
@Override
public Message pollLast() {
return messages.pollLast();
}
@Override
public int size() {
return messages.size();
}
@Override
public boolean isEmpty() {
return messages.isEmpty();
}
@Override
public boolean contains(Object o) {
return messages.contains(o);
}
@Override
public Iterator<Message> iterator() {
return messages.iterator();
}
@Override
public Object[] toArray() {
return messages.toArray();
}
@Override
public <T> T[] toArray(T[] a) {
return messages.toArray(a);
}
@Override
public boolean add(Message message) {
return messages.add(message);
}
@Override
public boolean remove(Object o) {
return messages.remove(o);
}
@Override
public boolean containsAll(Collection<?> c) {
return messages.containsAll(c);
}
@Override
public boolean addAll(Collection<? extends Message> c) {
return messages.addAll(c);
}
@Override
public boolean retainAll(Collection<?> c) {
return messages.retainAll(c);
}
@Override
public boolean removeAll(Collection<?> c) {
return messages.removeAll(c);
}
@Override
public void clear() {
messages.clear();
}
@Override
public NavigableSet<Message> descendingSet() {
return messages.descendingSet();
}
@Override
public Iterator<Message> descendingIterator() {
return messages.descendingIterator();
}
@Override
public MessageSet subSet(Message fromElement, boolean fromInclusive, Message toElement, boolean toInclusive) {
return new MessageSetImpl(messages.subSet(fromElement, fromInclusive, toElement, toInclusive));
}
@Override
public MessageSet subSet(Message fromElement, Message toElement) {
return new MessageSetImpl(messages.subSet(fromElement, toElement));
}
@Override
public MessageSet headSet(Message toElement, boolean inclusive) {
return new MessageSetImpl(messages.headSet(toElement, inclusive));
}
@Override
public MessageSet headSet(Message toElement) {
return new MessageSetImpl(messages.headSet(toElement));
}
@Override
public MessageSet tailSet(Message fromElement, boolean inclusive) {
return new MessageSetImpl(messages.tailSet(fromElement, inclusive));
}
@Override
public MessageSet tailSet(Message fromElement) {
return new MessageSetImpl(messages.tailSet(fromElement));
}
@Override
public Comparator<? super Message> comparator() {
return messages.comparator();
}
@Override
public Message first() {
return messages.first();
}
@Override
public Message last() {
return messages.last();
}
}
| 16,558 |
2,890 | <reponame>amorvos/light-task-scheduler
package com.github.ltsopensource.zookeeper;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.*;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.KeeperException;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
* @author <NAME> (<EMAIL>) on 2/24/16.
*/
public class CarutorDemo {
public static void main(String[] args) throws Exception {
final CuratorFramework client = CuratorFrameworkFactory.builder()
.connectString("127.0.0.1:2181")
.sessionTimeoutMs(5000)
.connectionTimeoutMs(3000)
.retryPolicy(new ExponentialBackoffRetry(1000, 3))
.build();
client.start();
try {
client.delete().forPath("/zk-lts-test/cnode");
} catch (KeeperException.NoNodeException ignored) {
}
try {
client.delete().forPath("/zk-lts-test/cnode2/3424");
} catch (KeeperException.NoNodeException ignored) {
}
try {
client.delete().forPath("/zk-lts-test/cnode2");
} catch (KeeperException.NoNodeException ignored) {
}
client.create()
.creatingParentsIfNeeded()
.forPath("/zk-lts-test/cnode", "hello".getBytes());
/**
 * If this executor is passed when registering a listener, the listener logic runs on this thread pool when an event fires.
 */
ExecutorService pool = Executors.newFixedThreadPool(2);
/**
 * Watch for changes to the data of the node.
 */
final NodeCache nodeCache = new NodeCache(client, "/zk-lts-test/cnode", false);
nodeCache.start(true);
nodeCache.getListenable().addListener(
new NodeCacheListener() {
@Override
public void nodeChanged() throws Exception {
if(nodeCache.getCurrentData().getData() == null){
System.out.println("delete data:" + nodeCache.getCurrentData().getPath());
}else{
System.out.println("Node data is changed, path:"+ nodeCache.getCurrentData().getPath() +" new data: " +
new String(nodeCache.getCurrentData().getData()));
}
}
},
pool
);
/**
 * Watch for changes to the child nodes.
 */
final PathChildrenCache childrenCache = new PathChildrenCache(client, "/zk-lts-test", true);
childrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
childrenCache.getListenable().addListener(
new PathChildrenCacheListener() {
@Override
public void childEvent(CuratorFramework client2, PathChildrenCacheEvent event)
throws Exception {
switch (event.getType()) {
case CHILD_ADDED:
case CHILD_REMOVED:
case CHILD_UPDATED:
String childPath = event.getData().getPath();
String parentPath = childPath.substring(0, childPath.lastIndexOf("/"));
List<String> children = client.getChildren().forPath(parentPath);
System.out.println(event.getType() + " " + children);
default:
break;
}
}
},
pool
);
client.setData().forPath("/zk-lts-test/cnode", "world".getBytes());
client.create()
.creatingParentsIfNeeded()
.forPath("/zk-lts-test/cnode2", "hello".getBytes());
client.create()
.creatingParentsIfNeeded()
.forPath("/zk-lts-test/cnode2/3424", "hello".getBytes());
Thread.sleep(1000);
client.setData().forPath("/zk-lts-test/cnode", null);
Thread.sleep(10 * 1000);
pool.shutdown();
client.close();
}
}
| 2,303 |
2,662 | #!/usr/bin/env python
# settings
# Complete these as you need
#############################################
# ENABLE preprocessor
enabled = True
# If you want to keep the temp file for inspection - pre-patch state.
# THE NAME is self.tmp_file
keep_temp = False
# Recheck the file before patching or the next preprocessor
recheck_support = False
# file format that this is for (PE, ELF, MACHO, ALL)
# if not specified the processor will run against all
file_format = "ALL"
#############################################
class preprocessor:
# REQUIRED
def __init__(self, BDF):
# REQUIRED -- exposes BDF objects to the preprocessor environment
self.BDF = BDF
# You can set a return, just add a check that returns False
# 'None' does not flag
self.result = True
# REQUIRED
def run(self):
# call your program main here, we're calling print_debug()
self.print_debug()
return self.result
def print_debug(self):
print "*"*25, "DEBUG INFO", "*"*25
try:
for item, data in vars(self.BDF).iteritems():
# file Items (flItms) will be printed later
if item == 'flItms':
continue
# This will give ARGS info
print item, ":" ,data
# BDF functions are exposed | print PE flItms (PE only)
if 'flItms' in vars(self.BDF):
self.BDF.print_flItms(self.BDF.flItms)
except Exception, e:
print "!" * 50
print "\t[!] Exception:", str(e)
print "!" * 50
self.result = False
print "*"*25, "END DEBUG INFO", "*"*25
| 741 |
3,139 | <reponame>supertick/jmonkeyengine<filename>jme3-core/src/main/java/com/jme3/opencl/MappingAccess.java
/*
* Copyright (c) 2009-2016 jMonkeyEngine
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of 'jMonkeyEngine' nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.jme3.opencl;
/**
* Specifies the access flags when mapping a {@link Buffer} or {@link Image} object.
* @see Buffer#map(com.jme3.opencl.CommandQueue, long, long, com.jme3.opencl.MappingAccess)
* @see Image#map(com.jme3.opencl.CommandQueue, long[], long[], com.jme3.opencl.MappingAccess)
* @author shaman
*/
public enum MappingAccess {
/**
* Only read access is allowed to the mapped memory.
*/
MAP_READ_ONLY,
/**
* Only write access is allowed to the mapped memory.
*/
MAP_WRITE_ONLY,
/**
* Both read and write access is allowed.
*/
MAP_READ_WRITE,
/**
* The old memory content is completely discarded and the buffer is filled
* completely with new data. This might be faster than {@link #MAP_WRITE_ONLY}
*/
MAP_WRITE_INVALIDATE
}
| 793 |
372 | // **************************************************************************************
// File: Krb4Properties.cpp
// By: <NAME>
// Created: 12/02/98
// Copyright @1998 Massachusetts Institute of Technology - All rights reserved.
// Description: CPP file for KrbProperties.h. Contains variables and functions
// for Kerberos Four Properties
//
// History:
//
// MM/DD/YY Inits Description of Change
// 12/02/98 ADL Original
// **************************************************************************************
#include "stdafx.h"
#include "Leash.h"
#include "Krb4Properties.h"
#include "LeashFileDialog.h"
#include "LeashMessageBox.h"
#include "wshelper.h"
#include "lglobals.h"
#include <io.h>
#include <direct.h>
#include "reminder.h"
#ifdef _DEBUG
#define new DEBUG_NEW
#undef THIS_FILE
static char THIS_FILE[] = __FILE__;
#endif
///////////////////////////////////////////////////////////////////////
// CKrb4ConfigFileLocation property page
IMPLEMENT_DYNCREATE(CKrb4ConfigFileLocation, CPropertyPage)
CString CKrb4ConfigFileLocation::m_newKrbFile;
CString CKrb4ConfigFileLocation::m_newKrbrealmFile;
CKrb4ConfigFileLocation::CKrb4ConfigFileLocation() : CPropertyPage(CKrb4ConfigFileLocation::IDD)
{
m_newTicketFile = _T("");
m_newKrbFile = _T("");
m_newKrbrealmFile = _T("");
m_initKrbFile = _T("");
m_initKrbrealmFile = _T("");
m_initTicketFile = _T("");
m_noKrbrealmFileStartupWarning = FALSE;
m_noKrbFileStartupWarning = FALSE;
m_startupPage1 = TRUE;
//{{AFX_DATA_INIT(CKrb4ConfigFileLocation)
//}}AFX_DATA_INIT
}
CKrb4ConfigFileLocation::~CKrb4ConfigFileLocation()
{
}
BOOL CKrb4ConfigFileLocation::OnInitDialog()
{
CPropertyPage::OnInitDialog();
INT krbCreate = 0;
INT krbrealmCreate = 0;
CHAR krb_path[MAX_PATH];
CHAR krbrealm_path[MAX_PATH];
CHAR ticketName[MAX_PATH];
unsigned int krb_path_sz = sizeof(krb_path);
unsigned int krbrealm_path_sz = sizeof(krbrealm_path);
CString strMessage;
// Set KRB.CON
memset(krb_path, '\0', sizeof(krb_path));
if (!pkrb_get_krbconf2(krb_path, &krb_path_sz))
{ // Error has happened
m_noKrbFileStartupWarning = TRUE;
}
else
{ // normal find
m_initKrbFile = krb_path;
m_newKrbFile = m_initKrbFile;
SetDlgItemText(IDC_EDIT_KRB_LOC, m_initKrbFile);
}
// Set KRBREALM.CON
memset(krbrealm_path, '\0', sizeof(krbrealm_path));
if (!pkrb_get_krbrealm2(krbrealm_path, &krbrealm_path_sz))
{
// Error has happened
m_noKrbrealmFileStartupWarning = TRUE;
}
else
{
// normal find
m_initKrbrealmFile = krbrealm_path;
m_newKrbrealmFile = m_initKrbrealmFile;
SetDlgItemText(IDC_EDIT_KRBREALM_LOC, m_initKrbrealmFile);
}
if (pLeash_get_lock_file_locations() ||
getenv("KRB4_KRB.REALMS") || getenv("KRB4_KRB.CONF") || getenv("KRB4_CONFIG"))
{
GetDlgItem(IDC_EDIT_KRB_LOC)->EnableWindow(FALSE);
GetDlgItem(IDC_EDIT_KRBREALM_LOC)->EnableWindow(FALSE);
GetDlgItem(IDC_BUTTON_KRB_BROWSE)->EnableWindow(FALSE);
GetDlgItem(IDC_BUTTON_KRBREALM_BROWSE)->EnableWindow(FALSE);
}
else if ( !(getenv("KRB4_KRB.REALMS") || getenv("KRB4_KRB.CONF") || getenv("KRB4_CONFIG")) )
{
GetDlgItem(IDC_STATIC_CONFILES)->ShowWindow(FALSE);
}
// Set TICKET.KRB file Editbox
*ticketName = NULL;
pkrb_set_tkt_string(0);
char *pticketName = ptkt_string();
if (pticketName)
strcpy(ticketName, pticketName);
if (!*ticketName)
{
LeashErrorBox("OnInitDialog::Can't locate ticket file", TICKET_FILE);
}
else
{
m_initTicketFile = m_newTicketFile = ticketName;
m_ticketEditBox.ReplaceSel(m_initTicketFile);
}
if (getenv("KRBTKFILE"))
GetDlgItem(IDC_EDIT_TICKET_FILE)->EnableWindow(FALSE);
else
GetDlgItem(IDC_STATIC_TXT)->ShowWindow(FALSE);
return FALSE;
}
BOOL CKrb4ConfigFileLocation::OnApply()
{
// Krb.con
if (0 != m_initKrbFile.CompareNoCase(m_newKrbFile))
{
// Commit changes
if (SetRegistryVariable("krb.conf", m_newKrbFile,
"Software\\MIT\\Kerberos4"))
{
MessageBox("Failed to set \"Krb.conf\"!", "Error", MB_OK);
}
m_initKrbFile = m_newKrbFile;
}
// Krbrealms.con
if (0 != m_initKrbrealmFile.CompareNoCase(m_newKrbrealmFile))
{
// Commit changes
if (SetRegistryVariable("krb.realms", m_newKrbrealmFile,
"Software\\MIT\\Kerberos4"))
{
MessageBox("Failed to set \"krb.realms\"!", "Error", MB_OK);
}
m_initKrbrealmFile = m_newKrbrealmFile;
}
// Ticket file
if (0 != m_initTicketFile.CompareNoCase(m_newTicketFile))
{
if (getenv("KRBTKFILE"))
{
// Just in case they set (somehow) KRBTKFILE while this box is up
MessageBox("OnApply::Ticket file is set in your System's\
Environment!\nYou must first remove it.",
"Error", MB_OK);
return TRUE;
}
// Commit changes
if (SetRegistryVariable("ticketfile", m_newTicketFile,
"Software\\MIT\\Kerberos4"))
{
MessageBox("Failed to set \"ticketfile\"!", "Error", MB_OK);
}
m_initTicketFile = m_newTicketFile;
}
return TRUE;
}
VOID CKrb4ConfigFileLocation::OnOK()
{
CPropertyPage::OnOK();
}
VOID CKrb4ConfigFileLocation::DoDataExchange(CDataExchange* pDX)
{
TRACE("Entering CKrb4ConfigFileLocation::DoDataExchange -- %d\n",
pDX->m_bSaveAndValidate);
CPropertyPage::DoDataExchange(pDX);
//{{AFX_DATA_MAP(CKrb4ConfigFileLocation)
DDX_Control(pDX, IDC_EDIT_TICKET_FILE, m_ticketEditBox);
//}}AFX_DATA_MAP
}
VOID CKrb4ConfigFileLocation::OnButtonKrbBrowse()
{
CString msg;
msg.Format("Select %s Location", KRB_FILE);
CString krb_path = "*.*";
CLeashFileDialog dlgFile(TRUE, NULL, krb_path, "Kerberos Four Config. File (.con)");
dlgFile.m_ofn.lpstrTitle = msg;
if (IDOK == dlgFile.DoModal())
{
//m_newKrbFile = dlgFile.GetSelectedFileName();
m_newKrbFile= dlgFile.GetPathName();
SetDlgItemText(IDC_EDIT_KRB_LOC, m_newKrbFile);
SetModified(TRUE);
}
}
VOID CKrb4ConfigFileLocation::OnButtonKrbrealmBrowse()
{
CString msg;
msg.Format("Select %s Location", KRBREALM_FILE);
CString krbrealm_path = "*.*";
CLeashFileDialog dlgFile(TRUE, NULL, krbrealm_path, "Kerberos Four Config. File (.con)");
dlgFile.m_ofn.lpstrTitle = msg;
if (IDOK == dlgFile.DoModal())
{
//m_krbrealmFile = dlgFile.GetSelectedFileName();
m_newKrbrealmFile = dlgFile.GetPathName();
SetDlgItemText(IDC_EDIT_KRB_KRBREALM_LOC, m_newKrbrealmFile);
SetModified(TRUE);
}
}
/*
VOID CKrb4ConfigFileLocation::OnButtonTicketfileBrowse()
{
CString ticketPath = "*.*";
CLeashFileDialog dlgFile(TRUE, NULL, ticketPath, "Kerberos Four Ticket File (.con)");
CString msg;
msg.Format("Select Location/Ticket File (Default file = %s)", TICKET_FILE);
dlgFile.m_ofn.lpstrTitle = msg;
while (TRUE)
{
if (IDOK == dlgFile.DoModal())
{
m_newTicketFile = dlgFile.GetPathName();
SetDlgItemText(IDC_EDIT_TICKET_FILE, m_newTicketFile);
SetModified(TRUE);
break;
}
else
break;
}
}
*/
void CKrb4ConfigFileLocation::OnChangeEditKrbLoc()
{
if (!m_startupPage1)
{
GetDlgItemText(IDC_EDIT_KRB_LOC, m_newKrbFile);
SetModified(TRUE);
}
}
void CKrb4ConfigFileLocation::OnChangeEditKrbrealmLoc()
{
if (!m_startupPage1)
{
GetDlgItemText(IDC_EDIT_KRBREALM_LOC, m_newKrbrealmFile);
SetModified(TRUE);
}
}
void CKrb4ConfigFileLocation::OnChangeEditTicketFile()
{
if (!m_startupPage1)
{
GetDlgItemText(IDC_EDIT_TICKET_FILE, m_newTicketFile);
SetModified(TRUE);
}
}
VOID CKrb4ConfigFileLocation::OnShowWindow(BOOL bShow, UINT nStatus)
{
CPropertyPage::OnShowWindow(bShow, nStatus);
}
VOID CKrb4ConfigFileLocation::OnCancel()
{
CPropertyPage::OnCancel();
}
void CKrb4ConfigFileLocation::OnHelp()
{
#ifdef CALL_HTMLHELP
AfxGetApp()->HtmlHelp(HID_KRB4_PROPERTIES_COMMAND);
#else
AfxGetApp()->WinHelp(HID_KRB4_PROPERTIES_COMMAND);
#endif
}
BOOL CKrb4ConfigFileLocation::PreTranslateMessage(MSG* pMsg)
{
// TODO: Add your specialized code here and/or call the base class
CString wmsg;
if (m_startupPage1)
{
if (m_noKrbFileStartupWarning)
{
wmsg.Format("OnInitDialog::Can't locate configuration file: %s.",
KRB_FILE);
MessageBox(wmsg, "Leash", MB_OK);
m_noKrbFileStartupWarning = FALSE;
}
if (m_noKrbrealmFileStartupWarning)
{
wmsg.Format("OnInitDialog::Can't locate configuration file: %s.",
KRBREALM_FILE);
MessageBox(wmsg, "Leash", MB_OK);
m_noKrbrealmFileStartupWarning = FALSE;
}
}
m_startupPage1 = FALSE;
return CPropertyPage::PreTranslateMessage(pMsg);
}
BEGIN_MESSAGE_MAP(CKrb4ConfigFileLocation, CPropertyPage)
//{{AFX_MSG_MAP(CKrb4ConfigFileLocation)
ON_BN_CLICKED(IDC_BUTTON_KRB_BROWSE, OnButtonKrbBrowse)
ON_BN_CLICKED(IDC_BUTTON_KRBREALM_BROWSE, OnButtonKrbrealmBrowse)
ON_WM_SHOWWINDOW()
ON_EN_CHANGE(IDC_EDIT_TICKET_FILE, OnChangeEditTicketFile)
ON_COMMAND(ID_HELP, OnHelp)
ON_EN_CHANGE(IDC_EDIT_KRB_LOC, OnChangeEditKrbLoc)
ON_EN_CHANGE(IDC_EDIT_KRBREALM_LOC, OnChangeEditKrbrealmLoc)
//}}AFX_MSG_MAP
END_MESSAGE_MAP()
///////////////////////////////////////////////////////////////////////
// CKrb4Properties
IMPLEMENT_DYNAMIC(CKrb4Properties, CPropertySheet)
CKrb4Properties::CKrb4Properties(UINT nIDCaption, CWnd* pParentWnd,
UINT iSelectPage)
:CPropertySheet(nIDCaption, pParentWnd, iSelectPage)
{
}
CKrb4Properties::CKrb4Properties(LPCTSTR pszCaption, CWnd* pParentWnd,
UINT iSelectPage)
:CPropertySheet(pszCaption, pParentWnd, iSelectPage)
{
AddPage(&m_fileLocation);
}
CKrb4Properties::~CKrb4Properties()
{
}
BEGIN_MESSAGE_MAP(CKrb4Properties, CPropertySheet)
//{{AFX_MSG_MAP(CKrb4Properties)
// NOTE - the ClassWizard will add and remove mapping macros here.
//}}AFX_MSG_MAP
END_MESSAGE_MAP()
///////////////////////////////////////////////////////////////////////
// CKrb4Properties message handlers
| 4,579 |
1,609 | package com.mossle.security.client;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.PostConstruct;
import com.mossle.security.impl.SpringSecurityUserAuth;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
public class MemoryUserDetailsService implements UserDetailsService {
private static Logger logger = LoggerFactory
.getLogger(MemoryUserDetailsService.class);
private String text;
private Map<String, SpringSecurityUserAuth> map = new HashMap<String, SpringSecurityUserAuth>();
@PostConstruct
public void init() {
if (text == null) {
logger.info("text not exists");
return;
}
for (String line : text.split("\n")) {
String[] array = line.split(",");
String username = array[0];
List<String> permissions = new ArrayList<String>(
Arrays.asList(array));
permissions.remove(0);
SpringSecurityUserAuth userAuth = new SpringSecurityUserAuth();
userAuth.setId(username);
userAuth.setUsername(username);
userAuth.setDisplayName(username);
userAuth.setPermissions(permissions);
map.put(username, userAuth);
}
}
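// Illustrative configuration sketch (not part of the original class): "text" is parsed
// one user per line, comma separated, with the username first and its permissions after it.
// The usernames and permissions below are made-up examples:
//
//     MemoryUserDetailsService service = new MemoryUserDetailsService();
//     service.setText("admin,user:view,user:edit\nguest,user:view");
//     service.init();
//     UserDetails admin = service.loadUserByUsername("admin");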
public UserDetails loadUserByUsername(String username)
throws UsernameNotFoundException {
if (!map.containsKey(username)) {
throw new UsernameNotFoundException(username, null);
}
return map.get(username);
}
public void setText(String text) {
this.text = text;
}
}
| 744 |
1,041 | package org.tests.sp.model.car;
import org.tests.sp.model.IdEntity;
import javax.persistence.Entity;
import javax.persistence.Table;
@Entity
@Table(name = "sp_car_door")
public class Door extends IdEntity {
private static final long serialVersionUID = 2399600193947163469L;
private String name;
public Door(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
| 143 |
2,334 | package com.gjiazhe.panoramaimageview.sample;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.gjiazhe.panoramaimageview.GyroscopeObserver;
import com.gjiazhe.panoramaimageview.PanoramaImageView;
public class RecyclerViewSampleActivity extends AppCompatActivity {
private GyroscopeObserver gyroscopeObserver;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_recyclerview_sample);
gyroscopeObserver = new GyroscopeObserver();
RecyclerView recyclerView = (RecyclerView) findViewById(R.id.rv);
recyclerView.setLayoutManager(new LinearLayoutManager(this));
recyclerView.setAdapter(new MyAdapter());
}
@Override
protected void onResume() {
super.onResume();
gyroscopeObserver.register(this);
}
@Override
protected void onPause() {
super.onPause();
gyroscopeObserver.unregister();
}
private class MyAdapter extends RecyclerView.Adapter<MyAdapter.MyViewHolder> {
@Override
public MyViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
LayoutInflater inflater = LayoutInflater.from(parent.getContext());
View view = inflater.inflate(R.layout.item_sample, parent, false);
return new MyViewHolder(view);
}
@Override
public void onBindViewHolder(final MyViewHolder holder, int position) {
switch (position % 3) {
case 0 : holder.panoramaImageView.setImageResource(R.drawable.horizontal1); break;
case 1 : holder.panoramaImageView.setImageResource(R.drawable.horizontal2); break;
case 2 : holder.panoramaImageView.setImageResource(R.drawable.horizontal3); break;
}
}
@Override
public int getItemCount() {
return 6;
}
class MyViewHolder extends RecyclerView.ViewHolder {
PanoramaImageView panoramaImageView;
MyViewHolder(View itemView) {
super(itemView);
panoramaImageView = (PanoramaImageView) itemView.findViewById(R.id.panorama_image_view);
panoramaImageView.setGyroscopeObserver(gyroscopeObserver);
}
}
}
}
| 1,053 |
2,962 | <reponame>stari4ek/storio<filename>storio-content-resolver/src/test/java/com/pushtorefresh/storio3/contentresolver/operations/put/PutContentValuesStub.java
package com.pushtorefresh.storio3.contentresolver.operations.put;
import android.content.ContentValues;
import android.net.Uri;
import android.support.annotation.NonNull;
import com.pushtorefresh.storio3.contentresolver.StorIOContentResolver;
import com.pushtorefresh.storio3.test.FlowableBehaviorChecker;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import io.reactivex.Completable;
import io.reactivex.Flowable;
import io.reactivex.Single;
import io.reactivex.functions.Consumer;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
// stub class to avoid violation of DRY in tests
class PutContentValuesStub {
@NonNull
final StorIOContentResolver storIOContentResolver;
@NonNull
private final StorIOContentResolver.LowLevel lowLevel;
@NonNull
final List<ContentValues> contentValues;
@NonNull
final PutResolver<ContentValues> putResolver;
@NonNull
private final Map<ContentValues, PutResult> contentValuesToPutResultsMap;
@SuppressWarnings("unchecked")
private PutContentValuesStub(int numberOfTestItems) {
storIOContentResolver = mock(StorIOContentResolver.class);
lowLevel = mock(StorIOContentResolver.LowLevel.class);
when(storIOContentResolver.lowLevel())
.thenReturn(lowLevel);
when(storIOContentResolver.put())
.thenReturn(new PreparedPut.Builder(storIOContentResolver));
contentValues = new ArrayList<ContentValues>(numberOfTestItems);
contentValuesToPutResultsMap = new HashMap<ContentValues, PutResult>(numberOfTestItems);
for (int i = 0; i < numberOfTestItems; i++) {
final ContentValues cv = mock(ContentValues.class);
contentValues.add(cv);
contentValuesToPutResultsMap.put(cv, PutResult.newInsertResult(mock(Uri.class), TestItem.CONTENT_URI));
}
putResolver = (PutResolver<ContentValues>) mock(PutResolver.class);
when(putResolver.performPut(eq(storIOContentResolver), any(ContentValues.class)))
.thenReturn(PutResult.newInsertResult(mock(Uri.class), TestItem.CONTENT_URI));
for (final ContentValues cv : contentValues) {
final PutResult putResult = PutResult.newInsertResult(mock(Uri.class), mock(Uri.class));
contentValuesToPutResultsMap.put(cv, putResult);
when(putResolver.performPut(storIOContentResolver, cv))
.thenReturn(putResult);
}
}
@NonNull
public static PutContentValuesStub newPutStubForOneContentValues() {
return new PutContentValuesStub(1);
}
@NonNull
public static PutContentValuesStub newPutStubForMultipleContentValues() {
return new PutContentValuesStub(3);
}
void verifyBehaviorForMultipleContentValues(@NonNull PutResults<ContentValues> putResults) {
// only one call to storIOContentResolver.put() should occur
verify(storIOContentResolver, times(1)).put();
// number of calls to putResolver's performPut() should be equal to number of objects
verify(putResolver, times(contentValues.size())).performPut(eq(storIOContentResolver), any(ContentValues.class));
for (final ContentValues cv : contentValues) {
// Put Operation should be performed once for each item
verify(putResolver, times(1)).performPut(storIOContentResolver, cv);
}
}
void verifyBehaviorForMultipleContentValues(@NonNull Flowable<PutResults<ContentValues>> putResultsFlowable) {
new FlowableBehaviorChecker<PutResults<ContentValues>>()
.flowable(putResultsFlowable)
.expectedNumberOfEmissions(1)
.testAction(new Consumer<PutResults<ContentValues>>() {
@Override
public void accept(@NonNull PutResults<ContentValues> putResults) throws Exception {
verifyBehaviorForMultipleContentValues(putResults);
}
})
.checkBehaviorOfFlowable();
}
void verifyBehaviorForMultipleContentValues(@NonNull Single<PutResults<ContentValues>> putResultsSingle) {
new FlowableBehaviorChecker<PutResults<ContentValues>>()
.flowable(putResultsSingle.toFlowable())
.expectedNumberOfEmissions(1)
.testAction(new Consumer<PutResults<ContentValues>>() {
@Override
public void accept(@NonNull PutResults<ContentValues> putResults) throws Exception {
verifyBehaviorForMultipleContentValues(putResults);
}
})
.checkBehaviorOfFlowable();
}
void verifyBehaviorForMultipleContentValues(@NonNull Completable completable) {
verifyBehaviorForMultipleContentValues(completable.<PutResults<ContentValues>>toFlowable());
}
void verifyBehaviorForOneContentValues(@NonNull PutResult putResult) {
Map<ContentValues, PutResult> putResultsMap = new HashMap<ContentValues, PutResult>(1);
putResultsMap.put(contentValues.get(0), putResult);
verifyBehaviorForMultipleContentValues(PutResults.newInstance(putResultsMap));
}
void verifyBehaviorForOneContentValues(@NonNull Flowable<PutResult> putResultFlowable) {
new FlowableBehaviorChecker<PutResult>()
.flowable(putResultFlowable)
.expectedNumberOfEmissions(1)
.testAction(new Consumer<PutResult>() {
@Override
public void accept(@NonNull PutResult putResult) throws Exception {
verifyBehaviorForOneContentValues(putResult);
}
})
.checkBehaviorOfFlowable();
}
void verifyBehaviorForOneContentValues(@NonNull Single<PutResult> putResultSingle) {
new FlowableBehaviorChecker<PutResult>()
.flowable(putResultSingle.toFlowable())
.expectedNumberOfEmissions(1)
.testAction(new Consumer<PutResult>() {
@Override
public void accept(@NonNull PutResult putResult) throws Exception {
verifyBehaviorForOneContentValues(putResult);
}
})
.checkBehaviorOfFlowable();
}
void verifyBehaviorForOneContentValues(@NonNull Completable completable) {
verifyBehaviorForOneContentValues(completable.<PutResult>toFlowable());
}
}
| 2,820 |
339 | #include "stdafx.h"
#include "HookClient.h"
#include "../HookHw.h"
#include "cstrike/CrossHairFix.h"
void HookClient()
{
static bool firstRun = true;
if(!firstRun) return;
firstRun = false;
//
// detect game:
const char *gamedir = pEngfuncs->pfnGetGameDirectory();
if(!gamedir) return;
if(0 == _stricmp("cstrike",gamedir))
{
Hook_Cstrike_CrossHair_Fix();
}
}
| 196 |
518 | //
// XHUIKitMacro.h
// PinterestExample
//
// Created by dw_iOS on 14-7-14.
// Copyright (c) 2014 Hi, I'm Zeng Xianhua (@xhzengAIB). I previously worked at YY Inc. as a senior mobile developer and co-founded the Pailixiu app; I'm keen on simple, rational things. QQ:543413507 Homepage: http://zengxianhua.com All rights reserved.
//
@import UIKit;
@import Foundation;
#define KXHGridItemWidth (145.0 * 1)
#define kXHLargeGridItemPadding 10
#define kXHScreen [[UIScreen mainScreen] bounds]
#define kXHScreenWidth CGRectGetWidth(kXHScreen)
#define XH_CELL_IDENTIFIER @"XHWaterfallCell"
#define XH_CELL_COUNT 10
// device verson float value
#define CURRENT_SYS_VERSION [[[UIDevice currentDevice] systemVersion] floatValue]
// iPad
#define kIsiPad (UI_USER_INTERFACE_IDIOM() == UIUserInterfaceIdiomPad)
// image STRETCH
#define XH_STRETCH_IMAGE(image, edgeInsets) (CURRENT_SYS_VERSION < 6.0 ? [image stretchableImageWithLeftCapWidth:edgeInsets.left topCapHeight:edgeInsets.top] : [image resizableImageWithCapInsets:edgeInsets resizingMode:UIImageResizingModeStretch])
| 455 |
318 | #ifndef ARGUMENT_BASE_HXX
#define ARGUMENT_BASE_HXX
#include <vector>
#include "argument_holder.hxx"
#include "argument_name.hxx"
#include "parser_error.hxx"
namespace parser{
class ArgumentBase{
public:
// constructor
ArgumentBase(const ArgName & ,ArgumentBase * =NULL,const std::vector<ArgumentBase * > & =std::vector<ArgumentBase * >());
// virtual with default implementation
virtual std::string longName()const;
virtual std::string shortName()const;
virtual bool hasShortName()const;
virtual std::string description()const;
virtual bool isParsed()const;
virtual void setAsParsed();
virtual void setAsUnparsedParsed();
// virtual graph function with default implementation
virtual size_t numberOfChildren()const;
virtual ArgumentBase * children(const size_t);
virtual ArgumentBase const * children(const size_t)const;
virtual ArgumentBase * parent();
virtual ArgumentBase const * parent()const;
virtual void addChild(ArgumentBase * );
virtual void setParent(ArgumentBase *);
// pure virtual functions WITHOUT implementation
virtual std::string valueToString()const =0;
virtual std::string ifParentValue()const =0;
virtual bool hasDefault() const =0;
virtual bool isScalarArg()const =0;
virtual void parse( const ArgContainer & )=0;
virtual void print(const size_t )const=0;
virtual void collectTypesEbnfs(std::set<std::string> & ebnfs)const=0;
virtual size_t depth(const size_t current)const=0;
protected:
ArgName argName_;
bool isParsed_;
ArgumentBase * parent_;
std::vector<ArgumentBase * > children_;
};
ArgumentBase::ArgumentBase
(
const ArgName & argName,
ArgumentBase * parent,
const std::vector<ArgumentBase * > & children
)
:argName_(argName),isParsed_(false),parent_(parent),children_(children){
}
// virtual with default implementation
std::string ArgumentBase::longName()const{
return argName_.longName();
}
std::string ArgumentBase::shortName()const{
return argName_.shortName();
}
bool ArgumentBase::hasShortName()const{
return argName_.hasShortName();
}
std::string ArgumentBase::description()const{
return argName_.description();
}
bool ArgumentBase::isParsed()const{
return isParsed_;
}
void ArgumentBase::setAsParsed(){
isParsed_=true;
}
void ArgumentBase::setAsUnparsedParsed(){
isParsed_=false;
}
size_t ArgumentBase::numberOfChildren()const{
return children_.size();
}
ArgumentBase * ArgumentBase::children
(
const size_t i
){
return children_[i];
}
ArgumentBase const * ArgumentBase::children
(
const size_t i
)const{
return children_[i];
}
ArgumentBase * ArgumentBase::parent(){
return parent_;
}
ArgumentBase const * ArgumentBase::parent()const{
return parent_;
}
void ArgumentBase::addChild
(
ArgumentBase * child
){
children_.push_back(child);
child->setParent(this);
}
void ArgumentBase::setParent(
ArgumentBase * parent
){
parent_=parent;
}
}
#endif /* ARGUMENT_BASE_HXX */
| 1,000 |
325 | <filename>std/src/main/java/org/cybergarage/upnp/std/av/server/object/DIDLLiteNode.java
/******************************************************************
*
* MediaServer for CyberLink
*
* Copyright (C) <NAME> 2003
*
* File : ContentNode
*
* Revision:
*
* 10/30/03
* - first revision.
* 10/26/04
* - <NAME> <<EMAIL>>
* - Changed DIDLLiteNode is a subclass of Node instead of ContentNode
* because the node has the parentID attributes.
*
******************************************************************/
package org.cybergarage.upnp.std.av.server.object;
import org.cybergarage.xml.Node;
public class DIDLLiteNode extends Node // Thanks for <NAME> (10/28/04)
{
////////////////////////////////////////////////
// Constructor
////////////////////////////////////////////////
public DIDLLiteNode()
{
setName(DIDLLite.NAME);
setAttribute(DIDLLite.XMLNS, DIDLLite.XMLNS_URL);
setAttribute(DIDLLite.XMLNS_DC, DIDLLite.XMLNS_DC_URL);
setAttribute(DIDLLite.XMLNS_UPNP, DIDLLite.XMLNS_UPNP_URL);
}
////////////////////////////////////////////////
// Child node
////////////////////////////////////////////////
public void addContentNode(ContentNode node)
{
addNode(node);
}
public boolean removeContentNode(ContentNode node)
{
return removeNode(node);
}
}
| 420 |
3,200 | <gh_stars>1000+
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_
#define MINDSPORE_INCLUDE_API_DATA_TYPE_H_
#include <cstdint>
namespace mindspore {
enum class DataType : int {
kTypeUnknown = 0,
kObjectTypeString = 12,
kObjectTypeList = 13,
kObjectTypeTuple = 14,
kObjectTypeTensorType = 17,
kNumberTypeBegin = 29,
kNumberTypeBool = 30,
kNumberTypeInt8 = 32,
kNumberTypeInt16 = 33,
kNumberTypeInt32 = 34,
kNumberTypeInt64 = 35,
kNumberTypeUInt8 = 37,
kNumberTypeUInt16 = 38,
kNumberTypeUInt32 = 39,
kNumberTypeUInt64 = 40,
kNumberTypeFloat16 = 42,
kNumberTypeFloat32 = 43,
kNumberTypeFloat64 = 44,
kNumberTypeEnd = 46,
// add new enum here
kInvalidType = INT32_MAX,
};
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_DATA_TYPE_H_
| 459 |
602 | <gh_stars>100-1000
//
// WMZPageFixAllVC.h
// WMZPageController
//
// Created by wmz on 2021/8/11.
// Copyright © 2021 wmz. All rights reserved.
//
#import "WMZPageController.h"
NS_ASSUME_NONNULL_BEGIN
@interface WMZPageFixAllVC : WMZPageController
@end
NS_ASSUME_NONNULL_END
| 118 |
942 | <filename>tests/bson-tests.h
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BSON_TESTS_H
#define BSON_TESTS_H
#include <bson.h>
#include <stdio.h>
#include <time.h>
BSON_BEGIN_DECLS
#define BSON_ASSERT_CMPSTR(a, b) \
do { \
if (((a) != (b)) && !!strcmp ((a), (b))) { \
fprintf (stderr, \
"FAIL\n\nAssert Failure: (line#%d) \"%s\" != \"%s\"\n", \
__LINE__, \
a, \
b); \
abort (); \
} \
} while (0)
#define BSON_ASSERT_CMPINT(a, eq, b) \
do { \
if (!((a) eq (b))) { \
fprintf (stderr, \
"FAIL\n\nAssert Failure: (line#%d)" #a " " #eq " " #b "\n", \
__LINE__); \
abort (); \
} \
} while (0)
#ifdef BSON_OS_WIN32
#include <stdarg.h>
#include <share.h>
static __inline int
bson_open (const char *filename, int flags, ...)
{
int fd = -1;
if (_sopen_s (
&fd, filename, flags | _O_BINARY, _SH_DENYNO, _S_IREAD | _S_IWRITE) ==
NO_ERROR) {
return fd;
}
return -1;
}
#define bson_close _close
#define bson_read(f, b, c) ((ssize_t) _read ((f), (b), (int) (c)))
#define bson_write _write
#else
#define bson_open open
#define bson_read read
#define bson_close close
#define bson_write write
#endif
#define bson_eq_bson(bson, expected) \
do { \
char *bson_json, *expected_json; \
const uint8_t *bson_data = bson_get_data ((bson)); \
const uint8_t *expected_data = bson_get_data ((expected)); \
int unequal; \
unsigned o; \
int off = -1; \
unequal = ((expected)->len != (bson)->len) || \
memcmp (bson_get_data ((expected)), \
bson_get_data ((bson)), \
(expected)->len); \
if (unequal) { \
bson_json = bson_as_canonical_extended_json (bson, NULL); \
expected_json = bson_as_canonical_extended_json ((expected), NULL); \
for (o = 0; o < (bson)->len && o < (expected)->len; o++) { \
if (bson_data[o] != expected_data[o]) { \
off = o; \
break; \
} \
} \
if (off == -1) { \
off = BSON_MAX ((expected)->len, (bson)->len) - 1; \
} \
fprintf (stderr, \
"bson objects unequal (byte %u):\n(%s)\n(%s)\n", \
off, \
bson_json, \
expected_json); \
{ \
int fd1 = bson_open ("failure.bad.bson", O_RDWR | O_CREAT, 0640); \
int fd2 = \
bson_open ("failure.expected.bson", O_RDWR | O_CREAT, 0640); \
BSON_ASSERT (fd1 != -1); \
BSON_ASSERT (fd2 != -1); \
BSON_ASSERT ((bson)->len == bson_write (fd1, bson_data, (bson)->len)); \
BSON_ASSERT ((expected)->len == \
bson_write (fd2, expected_data, (expected)->len)); \
bson_close (fd1); \
bson_close (fd2); \
} \
BSON_ASSERT (0); \
} \
} while (0)
static BSON_INLINE void
run_test (const char *name, void (*func) (void))
{
struct timeval begin;
struct timeval end;
struct timeval diff;
long usec;
double format;
fprintf (stdout, "%-42s : ", name);
fflush (stdout);
bson_gettimeofday (&begin);
func ();
bson_gettimeofday (&end);
fprintf (stdout, "PASS");
diff.tv_sec = end.tv_sec - begin.tv_sec;
diff.tv_usec = usec = end.tv_usec - begin.tv_usec;
if (usec < 0) {
diff.tv_sec -= 1;
diff.tv_usec = usec + 1000000;
}
format = diff.tv_sec + (diff.tv_usec / 1000000.0);
fprintf (stdout, " : %lf\n", format);
}
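/*
 * Illustrative usage sketch (not part of the original header): assumes a test function
 * defined in the test program; run_test prints the test name, executes the function and
 * reports the elapsed time.
 *
 *    static void
 *    test_example (void)
 *    {
 *       bson_t *b = bson_new ();
 *       BSON_ASSERT (b);
 *       bson_destroy (b);
 *    }
 *
 *    run_test ("/example", test_example);
 */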
BSON_END_DECLS
#endif /* BSON_TESTS_H */
| 4,709 |
570 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import os.path as osp
import numpy as np
import cv2
from PIL import Image
import torch
import torchvision.transforms as transforms
from .model import BiSeNet
class FaceParser:
def __init__(self, device="cpu"):
mapper = [0, 1, 2, 3, 4, 5, 0, 11, 12, 0, 6, 8, 7, 9, 13, 0, 0, 10, 0]
self.device = device
self.dic = torch.tensor(mapper, device=device)
        save_pth = osp.join(osp.split(osp.realpath(__file__))[0], 'resnet.pth')
net = BiSeNet(n_classes=19)
net.load_state_dict(torch.load(save_pth, map_location=device))
self.net = net.to(device).eval()
self.to_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
    def parse(self, image: np.ndarray):
        # Expects a 512x512 RGB image as a numpy array (e.g. a resized cv2 frame),
        # not a PIL Image, since .shape is used below.
        assert image.shape[:2] == (512, 512)
with torch.no_grad():
image = self.to_tensor(image).to(self.device)
image = torch.unsqueeze(image, 0)
out = self.net(image)[0]
parsing = out.squeeze(0).argmax(0)
parsing = torch.nn.functional.embedding(parsing, self.dic)
return parsing.float()
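# Hypothetical usage sketch (not part of the original module): assumes the pretrained
# resnet.pth shipped with this project sits next to the file, that "face.png" (a
# placeholder path) holds a face crop, and that torch is the version the project pins.
if __name__ == "__main__":
    bgr = cv2.imread("face.png")
    rgb = cv2.cvtColor(cv2.resize(bgr, (512, 512)), cv2.COLOR_BGR2RGB)
    parser = FaceParser(device="cpu")
    mask = parser.parse(rgb)  # (512, 512) tensor of remapped label ids
    print(mask.shape)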
| 573 |
419 | #include "AnimationTarget.h"
#include "System/Animation/AnimationPose.h"
//-------------------------------------------------------------------------
namespace KRG::Animation
{
bool Target::TryGetTransform( Pose const* pPose, Transform& outTransform ) const
{
KRG_ASSERT( m_isSet );
//-------------------------------------------------------------------------
bool const isBoneTarget = IsBoneTarget();
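        // Bone targets are resolved against the supplied pose (optionally combined with the
        // stored offset transform in either bone space or global space); non-bone targets
        // simply return the target's own transform.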
if ( isBoneTarget )
{
KRG_ASSERT( pPose != nullptr );
auto pSkeleton = pPose->GetSkeleton();
int32 const boneIdx = pSkeleton->GetBoneIndex( m_boneID );
if ( boneIdx != InvalidIndex )
{
if ( m_hasOffsets )
{
Transform parentTransform = Transform::Identity;
// Get the local transform and the parent global transform
if ( m_isUsingBoneSpaceOffsets )
{
int32 const parentBoneIdx = pSkeleton->GetParentBoneIndex( m_boneID );
if ( parentBoneIdx != InvalidIndex )
{
parentTransform = pPose->GetGlobalTransform( parentBoneIdx );
}
outTransform = pPose->GetTransform( boneIdx );
}
else // Get the global transform for the target bone
{
outTransform = pPose->GetGlobalTransform( boneIdx );
}
//-------------------------------------------------------------------------
outTransform.SetRotation( outTransform.GetRotation() * m_transform.GetRotation() );
outTransform.SetTranslation( outTransform.GetTranslation() + m_transform.GetTranslation() );
if ( m_isUsingBoneSpaceOffsets && isBoneTarget )
{
outTransform *= parentTransform;
}
}
else
{
outTransform = pPose->GetGlobalTransform( boneIdx );
}
}
else
{
return false;
}
}
else // Just use the internal transform
{
outTransform = m_transform;
}
return true;
}
} | 1,296 |
446 | <filename>ratis-test/src/test/java/org/apache/ratis/util/TestTimeDuration.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ratis.util;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.apache.ratis.util.TimeDuration.Abbreviation;
import static org.apache.ratis.util.TimeDuration.parse;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class TestTimeDuration {
{
Log4jUtils.setLogLevel(TimeDuration.LOG, Level.DEBUG);
}
@Test(timeout = 1000)
public void testAbbreviation() {
Arrays.asList(TimeUnit.values())
.forEach(a -> assertNotNull(Abbreviation.valueOf(a.name())));
assertEquals(TimeUnit.values().length, Abbreviation.values().length);
final List<String> allSymbols = Arrays.stream(Abbreviation.values())
.map(Abbreviation::getSymbols)
.flatMap(List::stream)
.collect(Collectors.toList());
Arrays.asList(TimeUnit.values()).forEach(unit ->
allSymbols.stream()
.map(s -> "0" + s)
.forEach(s -> assertEquals(s, 0L, parse(s, unit))));
}
@Test(timeout = 1000)
public void testParse() {
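    // The parser accepts '_' as a digit separator and all of the unit spellings exercised below.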
assertEquals(1L, parse("1_000_000 ns", TimeUnit.MILLISECONDS));
assertEquals(10L, parse("10_000_000 nanos", TimeUnit.MILLISECONDS));
assertEquals(100L, parse("100_000_000 nanosecond", TimeUnit.MILLISECONDS));
assertEquals(1000L, parse("1_000_000_000 nanoseconds", TimeUnit.MILLISECONDS));
assertEquals(1L, parse("1000 us", TimeUnit.MILLISECONDS));
assertEquals(10L, parse("10000 μs", TimeUnit.MILLISECONDS));
assertEquals(100L, parse("100000 micros", TimeUnit.MILLISECONDS));
assertEquals(1000L, parse("1000000 microsecond", TimeUnit.MILLISECONDS));
assertEquals(10000L, parse("10000000 microseconds", TimeUnit.MILLISECONDS));
assertEquals(1L, parse("1 ms", TimeUnit.MILLISECONDS));
assertEquals(10L, parse("10 msec", TimeUnit.MILLISECONDS));
assertEquals(100L, parse("100 millis", TimeUnit.MILLISECONDS));
assertEquals(1000L, parse("1000 millisecond", TimeUnit.MILLISECONDS));
assertEquals(10000L, parse("10000 milliseconds", TimeUnit.MILLISECONDS));
assertEquals(1000L, parse("1 s", TimeUnit.MILLISECONDS));
assertEquals(10000L, parse("10 sec", TimeUnit.MILLISECONDS));
assertEquals(100000L, parse("100 second", TimeUnit.MILLISECONDS));
assertEquals(1000000L, parse("1000 seconds", TimeUnit.MILLISECONDS));
assertEquals(60, parse("1 m", TimeUnit.SECONDS));
assertEquals(600, parse("10 min", TimeUnit.SECONDS));
assertEquals(6000, parse("100 minutes", TimeUnit.SECONDS));
assertEquals(60000, parse("1000 minutes", TimeUnit.SECONDS));
assertEquals(60, parse("1 h", TimeUnit.MINUTES));
assertEquals(600, parse("10 hr", TimeUnit.MINUTES));
assertEquals(6000, parse("100 hour", TimeUnit.MINUTES));
assertEquals(60000, parse("1000 hours", TimeUnit.MINUTES));
assertEquals(24, parse("1 d", TimeUnit.HOURS));
assertEquals(240, parse("10 day", TimeUnit.HOURS));
assertEquals(2400, parse("100 days", TimeUnit.HOURS));
}
@Test(timeout = 1000)
public void testRoundUp() {
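    // roundUpNanos(n) rounds n up (toward positive infinity) to the nearest multiple of this duration.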
final long nanosPerSecond = 1_000_000_000L;
final TimeDuration oneSecond = TimeDuration.valueOf(1, TimeUnit.SECONDS);
assertEquals(-nanosPerSecond, oneSecond.roundUpNanos(-nanosPerSecond - 1));
assertEquals(-nanosPerSecond, oneSecond.roundUpNanos(-nanosPerSecond));
assertEquals(0, oneSecond.roundUpNanos(-nanosPerSecond + 1));
assertEquals(0, oneSecond.roundUpNanos(-1));
assertEquals(0, oneSecond.roundUpNanos(0));
assertEquals(nanosPerSecond, oneSecond.roundUpNanos(1));
assertEquals(nanosPerSecond, oneSecond.roundUpNanos(nanosPerSecond - 1));
assertEquals(nanosPerSecond, oneSecond.roundUpNanos(nanosPerSecond));
assertEquals(2*nanosPerSecond, oneSecond.roundUpNanos(nanosPerSecond + 1));
}
@Test(timeout = 1000)
public void testTo() {
final TimeDuration oneSecond = TimeDuration.valueOf(1, TimeUnit.SECONDS);
assertTo(1000, oneSecond, TimeUnit.MILLISECONDS);
final TimeDuration nanos = assertTo(1_000_000_000, oneSecond, TimeUnit.NANOSECONDS);
assertTo(1000, nanos, TimeUnit.MILLISECONDS);
assertTo(0, oneSecond, TimeUnit.MINUTES);
assertTo(0, nanos, TimeUnit.MINUTES);
final TimeDuration millis = TimeDuration.valueOf(1_999, TimeUnit.MILLISECONDS);
assertTo(1, millis, TimeUnit.SECONDS);
assertTo(0, millis, TimeUnit.MINUTES);
}
static TimeDuration assertTo(long expected, TimeDuration timeDuration, TimeUnit toUnit) {
final TimeDuration computed = timeDuration.to(toUnit);
assertEquals(expected, computed.getDuration());
assertEquals(toUnit, computed.getUnit());
return computed;
}
@Test(timeout = 1000)
public void testAddAndSubtract() {
final TimeDuration oneSecond = TimeDuration.valueOf(1, TimeUnit.SECONDS);
final TimeDuration tenSecond = TimeDuration.valueOf(10, TimeUnit.SECONDS);
{
final TimeDuration d = oneSecond.subtract(oneSecond);
assertEquals(0, d.getDuration());
assertEquals(TimeUnit.SECONDS, d.getUnit());
final TimeDuration sum = d.add(oneSecond);
assertEquals(1, sum.getDuration());
assertEquals(TimeUnit.SECONDS, sum.getUnit());
}
{
final TimeDuration d = tenSecond.subtract(oneSecond);
assertEquals(9, d.getDuration());
assertEquals(TimeUnit.SECONDS, d.getUnit());
final TimeDuration sum = d.add(oneSecond);
assertEquals(10, sum.getDuration());
assertEquals(TimeUnit.SECONDS, sum.getUnit());
}
{
final TimeDuration d = oneSecond.subtract(tenSecond);
assertEquals(-9, d.getDuration());
assertEquals(TimeUnit.SECONDS, d.getUnit());
final TimeDuration sum = d.add(tenSecond);
assertEquals(1, sum.getDuration());
assertEquals(TimeUnit.SECONDS, sum.getUnit());
}
final TimeDuration oneMS = TimeDuration.valueOf(1, TimeUnit.MILLISECONDS);
{
final TimeDuration d = oneSecond.subtract(oneMS);
assertEquals(999, d.getDuration());
assertEquals(TimeUnit.MILLISECONDS, d.getUnit());
final TimeDuration sum = d.add(oneSecond);
assertEquals(1999, sum.getDuration());
assertEquals(TimeUnit.MILLISECONDS, sum.getUnit());
}
{
final TimeDuration d = oneMS.subtract(oneSecond);
assertEquals(-999, d.getDuration());
assertEquals(TimeUnit.MILLISECONDS, d.getUnit());
final TimeDuration sum = d.add(oneSecond);
assertEquals(1, sum.getDuration());
assertEquals(TimeUnit.MILLISECONDS, sum.getUnit());
}
}
@Test(timeout = 1000)
public void testNegate() {
assertNegate(0);
assertNegate(1);
assertNegate(-1);
assertNegate(Long.MAX_VALUE);
Assert.assertEquals(
TimeDuration.valueOf(Long.MAX_VALUE, TimeUnit.SECONDS),
TimeDuration.valueOf(Long.MIN_VALUE, TimeUnit.SECONDS).negate());
}
private static void assertNegate(long n) {
Assert.assertEquals(
TimeDuration.valueOf(-n, TimeUnit.SECONDS),
TimeDuration.valueOf(n, TimeUnit.SECONDS).negate());
Assert.assertEquals(
TimeDuration.valueOf(n, TimeUnit.SECONDS),
TimeDuration.valueOf(-n, TimeUnit.SECONDS).negate());
}
@Test(timeout = 1000)
public void testMultiply() {
assertMultiply(0, TimeDuration.ONE_SECOND, TimeDuration.valueOf(0, TimeUnit.SECONDS));
assertMultiply(0.001, TimeDuration.ONE_SECOND, TimeDuration.ONE_MILLISECOND);
assertMultiply(0.001/60, TimeDuration.ONE_MINUTE, TimeDuration.ONE_MILLISECOND);
assertMultiply(100,
TimeDuration.valueOf(Long.MAX_VALUE / 10, TimeUnit.NANOSECONDS),
TimeDuration.valueOf(Long.MAX_VALUE / 100, TimeUnit.MICROSECONDS)
);
assertMultiply(1E-30,
TimeDuration.valueOf(1_000_000_000_000_000_000L, TimeUnit.DAYS),
TimeDuration.valueOf(86, TimeUnit.NANOSECONDS)
);
}
private static void assertMultiply(double multiplier, TimeDuration t, TimeDuration expected) {
assertMultiply(t , multiplier, expected);
assertMultiply(t.negate(), -multiplier, expected);
assertMultiply(t.negate(), multiplier, expected.negate());
assertMultiply(t , -multiplier, expected.negate());
}
private static void assertMultiply(TimeDuration t, double multiplier, TimeDuration expected) {
final TimeDuration computed = t.multiply(multiplier);
TimeDuration.LOG.info("assertMultiply: {} x {} = {} ?= {}\n\n", t, multiplier, computed, expected);
Assert.assertEquals(expected.getUnit(), computed.getUnit());
final long d = Math.abs(computed.getDuration() - expected.getDuration());
Assert.assertTrue(d <= Math.abs(expected.getDuration()) * TimeDuration.ERROR_THRESHOLD);
}
@Test(timeout = 1000)
public void testHigherLower() {
final TimeUnit[] units = {TimeUnit.NANOSECONDS, TimeUnit.MICROSECONDS, TimeUnit.MILLISECONDS,
TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS};
for(int i = 1; i < units.length; i++) {
assertHigherLower(units[i-1], units[i]);
}
Assert.assertSame(TimeUnit.NANOSECONDS, TimeDuration.lowerUnit(TimeUnit.NANOSECONDS));
Assert.assertSame(TimeUnit.DAYS, TimeDuration.higherUnit(TimeUnit.DAYS));
}
private static void assertHigherLower(TimeUnit lower, TimeUnit higher) {
Assert.assertSame(lower, TimeDuration.lowerUnit(higher));
Assert.assertSame(higher, TimeDuration.higherUnit(lower));
}
}
| 3,885 |
1,143 | <reponame>zhouyijiaren/commons<filename>src/python/twitter/common/java/constant.py
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from .util import javaify
from .java_types import *
"""
Parse constants as defined in
http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#20080
"""
class ConstantBase(object):
def __call__(self, constants):
return 'AnonymousConstant()'
def parse(self, data):
elements, _ = JavaNativeType.parse(data, *self.__class__.TYPES)
return elements
def size(self):
return sum(map(lambda typ: typ.size(), self.__class__.TYPES))
class ClassConstant(ConstantBase):
"""
u1 tag
u2 name_index
"""
TYPES = [u1, u2]
def __init__(self, data):
self._tag = u1(data[0:1]).get()
self._name_index = u2(data[1:3]).get()
def __call__(self, constants):
return str(constants[self._name_index].bytes())
class FieldrefConstant(ConstantBase):
"""
u1 tag
u2 class_index
u2 name_and_type_index
"""
TYPES = [u1, u2, u2]
def __init__(self, data):
self._tag, self._class_index, self._name_and_type_index = self.parse(data)
def __call__(self, constants):
return '%s.%s' % (
constants[self._class_index](constants),
constants[self._name_and_type_index](constants))
class MethodrefConstant(ConstantBase):
"""
u1 tag
u2 class_index
u2 name_and_type_index
"""
TYPES = [u1, u2, u2]
def __init__(self, data):
self._tag, self._class_index, self._name_and_type_index = self.parse(data)
def __call__(self, constants):
return '%s.%s' % (
constants[self._class_index](constants),
constants[self._name_and_type_index](constants))
class InterfaceMethodrefConstant(ConstantBase):
"""
u1 tag
u2 class_index
u2 name_and_type_index
"""
TYPES = [u1, u2, u2]
def __init__(self, data):
self._tag, self._class_index, self._name_and_type_index = self.parse(data)
def __call__(self, constants):
return '%s.%s' % (
constants[self._class_index](constants),
constants[self._name_and_type_index](constants))
class StringConstant(ConstantBase):
"""
u1 tag
u2 string_index
"""
TYPES = [u1, u2]
def __init__(self, data):
self._tag, self._string_index = self.parse(data)
class IntegerConstant(ConstantBase):
"""
u1 tag
u4 bytes
"""
TYPES = [u1, u4]
def __init__(self, data):
self._tag, self._bytes = self.parse(data)
class FloatConstant(ConstantBase):
"""
u1 tag
u4 bytes
"""
TYPES = [u1, u4]
def __init__(self, data):
self._tag, self._bytes = self.parse(data)
class LongConstant(ConstantBase):
"""
u1 tag
u4 high_bytes
u4 low_bytes
"""
TYPES = [u1, u4, u4]
def __init__(self, data):
self._tag, self._high_bytes, self._low_bytes = self.parse(data)
class DoubleConstant(ConstantBase):
"""
u1 tag
u4 high_bytes
u4 low_bytes
"""
TYPES = [u1, u4, u4]
def __init__(self, data):
self._tag, self._high_bytes, self._low_bytes = self.parse(data)
class NameAndTypeConstant(ConstantBase):
"""
u1 tag
u2 name_index
u2 descriptor_index
"""
TYPES = [u1, u2, u2]
def __init__(self, data):
self._tag, self._name_index, self._descriptor_index = self.parse(data)
def size(self):
return u1.size() + u2.size() + u2.size()
def __call__(self, constants):
return '%s.%s' % (
constants[self._name_index].bytes(),
constants[self._descriptor_index].bytes())
class Utf8Constant(ConstantBase):
"""
u1 tag
u2 length
u1 bytes[length]
"""
def __init__(self, data):
(self._tag, self._length), data = JavaNativeType.parse(data, u1, u2)
self._bytes = data[0:self._length]
def size(self):
return u1.size() + u2.size() + self._length
def bytes(self):
return self._bytes
def __str__(self):
return self._bytes
class Constant(object):
# http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#1221
CONSTANT_Class = 7
CONSTANT_Fieldref = 9
CONSTANT_Methodref = 10
CONSTANT_InterfaceMethodref = 11
CONSTANT_String = 8
CONSTANT_Integer = 3
CONSTANT_Float = 4
CONSTANT_Long = 5
CONSTANT_Double = 6
CONSTANT_NameAndType = 12
CONSTANT_Utf8 = 1
_BASE_TYPES = {
CONSTANT_Class: ClassConstant,
CONSTANT_Fieldref: FieldrefConstant,
CONSTANT_Methodref: MethodrefConstant,
CONSTANT_InterfaceMethodref: InterfaceMethodrefConstant,
CONSTANT_String: StringConstant,
CONSTANT_Integer: IntegerConstant,
CONSTANT_Float: FloatConstant,
CONSTANT_Long: LongConstant,
CONSTANT_Double: DoubleConstant,
CONSTANT_NameAndType: NameAndTypeConstant,
CONSTANT_Utf8: Utf8Constant
}
@staticmethod
def parse(data):
    # Read the leading tag byte and dispatch to the matching constant-pool class.
    tag = u1(data[0:1]).get()
constant = Constant._BASE_TYPES[tag](data)
return constant
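# Hypothetical usage sketch (not part of the original module): walks a raw constant-pool
# byte string whose start and entry count the caller has already read from a classfile
# header; 'pool_bytes' and 'constant_pool_count' are assumed inputs.
def read_constant_pool(pool_bytes, constant_pool_count):
  constants = {}
  offset = 0
  index = 1  # constant-pool indices are 1-based
  while index < constant_pool_count:
    constant = Constant.parse(pool_bytes[offset:])
    constants[index] = constant
    offset += constant.size()
    # Long and Double entries occupy two pool slots per the JVM specification.
    index += 2 if isinstance(constant, (LongConstant, DoubleConstant)) else 1
  return constants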
| 2,269 |