import os
import unittest
import numpy as np
from mcfly import storage, modelgen
from test_tools import safe_remove
class StorageSuite(unittest.TestCase):
"""Basic test cases."""
def test_savemodel(self):
""" Test whether a dummy model is saved """
model = create_dummy_model()
storage.savemodel(model, self.path, self.modelname)
assert os.path.isfile(self.architecture_json_file_name) and os.path.isfile(self.weights_file_name)
def test_savemodel_keras(self):
""" Test whether a dummy model is saved """
model = create_dummy_model()
model.save(self.keras_model_file_path)
assert os.path.isfile(self.keras_model_file_path)
def test_loadmodel(self):
""" Test whether a dummy model can be save and then loaded """
model = create_dummy_model()
storage.savemodel(model, self.path, self.modelname)
loaded_model = storage.loadmodel(self.path, self.modelname)
assert hasattr(loaded_model, 'fit')
def setUp(self):
self.path = os.getcwd() + '/'
self.modelname = 'teststorage'
self.architecture_json_file_name = self.path + self.modelname + '_architecture.json'
self.weights_file_name = self.path + self.modelname + '_weights.npy'
self.keras_model_file_path = os.path.join(self.path, 'teststorage.h5')
def tearDown(self):
safe_remove(self.architecture_json_file_name)
safe_remove(self.weights_file_name)
safe_remove(self.keras_model_file_path)
def create_dummy_model():
np.random.seed(123)
num_time_steps = 100
num_channels = 2
num_samples_train = 5
model, _parameters, _type = modelgen.generate_models((num_samples_train, num_time_steps, num_channels), 5, 1)[0]
return model
if __name__ == '__main__':
unittest.main()
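# A minimal sketch of the `safe_remove` helper imported from test_tools
# (the real module is not shown in this row, so this is an assumption):
#
#     import os
#
#     def safe_remove(file_name):
#         if os.path.exists(file_name):
#             os.remove(file_name)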
from capstone.x86_const import *
from collections import OrderedDict
import random
def patch(pt):
for func in pt.funcs():
dis = func.dis()
pops = OrderedDict()
first = True
for ins in reversed(dis):
if ins.id in (X86_INS_RET, X86_INS_LEAVE):
continue
elif ins.id == X86_INS_POP:
reg = ins.operands[0].reg
# ignore a leading ebp pop so we don't screw up the frame restore
if not (first and reg == X86_REG_EBP):
pops[reg] = ins
first = False
else:
break
if len(pops) == 1:
ins = list(pops.values())[0]
name = ins.reg_name(ins.operands[0].reg)
pt.warn('Only one POP, function not hardened (reg %s)' % (name))
elif len(pops) > 1:
# TODO: bail on pc-relative loads? probably not a problem for CGC as it's not PIE
pt.info('[*] Hardening (%d) pops.' % (len(pops)))
remain = set(pops.keys())
pushes = OrderedDict()
extra = []
for ins in dis:
if ins.id == X86_INS_PUSH:
reg = ins.operands[0].reg
if reg in remain:
remain.remove(reg)
pushes[reg] = ins
continue
if not remain:
break
extra.append(ins)
regs = list(pops.keys())
new = list(regs)
while new[-1] == regs[-1]:
random.shuffle(new)
head = [pushes[reg] for reg in new] + extra
head_addr = min(ins.address for ins in head)
head_data = b''.join(bytes(ins.bytes) for ins in head)
tail = [pops[reg] for reg in reversed(new)]
tail_addr = min(ins.address for ins in tail)
tail_data = b''.join(bytes(ins.bytes) for ins in tail)
pt.patch(head_addr, raw=head_data, is_asm=True)
pt.patch(tail_addr, raw=tail_data, is_asm=True)
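# Illustrative effect of patch() above (a sketch, not actual tool output):
# for a prologue `push ebx; push esi; push edi`, the matching epilogue pops
# are collected in reverse, the push order is shuffled until the last-pushed
# register changes (e.g. `push esi; push edi; push ebx`), and the reordered
# encodings are written over both the prologue and the epilogue.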
{
"name": "SearchInTwitter",
"version": "1.0.0",
"summary": "this CocoaPod use to search twitter by string to get list of statuses",
"description": "Using this pod you can serch for Tweets string in Twitter and get list of statuses",
"homepage": "https://github.com/gal-m/SearchInTwitter.git",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"Gal": "<EMAIL>"
},
"source": {
"git": "https://github.com/gal-m/SearchInTwitter.git",
"tag": "1.0.0"
},
"platforms": {
"ios": "8.0"
},
"source_files": "SearchInTwitter/**/*",
"dependencies": {
"AFNetworking": [
]
}
}
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <string>
#include <memory>
#include <queue>
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset8.hpp>
#include <ngraph/pass/manager.hpp>
#include <transformations/init_node_info.hpp>
#include <openvino/pass/make_stateful.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"
using namespace testing;
using namespace ngraph;
using namespace opset8;
using namespace std;
std::shared_ptr<ov::Model> get_test_model(bool insert_squeeze, bool use_friendly_names) {
std::shared_ptr<ov::Model> model;
auto X = make_shared<Parameter>(element::f32, Shape{32, 1, 10});
auto Y = make_shared<Parameter>(element::f32, Shape{32, 1, 10});
if (!use_friendly_names) {
X->get_output_tensor(0).add_names({"x"});
Y->get_output_tensor(0).add_names({"y"});
} else {
X->set_friendly_name("x");
Y->set_friendly_name("y");
}
// -> Add -> Squeeze -> Result
// -> Result
// or
// -> Add -> Result
// -> Result
std::shared_ptr<Node> node;
node = make_shared<Add>(X, Y);
auto result0 = make_shared<Result>(node);
if (insert_squeeze)
node = make_shared<Squeeze>(node);
auto result1 = make_shared<Result>(node);
if (!use_friendly_names) {
result0->get_input_tensor(0).add_names({"res0"});
result1->get_input_tensor(0).add_names({"res1"});
} else {
result0->set_friendly_name("res0");
result1->set_friendly_name("res1");
}
model = make_shared<Function>(ResultVector{result0, result1}, ParameterVector{X, Y});
model->validate_nodes_and_infer_types();
return model;
}
std::shared_ptr<ov::Model> get_ref_model(bool insert_squeeze, bool use_friendly_names) {
std::shared_ptr<ov::Model> model;
// create ReadValue for X
auto variable_x = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "xres0"});
auto const_zero_x = make_shared<Constant>(element::f32, Shape{32, 1, 10}, 0);
auto read_val_x = make_shared<ReadValue>(const_zero_x, variable_x);
// create ReadValue for Y
auto variable_y = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "yres1"});
auto const_zero_y = make_shared<Constant>(element::f32, Shape{32, 1, 10}, 0);
auto read_val_y = make_shared<ReadValue>(const_zero_y, variable_y);
if (!use_friendly_names) {
read_val_x->get_output_tensor(0).add_names({"x"});
read_val_y->get_output_tensor(0).add_names({"y"});
} else {
read_val_x->set_friendly_name("x");
read_val_y->set_friendly_name("y");
}
// -> Add -> Squeeze -> Assign
// -> Assign
// or
// -> Add -> Assign
// -> Assign
shared_ptr<ov::Node> node;
node = make_shared<Add>(read_val_x, read_val_y);
auto assign_x = make_shared<Assign>(node, variable_x);
if (!use_friendly_names) {
node->get_output_tensor(0).add_names({"res0"});
} else {
node->set_friendly_name("res0");
}
if (insert_squeeze) {
node = make_shared<Squeeze>(node);
}
auto assign_y = make_shared<Assign>(node, variable_y);
if (!use_friendly_names) {
node->get_output_tensor(0).add_names({"res1"});
} else {
node->set_friendly_name("res1");
}
assign_x->add_control_dependency(read_val_x);
assign_y->add_control_dependency(read_val_y);
model = make_shared<Function>(ResultVector{}, SinkVector{assign_x, assign_y}, ParameterVector{});
model->validate_nodes_and_infer_types();
return model;
}
TEST(TransformationTests, make_stateful_by_tensor_name) {
std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
{
f = get_test_model(true, false);
std::map<std::string, std::string> tensor_names = {{"x", "res0"}, {"y", "res1"}};
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ov::pass::MakeStateful>(tensor_names);
manager.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
f_ref = get_ref_model(true, false);
}
auto res = compare_functions(f, f_ref);
EXPECT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, make_stateful_by_param_res) {
std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
{
f = get_test_model(true, true);
auto pairs = ov::pass::MakeStateful::ParamResPairs{{f->get_parameters()[0], f->get_results()[0]},
{f->get_parameters()[1], f->get_results()[1]}};
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ov::pass::MakeStateful>(pairs);
manager.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
f_ref = get_ref_model(true, true);
}
auto res = compare_functions(f, f_ref);
ASSERT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, make_stateful_dynamic_shapes) {
std::shared_ptr<ngraph::Function> f(nullptr);
{
// dynamic shapes are not supported
auto X = make_shared<Parameter>(element::f32, PartialShape::dynamic());
auto Y = make_shared<Parameter>(element::f32, PartialShape::dynamic());
X->get_output_tensor(0).add_names({"x"});
Y->get_output_tensor(0).add_names({"y"});
auto add = make_shared<Add>(X, Y);
auto result0 = make_shared<Result>(add);
auto result1 = make_shared<Result>(add);
result0->get_input_tensor(0).add_names({"res0"});
result1->get_input_tensor(0).add_names({"res1"});
f = make_shared<Function>(ResultVector{result0, result1}, ParameterVector{X, Y});
map<std::string, std::string> pair_names = {{"x", "res0"}, {"y", "res1"}};
f->validate_nodes_and_infer_types();
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ov::pass::MakeStateful>(pair_names);
try {
manager.run_passes(f);
} catch (const ::ov::AssertFailure& ex) {
EXPECT_STR_CONTAINS(ex.what(), "MakeStateful transformation doesn't support dynamic shapes.");
} catch (...) {
FAIL() << "Expected ::ov::AssertFailure";
}
}
}
TEST(TransformationTests, make_stateful_one_out_to_several_results_by_tensor_names) {
std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
{
f = get_test_model(false, false);
std::map<std::string, std::string> tensor_names = {{"x", "res0"}, {"y", "res1"}};
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ov::pass::MakeStateful>(tensor_names);
manager.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
f_ref = get_ref_model(false, false);
}
auto res = compare_functions(f, f_ref);
EXPECT_TRUE(res.first) << res.second;
}
TEST(TransformationTests, make_stateful_one_out_to_several_results_by_param_res) {
std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
{
f = get_test_model(false, true);
auto pairs = ov::pass::MakeStateful::ParamResPairs{{f->get_parameters()[0], f->get_results()[0]},
{f->get_parameters()[1], f->get_results()[1]}};
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
manager.register_pass<ov::pass::MakeStateful>(pairs);
manager.run_passes(f);
ASSERT_NO_THROW(check_rt_info(f));
}
{
f_ref = get_ref_model(false, true);
}
auto res = compare_functions(f, f_ref);
EXPECT_TRUE(res.first) << res.second;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.compaction.suite;
import java.util.ArrayList;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.compaction.action.CompactionCompleteAction;
import org.apache.gobblin.configuration.State;
import org.apache.gobblin.dataset.FileSystemDataset;
@Slf4j
public class TestCompactionSuites {
/**
* Test hive registration failure
*/
public static class HiveRegistrationCompactionSuite extends CompactionSuiteBase {
public HiveRegistrationCompactionSuite(State state) {
super(state);
}
public List<CompactionCompleteAction<FileSystemDataset>> getCompactionCompleteActions() {
ArrayList<CompactionCompleteAction<FileSystemDataset>> array = new ArrayList<>();
array.add((dataset) -> {
if (dataset.datasetURN().contains(TestCompactionSuiteFactories.DATASET_FAIL))
throw new RuntimeException("test-hive-registration-failure");
});
return array;
}
}
}
#ifndef _LIBC_SYS_VFS_H
#define _LIBC_SYS_VFS_H 1
#include <sys/types.h>
#define fsid_t \
struct \
{ \
int __val[2]; \
}
struct statfs
{
__fsword_t f_type; /* Type of filesystem (see below) */
__fsword_t f_bsize; /* Optimal transfer block size */
fsblkcnt_t f_blocks; /* Total data blocks in filesystem */
fsblkcnt_t f_bfree; /* Free blocks in filesystem */
fsblkcnt_t f_bavail; /* Free blocks available to
unprivileged user */
fsfilcnt_t f_files; /* Total inodes in filesystem */
fsfilcnt_t f_ffree; /* Free inodes in filesystem */
fsid_t f_fsid; /* Filesystem ID */
__fsword_t f_namelen; /* Maximum length of filenames */
__fsword_t f_frsize; /* Fragment size (since Linux 2.6) */
__fsword_t f_flags; /* Mount flags of filesystem
(since Linux 2.6.36) */
__fsword_t f_spare[4];
/* Padding bytes reserved for future use */
};
int statfs(const char *path, struct statfs *buf);
int fstatfs(int fd, struct statfs *buf);
#endif
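/*
 * Illustrative use of this interface (a sketch; assumes <stdio.h>):
 *
 *   struct statfs sb;
 *   if (statfs("/", &sb) == 0)
 *       printf("block size %ld, free blocks %llu\n",
 *              (long)sb.f_bsize, (unsigned long long)sb.f_bfree);
 */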
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.
// Description : Basic and partial implementation of IMFXEffect which serves as a base for concrete implementations
#ifndef _MFX_EFFECT_BASE_H_
#define _MFX_EFFECT_BASE_H_
#pragma once
#include <CryFlags.h>
#include "IMFXEffect.h"
class CMFXEffectBase
: public IMFXEffect
{
public:
CMFXEffectBase(const uint16 _typeFilterFlag);
//IMFXEffect (partial implementation)
virtual void SetCustomParameter(const char* customParameter, const SMFXCustomParamValue& customParameterValue) override;
virtual void PreLoadAssets() override;
virtual void ReleasePreLoadAssets() override;
//~IMFXEffect
bool CanExecute(const SMFXRunTimeEffectParams& params) const;
private:
CCryFlags<uint16> m_runtimeExecutionFilter;
};
typedef _smart_ptr<CMFXEffectBase> TMFXEffectBasePtr;
#endif // _MFX_EFFECT_BASE_H_
| 435 |
739 | <reponame>gregomebije1/ZtM-Job-Board
{
"id": "404acb19-6ebb-4102-b53f-eeb8aed20f48",
"name": "<NAME>",
"img": "",
"email":"",
"links": {
"website": "https://dyakov.cc",
"linkedin": "https://www.linkedin.com/in/petar-dyakov",
"github": "https://github.com/PepoDyakov"
},
"jobTitle": "Web Developer",
"location": {
"city": "Sofia",
"state": "",
"country": "Bulgaria"
}
}
#pragma once
#include <caml/mlvalues.h> /* for the OCaml `value` type and CAMLprim */
CAMLprim value oni2_wrapPointer();
void *oni2_unwrapPointer(value data);
/*
* Copyright Terracotta, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ehcache.jsr107;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import javax.cache.Cache;
import javax.cache.event.CacheEntryCreatedListener;
import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryEventFilter;
import javax.cache.event.CacheEntryExpiredListener;
import javax.cache.event.CacheEntryListener;
import javax.cache.event.CacheEntryRemovedListener;
import javax.cache.event.CacheEntryUpdatedListener;
import javax.cache.event.EventType;
/**
* @author teck
*/
class EventListenerAdaptors {
static abstract class EventListenerAdaptor<K, V> implements org.ehcache.event.CacheEventListener<K, V> {
final CacheEntryEventFilter<K, V> filter;
final Cache<K, V> source;
final boolean requestsOld;
EventListenerAdaptor(Cache<K, V> source, CacheEntryEventFilter<K, V> filter, boolean requestsOld) {
this.source = source;
this.filter = filter;
this.requestsOld = requestsOld;
}
abstract org.ehcache.event.EventType getEhcacheEventType();
}
@SuppressWarnings("unchecked")
static <K, V> List<EventListenerAdaptor<K, V>> ehListenersFor(CacheEntryListener<? super K, ? super V> listener,
CacheEntryEventFilter<? super K, ? super V> filter, Cache<K, V> source, boolean requestsOld) {
List<EventListenerAdaptor<K, V>> rv = new ArrayList<>();
if (listener instanceof CacheEntryUpdatedListener) {
rv.add(new UpdatedAdaptor<>(source, (CacheEntryUpdatedListener<K, V>) listener,
(CacheEntryEventFilter<K, V>) filter, requestsOld));
}
if (listener instanceof CacheEntryCreatedListener) {
rv.add(new CreatedAdaptor<>(source, (CacheEntryCreatedListener<K, V>) listener,
(CacheEntryEventFilter<K, V>) filter, requestsOld));
}
if (listener instanceof CacheEntryRemovedListener) {
rv.add(new RemovedAdaptor<>(source, (CacheEntryRemovedListener<K, V>) listener,
(CacheEntryEventFilter<K, V>) filter, requestsOld));
}
if (listener instanceof CacheEntryExpiredListener) {
rv.add(new ExpiredAdaptor<>(source, (CacheEntryExpiredListener<K, V>) listener,
(CacheEntryEventFilter<K, V>) filter, requestsOld));
}
return rv;
}
private EventListenerAdaptors() {
//
}
static class UpdatedAdaptor<K, V> extends EventListenerAdaptor<K, V> {
private final CacheEntryUpdatedListener<K, V> listener;
UpdatedAdaptor(Cache<K, V> source, CacheEntryUpdatedListener<K, V> listener, CacheEntryEventFilter<K, V> filter,
boolean requestsOld) {
super(source, filter, requestsOld);
this.listener = listener;
}
@Override
org.ehcache.event.EventType getEhcacheEventType() {
return org.ehcache.event.EventType.UPDATED;
}
@SuppressWarnings("unchecked")
@Override
public void onEvent(org.ehcache.event.CacheEvent<? extends K, ? extends V> ehEvent) {
Eh107CacheEntryEvent<K, V> event = new Eh107CacheEntryEvent.NormalEvent<>(source, EventType.UPDATED, ehEvent, requestsOld);
if (filter.evaluate(event)) {
Set<?> events = Collections.singleton(event);
listener.onUpdated((Iterable<CacheEntryEvent<? extends K, ? extends V>>) events);
}
}
}
static class RemovedAdaptor<K, V> extends EventListenerAdaptor<K, V> {
private final CacheEntryRemovedListener<K, V> listener;
RemovedAdaptor(Cache<K, V> source, CacheEntryRemovedListener<K, V> listener, CacheEntryEventFilter<K, V> filter,
boolean requestsOld) {
super(source, filter, requestsOld);
this.listener = listener;
}
@Override
org.ehcache.event.EventType getEhcacheEventType() {
return org.ehcache.event.EventType.REMOVED;
}
@SuppressWarnings("unchecked")
@Override
public void onEvent(org.ehcache.event.CacheEvent<? extends K, ? extends V> ehEvent) {
Eh107CacheEntryEvent<K, V> event = new Eh107CacheEntryEvent.RemovingEvent<>(source, EventType.REMOVED, ehEvent, requestsOld);
if (filter.evaluate(event)) {
Set<?> events = Collections.singleton(event);
listener.onRemoved((Iterable<CacheEntryEvent<? extends K, ? extends V>>) events);
}
}
}
static class ExpiredAdaptor<K, V> extends EventListenerAdaptor<K, V> {
private final CacheEntryExpiredListener<K, V> listener;
ExpiredAdaptor(Cache<K, V> source, CacheEntryExpiredListener<K, V> listener, CacheEntryEventFilter<K, V> filter,
boolean requestsOld) {
super(source, filter, requestsOld);
this.listener = listener;
}
@Override
org.ehcache.event.EventType getEhcacheEventType() {
return org.ehcache.event.EventType.EXPIRED;
}
@SuppressWarnings("unchecked")
@Override
public void onEvent(org.ehcache.event.CacheEvent<? extends K, ? extends V> ehEvent) {
Eh107CacheEntryEvent<K, V> event = new Eh107CacheEntryEvent.RemovingEvent<>(source, EventType.EXPIRED, ehEvent, requestsOld);
if (filter.evaluate(event)) {
Set<?> events = Collections.singleton(event);
listener.onExpired((Iterable<CacheEntryEvent<? extends K, ? extends V>>) events);
}
}
}
static class CreatedAdaptor<K, V> extends EventListenerAdaptor<K, V> {
private final CacheEntryCreatedListener<K, V> listener;
CreatedAdaptor(Cache<K, V> source, CacheEntryCreatedListener<K, V> listener, CacheEntryEventFilter<K, V> filter,
boolean requestsOld) {
super(source, filter, requestsOld);
this.listener = listener;
}
@Override
org.ehcache.event.EventType getEhcacheEventType() {
return org.ehcache.event.EventType.CREATED;
}
@SuppressWarnings("unchecked")
@Override
public void onEvent(org.ehcache.event.CacheEvent<? extends K, ? extends V> ehEvent) {
Eh107CacheEntryEvent<K, V> event = new Eh107CacheEntryEvent.NormalEvent<>(source, EventType.CREATED, ehEvent, false);
if (filter.evaluate(event)) {
Set<?> events = Collections.singleton(event);
listener.onCreated((Iterable<CacheEntryEvent<? extends K, ? extends V>>) events);
}
}
}
}
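// Illustrative use (a sketch; listener, filter, cache and requestsOld come
// from the surrounding Eh107 configuration and are not constructed here):
// List<EventListenerAdaptors.EventListenerAdaptor<K, V>> adaptors =
//     EventListenerAdaptors.ehListenersFor(listener, filter, cache, requestsOld);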
package com.mossle.plm.persistence.manager;
import com.mossle.core.hibernate.HibernateEntityDao;
import com.mossle.plm.persistence.domain.PlmProject;
import org.springframework.stereotype.Service;
@Service
public class PlmProjectManager extends HibernateEntityDao<PlmProject> {
}
package chen.testchat.adapter;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.SectionIndexer;
import org.pointstone.cugappplat.baseadapter.BaseWrappedAdapter;
import org.pointstone.cugappplat.baseadapter.BaseWrappedViewHolder;
import java.util.List;
import chen.testchat.R;
import chen.testchat.bean.User;
import chen.testchat.util.LogUtil;
/**
* Project: TestChat
* Author: 陈锦军
* Created: 2017/5/20 12:27
* QQ: 1981367757
*/
public class ContactsAdapter extends BaseWrappedAdapter<User, BaseWrappedViewHolder> {
private SectionIndexer mSectionIndexer;
public interface OnItemCheckListener {
void onItemChecked(boolean isCheck, User user, BaseWrappedViewHolder holder);
}
private OnItemCheckListener mOnCheckedChangeListener;
public void setOnCheckedChangeListener(OnItemCheckListener onCheckedChangeListener) {
mOnCheckedChangeListener = onCheckedChangeListener;
}
public ContactsAdapter(List<User> data, int layoutId) {
super(data, layoutId);
}
@Override
protected void convert(final BaseWrappedViewHolder holder, final User data) {
LogUtil.e("这里getView211");
int position = holder.getAdapterPosition();
int selection = mSectionIndexer.getSectionForPosition(position);
LogUtil.e("selection:" + selection);
LogUtil.e("position:" + position);
if (position == mSectionIndexer.getPositionForSection(selection)) {
// first user of its section, so show the section header
holder.setVisible(R.id.tv_contacts_item_bg, true)
.setVisible(R.id.iv_contacts_item_divider, false)
.setText(R.id.tv_contacts_item_bg, (String) mSectionIndexer.getSections()[selection]);
} else {
holder.setVisible(R.id.tv_contacts_item_bg, false)
.setVisible(R.id.iv_contacts_item_divider, true);
}
if (data.getNick() == null || data.getNick().equals("")) {
holder.setText(R.id.tv_fragment_contacts_item_name, data.getUsername());
} else {
holder.setText(R.id.tv_fragment_contacts_item_name, data.getNick());
}
holder.setImageUrl(R.id.iv_fragment_contacts_item_avatar, data.getAvatar());
if (mOnCheckedChangeListener != null) {
holder.setVisible(R.id.cb_fragment_contacts_item_check,true);
((CheckBox) holder.getView(R.id.cb_fragment_contacts_item_check))
.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
mOnCheckedChangeListener.onItemChecked(isChecked,data,holder);
}
});
} else {
holder.setVisible(R.id.cb_fragment_contacts_item_check,false);
}
}
public void setSectionIndexer(SectionIndexer indexer) {
this.mSectionIndexer = indexer;
}
@Override
public void addData(int position, User newData) {
if (data.contains(newData)) {
int index = data.indexOf(newData);
data.set(index, newData);
notifyDataSetChanged();
} else {
super.addData(position,newData);
}
}
}
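// Illustrative wiring from an activity or fragment (a sketch; the indexer,
// user list and item layout are hypothetical, and the lambda assumes Java 8):
// ContactsAdapter adapter = new ContactsAdapter(users, R.layout.item_contact);
// adapter.setSectionIndexer(indexer);
// adapter.setOnCheckedChangeListener((isCheck, user, holder) -> { /* ... */ });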
import logging
logger = logging.getLogger('raftos')
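# Illustrative use of the module-level logger (a sketch; handler and level
# configuration is left to the embedding application):
#
#     logging.basicConfig(level=logging.INFO)
#     logger.info('raftos node started')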
//
// ========================================================================
// Copyright (c) 1995-2021 Mort Bay Consulting Pty Ltd and others.
//
// This program and the accompanying materials are made available under the
// terms of the Eclipse Public License v. 2.0 which is available at
// https://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
// which is available at https://www.apache.org/licenses/LICENSE-2.0.
//
// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
// ========================================================================
//
package org.eclipse.jetty.docs.programming.server.http2;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import org.eclipse.jetty.http.HttpFields;
import org.eclipse.jetty.http.HttpMethod;
import org.eclipse.jetty.http.HttpStatus;
import org.eclipse.jetty.http.HttpURI;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.http.MetaData;
import org.eclipse.jetty.http2.ErrorCode;
import org.eclipse.jetty.http2.api.Session;
import org.eclipse.jetty.http2.api.Stream;
import org.eclipse.jetty.http2.api.server.ServerSessionListener;
import org.eclipse.jetty.http2.frames.DataFrame;
import org.eclipse.jetty.http2.frames.HeadersFrame;
import org.eclipse.jetty.http2.frames.PushPromiseFrame;
import org.eclipse.jetty.http2.frames.ResetFrame;
import org.eclipse.jetty.http2.frames.SettingsFrame;
import org.eclipse.jetty.http2.server.RawHTTP2ServerConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.BufferUtil;
import org.eclipse.jetty.util.Callback;
import org.eclipse.jetty.util.resource.Resource;
import static java.lang.System.Logger.Level.INFO;
@SuppressWarnings("unused")
public class HTTP2ServerDocs
{
public void setup() throws Exception
{
// tag::setup[]
// Create a Server instance.
Server server = new Server();
ServerSessionListener sessionListener = new ServerSessionListener.Adapter();
// Create a ServerConnector with RawHTTP2ServerConnectionFactory.
RawHTTP2ServerConnectionFactory http2 = new RawHTTP2ServerConnectionFactory(sessionListener);
// Configure RawHTTP2ServerConnectionFactory, for example:
// Configure the max number of concurrent requests.
http2.setMaxConcurrentStreams(128);
// Enable support for CONNECT.
http2.setConnectProtocolEnabled(true);
// Create the ServerConnector.
ServerConnector connector = new ServerConnector(server, http2);
// Add the Connector to the Server
server.addConnector(connector);
// Start the Server so it starts accepting connections from clients.
server.start();
// end::setup[]
}
public void accept()
{
// tag::accept[]
ServerSessionListener sessionListener = new ServerSessionListener.Adapter()
{
@Override
public void onAccept(Session session)
{
SocketAddress remoteAddress = session.getRemoteSocketAddress();
System.getLogger("http2").log(INFO, "Connection from {0}", remoteAddress);
}
};
// end::accept[]
}
public void preface()
{
// tag::preface[]
ServerSessionListener sessionListener = new ServerSessionListener.Adapter()
{
@Override
public Map<Integer, Integer> onPreface(Session session)
{
// Customize the settings, for example:
Map<Integer, Integer> settings = new HashMap<>();
// Tell the client that HTTP/2 push is disabled.
settings.put(SettingsFrame.ENABLE_PUSH, 0);
return settings;
}
};
// end::preface[]
}
public void request()
{
// tag::request[]
ServerSessionListener sessionListener = new ServerSessionListener.Adapter()
{
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame frame)
{
// This is the "new stream" event, so it's guaranteed to be a request.
MetaData.Request request = (MetaData.Request)frame.getMetaData();
// Return a Stream.Listener to handle the request events,
// for example request content events or a request reset.
return new Stream.Listener.Adapter();
}
};
// end::request[]
}
public void requestContent()
{
// tag::requestContent[]
ServerSessionListener sessionListener = new ServerSessionListener.Adapter()
{
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame frame)
{
MetaData.Request request = (MetaData.Request)frame.getMetaData();
// Return a Stream.Listener to handle the request events.
return new Stream.Listener.Adapter()
{
@Override
public void onData(Stream stream, DataFrame frame, Callback callback)
{
// Get the content buffer.
ByteBuffer buffer = frame.getData();
// Consume the buffer, here - as an example - just log it.
System.getLogger("http2").log(INFO, "Consuming buffer {0}", buffer);
// Tell the implementation that the buffer has been consumed.
callback.succeeded();
// By returning from the method, implicitly tell the implementation
// to deliver to this method more DATA frames when they are available.
}
};
}
};
// end::requestContent[]
}
public void response()
{
// tag::response[]
ServerSessionListener sessionListener = new ServerSessionListener.Adapter()
{
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame frame)
{
// Send a response after reading the request.
MetaData.Request request = (MetaData.Request)frame.getMetaData();
if (frame.isEndStream())
{
respond(stream, request);
return null;
}
else
{
return new Stream.Listener.Adapter()
{
@Override
public void onData(Stream stream, DataFrame frame, Callback callback)
{
// Consume the request content.
callback.succeeded();
if (frame.isEndStream())
respond(stream, request);
}
};
}
}
private void respond(Stream stream, MetaData.Request request)
{
// Prepare the response HEADERS frame.
// The response HTTP status and HTTP headers.
MetaData.Response response = new MetaData.Response(HttpVersion.HTTP_2, HttpStatus.OK_200, HttpFields.EMPTY);
if (HttpMethod.GET.is(request.getMethod()))
{
// The response content.
ByteBuffer resourceBytes = getResourceBytes(request);
// Send the HEADERS frame with the response status and headers,
// and a DATA frame with the response content bytes.
stream.headers(new HeadersFrame(stream.getId(), response, null, false))
.thenCompose(s -> s.data(new DataFrame(s.getId(), resourceBytes, true)));
}
else
{
// Send just the HEADERS frame with the response status and headers.
stream.headers(new HeadersFrame(stream.getId(), response, null, true));
}
}
// tag::exclude[]
private ByteBuffer getResourceBytes(MetaData.Request request)
{
return ByteBuffer.allocate(1024);
}
// end::exclude[]
};
// end::response[]
}
public void reset()
{
float maxRequestRate = 0F;
// tag::reset[]
ServerSessionListener sessionListener = new ServerSessionListener.Adapter()
{
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame frame)
{
float requestRate = calculateRequestRate();
if (requestRate > maxRequestRate)
{
stream.reset(new ResetFrame(stream.getId(), ErrorCode.REFUSED_STREAM_ERROR.code), Callback.NOOP);
return null;
}
else
{
// The request is accepted.
MetaData.Request request = (MetaData.Request)frame.getMetaData();
// Return a Stream.Listener to handle the request events.
return new Stream.Listener.Adapter();
}
}
// tag::exclude[]
private float calculateRequestRate()
{
return 0F;
}
// end::exclude[]
};
// end::reset[]
}
public void push() throws Exception
{
// tag::push[]
// The favicon bytes.
ByteBuffer faviconBuffer = BufferUtil.toBuffer(Resource.newResource("/path/to/favicon.ico"), true);
ServerSessionListener sessionListener = new ServerSessionListener.Adapter()
{
// By default, push is enabled.
private boolean pushEnabled = true;
@Override
public void onSettings(Session session, SettingsFrame frame)
{
// Check whether the client sent an ENABLE_PUSH setting.
Map<Integer, Integer> settings = frame.getSettings();
Integer enablePush = settings.get(SettingsFrame.ENABLE_PUSH);
if (enablePush != null)
pushEnabled = enablePush == 1;
}
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame frame)
{
MetaData.Request request = (MetaData.Request)frame.getMetaData();
if (pushEnabled && request.getURIString().endsWith("/index.html"))
{
// Push the favicon.
HttpURI pushedURI = HttpURI.build(request.getURI()).path("/favicon.ico");
MetaData.Request pushedRequest = new MetaData.Request("GET", pushedURI, HttpVersion.HTTP_2, HttpFields.EMPTY);
PushPromiseFrame promiseFrame = new PushPromiseFrame(stream.getId(), 0, pushedRequest);
stream.push(promiseFrame, new Stream.Listener.Adapter())
.thenCompose(pushedStream ->
{
// Send the favicon "response".
MetaData.Response pushedResponse = new MetaData.Response(HttpVersion.HTTP_2, HttpStatus.OK_200, HttpFields.EMPTY);
return pushedStream.headers(new HeadersFrame(pushedStream.getId(), pushedResponse, null, false))
.thenCompose(pushed -> pushed.data(new DataFrame(pushed.getId(), faviconBuffer, true)));
});
}
// Return a Stream.Listener to handle the request events.
return new Stream.Listener.Adapter();
}
};
// end::push[]
}
}
#include <iostream>
#include <bits/stdc++.h>
using namespace std;
long long int n, d, i, j, temp, c, ans;
string s;
int main()
{
cin>>d>>n;
c=0;
ans=0;
for(i=0;i<n;i++){
cin>>s;
// count the leading '1' characters of this string
temp=0;
for(j=0;j<(long long)s.length();j++){
if(s[j]=='1'){
temp++;
}
else {
break;
}
}
// a string made entirely of '1's resets the current streak;
// otherwise extend the streak and keep the maximum seen so far
if(temp==(long long)s.length()){
c=0;
}
else {
c++;
if(ans<c){
ans=c;
}
}
}
cout<<ans<<endl;
return 0;
}
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.androidenterprise.model;
/**
* A managed configurations settings resource contains the set of managed properties that have been
* configured for an Android app to be applied to a set of users. The app's developer would have
* defined configurable properties in the managed configurations schema.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Google Play EMM API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class ManagedConfigurationsSettings extends com.google.api.client.json.GenericJson {
/**
* The last updated time of the managed configuration settings in milliseconds since
* 1970-01-01T00:00:00Z.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key @com.google.api.client.json.JsonString
private java.lang.Long lastUpdatedTimestampMillis;
/**
* The ID of the managed configurations settings.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String mcmId;
/**
* The name of the managed configurations settings.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/**
* The last updated time of the managed configuration settings in milliseconds since
* 1970-01-01T00:00:00Z.
* @return value or {@code null} for none
*/
public java.lang.Long getLastUpdatedTimestampMillis() {
return lastUpdatedTimestampMillis;
}
/**
* The last updated time of the managed configuration settings in milliseconds since
* 1970-01-01T00:00:00Z.
* @param lastUpdatedTimestampMillis lastUpdatedTimestampMillis or {@code null} for none
*/
public ManagedConfigurationsSettings setLastUpdatedTimestampMillis(java.lang.Long lastUpdatedTimestampMillis) {
this.lastUpdatedTimestampMillis = lastUpdatedTimestampMillis;
return this;
}
/**
* The ID of the managed configurations settings.
* @return value or {@code null} for none
*/
public java.lang.String getMcmId() {
return mcmId;
}
/**
* The ID of the managed configurations settings.
* @param mcmId mcmId or {@code null} for none
*/
public ManagedConfigurationsSettings setMcmId(java.lang.String mcmId) {
this.mcmId = mcmId;
return this;
}
/**
* The name of the managed configurations settings.
* @return value or {@code null} for none
*/
public java.lang.String getName() {
return name;
}
/**
* The name of the managed configurations settings.
* @param name name or {@code null} for none
*/
public ManagedConfigurationsSettings setName(java.lang.String name) {
this.name = name;
return this;
}
@Override
public ManagedConfigurationsSettings set(String fieldName, Object value) {
return (ManagedConfigurationsSettings) super.set(fieldName, value);
}
@Override
public ManagedConfigurationsSettings clone() {
return (ManagedConfigurationsSettings) super.clone();
}
}
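// Illustrative construction (a sketch with hypothetical values):
// ManagedConfigurationsSettings settings = new ManagedConfigurationsSettings()
//     .setMcmId("managedConfig1")
//     .setName("Example managed configuration");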
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.skywalking.e2e.controller;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
@RestController
public class FileLogController {
private static final Logger LOG4J_LOGGER = Logger.getLogger("fileLogger");
private static final org.apache.logging.log4j.Logger LOG4J2_LOGGER = LogManager.getLogger("fileLogger");
private static final org.slf4j.Logger LOGBACK_LOGGER = LoggerFactory.getLogger("fileLogger");
@RequestMapping(value = "/file/logs/trigger")
public String trigger() {
LOG4J_LOGGER.info("log4j fileLogger ==> mills-> " + System.currentTimeMillis());
LOG4J2_LOGGER.info("log4j2 fileLogger ==> mills->" + System.currentTimeMillis());
LOGBACK_LOGGER.info("logback fileLogger ==> mills-> {}", System.currentTimeMillis());
return TraceContext.traceId();
}
}
/*
* Copyright (c) 2019 flow.ci
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flowci.core.auth.helper;
import com.auth0.jwt.JWT;
import com.auth0.jwt.JWTVerifier;
import com.auth0.jwt.algorithms.Algorithm;
import com.auth0.jwt.exceptions.JWTDecodeException;
import com.auth0.jwt.exceptions.JWTVerificationException;
import com.auth0.jwt.exceptions.TokenExpiredException;
import com.auth0.jwt.interfaces.DecodedJWT;
import com.flowci.core.user.domain.User;
import com.flowci.util.StringHelper;
import java.time.Instant;
import java.util.Date;
/**
* @author yang
*/
public class JwtHelper {
private static final String issuer = "flow.ci";
/**
* Create jwt token, user email as JWT id
*/
public static String create(User user, int expiredAfterSeconds) {
Algorithm algorithm = Algorithm.HMAC256(user.getPasswordOnMd5());
Instant expired = Instant.now().plusSeconds(expiredAfterSeconds);
return JWT.create()
.withIssuer(issuer)
.withIssuedAt(Date.from(Instant.now()))
.withJWTId(user.getEmail())
.withClaim("role", user.getRole().toString())
.withExpiresAt(Date.from(expired))
.sign(algorithm);
}
/**
* Decode token and return user email
*/
public static String decode(String token) {
try {
DecodedJWT decode = JWT.decode(token);
return decode.getId();
} catch (JWTDecodeException e) {
return StringHelper.EMPTY;
}
}
public static boolean verify(String token, User user, boolean checkExpire) {
try {
Algorithm algorithm = Algorithm.HMAC256(user.getPasswordOnMd5());
JWTVerifier verifier = JWT.require(algorithm).withIssuer(issuer).build();
verifier.verify(token);
return true;
} catch (JWTVerificationException e) {
if (e instanceof TokenExpiredException) {
return !checkExpire;
}
return false;
}
}
}
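// Illustrative round trip (a sketch; the User instance comes from the domain
// layer and is not constructed here):
// String token = JwtHelper.create(user, 3600);          // expires in one hour
// String email = JwtHelper.decode(token);               // empty string if malformed
// boolean valid = JwtHelper.verify(token, user, true);  // also enforces expiry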
package io.lumify.web.auth.usernamepassword;
import com.google.inject.Inject;
import io.lumify.core.config.Configuration;
import io.lumify.miniweb.Handler;
import io.lumify.miniweb.handlers.StaticResourceHandler;
import io.lumify.core.bootstrap.InjectHelper;
import io.lumify.web.AuthenticationHandler;
import io.lumify.web.LumifyCsrfHandler;
import io.lumify.web.WebApp;
import io.lumify.web.WebAppPlugin;
import io.lumify.web.auth.usernamepassword.routes.Login;
import io.lumify.web.auth.usernamepassword.routes.ChangePassword;
import io.lumify.web.auth.usernamepassword.routes.LookupToken;
import io.lumify.web.auth.usernamepassword.routes.RequestToken;
import javax.servlet.ServletContext;
public class UsernamePasswordWebAppPlugin implements WebAppPlugin {
public static final String LOOKUP_TOKEN_ROUTE = "/forgotPassword";
public static final String CHANGE_PASSWORD_ROUTE = "/forgotPassword/changePassword";
private Configuration configuration;
@Override
public void init(WebApp app, ServletContext servletContext, Handler authenticationHandler) {
StaticResourceHandler jsHandler = new StaticResourceHandler(this.getClass(), "/username-password/authentication.js", "application/javascript");
StaticResourceHandler loginTemplateHandler = new StaticResourceHandler(this.getClass(), "/username-password/templates/login.hbs", "text/plain");
StaticResourceHandler lessHandler = new StaticResourceHandler(this.getClass(), "/username-password/less/login.less", "text/plain");
app.get("/jsc/configuration/plugins/authentication/authentication.js", jsHandler);
app.get("/jsc/configuration/plugins/authentication/templates/login.hbs", loginTemplateHandler);
app.get("/jsc/configuration/plugins/authentication/less/login.less", lessHandler);
app.post(AuthenticationHandler.LOGIN_PATH, InjectHelper.getInstance(Login.class));
ForgotPasswordConfiguration forgotPasswordConfiguration = new ForgotPasswordConfiguration();
configuration.setConfigurables(forgotPasswordConfiguration, ForgotPasswordConfiguration.CONFIGURATION_PREFIX);
configuration.set("web.ui." + ForgotPasswordConfiguration.CONFIGURATION_PREFIX + ".enabled", forgotPasswordConfiguration.isEnabled());
if (forgotPasswordConfiguration.isEnabled()) {
app.post("/forgotPassword/requestToken", RequestToken.class);
app.get(LOOKUP_TOKEN_ROUTE, LookupToken.class);
app.post(CHANGE_PASSWORD_ROUTE, ChangePassword.class);
}
}
@Inject
public void setConfiguration(Configuration configuration) {
this.configuration = configuration;
}
}
# test/fixtures/python/corpus/print-statement.A.py
print a, b
print c
print 0 or 1
print 0 or 1, 1 or 0,
package def.dom;
public class SVGPathSegLinetoVerticalRel extends SVGPathSeg {
public double y;
public static SVGPathSegLinetoVerticalRel prototype;
public SVGPathSegLinetoVerticalRel(){}
}
#include <Server/HTTP/ReadHeaders.h>
#include <IO/ReadBuffer.h>
#include <IO/ReadHelpers.h>
#include <Poco/Net/NetException.h>
namespace DB
{
void readHeaders(
Poco::Net::MessageHeader & headers, ReadBuffer & in, size_t max_fields_number, size_t max_name_length, size_t max_value_length)
{
char ch = 0; // silence uninitialized warning from gcc-*
std::string name;
std::string value;
name.reserve(32);
value.reserve(64);
size_t fields = 0;
while (true)
{
if (fields > max_fields_number)
throw Poco::Net::MessageException("Too many header fields");
name.clear();
value.clear();
/// Field name
while (in.peek(ch) && ch != ':' && !Poco::Ascii::isSpace(ch) && name.size() <= max_name_length)
{
name += ch;
in.ignore();
}
if (in.eof())
throw Poco::Net::MessageException("Field is invalid");
if (name.empty())
{
if (ch == '\r')
/// Start of the empty-line delimiter
break;
if (ch == ':')
throw Poco::Net::MessageException("Field name is empty");
}
else
{
if (name.size() > max_name_length)
throw Poco::Net::MessageException("Field name is too long");
if (ch != ':')
throw Poco::Net::MessageException(fmt::format("Field name is invalid or no colon found: \"{}\"", name));
}
in.ignore();
skipWhitespaceIfAny(in, true);
if (in.eof())
throw Poco::Net::MessageException("Field is invalid");
/// Field value - folded values not supported.
while (in.read(ch) && ch != '\r' && ch != '\n' && value.size() <= max_value_length)
value += ch;
if (in.eof())
throw Poco::Net::MessageException("Field is invalid");
if (ch == '\n')
throw Poco::Net::MessageException("No CRLF found");
if (value.size() > max_value_length)
throw Poco::Net::MessageException("Field value is too long");
skipToNextLineOrEOF(in);
Poco::trimRightInPlace(value);
headers.add(name, headers.decodeWord(value));
++fields;
}
}
}
/*
Copyright 2017 <NAME>
<p>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<p>
http://www.apache.org/licenses/LICENSE-2.0
<p>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.crazysunj.domain.util;
import android.content.Context;
import com.google.gson.Gson;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import com.google.gson.reflect.TypeToken;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
/**
* @author: sunjian
* created on: 2017/9/22 3:45 PM
* description: https://github.com/crazysunj/CrazyDaily
*/
public class JsonUtil {
private static Gson sGson = new Gson();
/**
* Convert an object to a JSON string
*/
public static String objectToString(Object obj) {
return sGson.toJson(obj);
}
/**
* Parse a JSON string into a Java object
*/
public static <T> T fromJson(String json, Class<T> clz) {
return sGson.fromJson(json, clz);
}
/**
* Parse a JSON string into a List
*/
public static <T> List<T> gsonToList(String gsonString, Class<T> clz) {
List<T> list = null;
if (sGson != null) {
list = sGson.fromJson(gsonString, new TypeToken<List<T>>() {
}.getType());
}
return list;
}
/**
* Parse a JSON string into a List; use this one when gsonToList cannot parse the input
*/
public static <T> ArrayList<T> fromJsonList(String json, Class<T> clz) {
ArrayList<T> mList = new ArrayList<T>();
JsonArray array = new JsonParser().parse(json).getAsJsonArray();
for (final JsonElement elem : array) {
mList.add(sGson.fromJson(elem, clz));
}
return mList;
}
/**
* Pretty-print a JSON string
*/
public static String formatJson(String jsonStr) {
if (null == jsonStr || "".equals(jsonStr)) {
return "";
}
StringBuilder sb = new StringBuilder();
char last = '\0';
char current = '\0';
int indent = 0;
for (int i = 0; i < jsonStr.length(); i++) {
last = current;
current = jsonStr.charAt(i);
// on '{' or '[': insert a newline and indent the next line
switch (current) {
case '{':
case '[':
sb.append(current);
sb.append('\n');
indent++;
addIndentBlank(sb, indent);
break;
// on '}' or ']': insert a newline and outdent before the bracket
case '}':
case ']':
sb.append('\n');
indent--;
addIndentBlank(sb, indent);
sb.append(current);
break;
// on ',': insert a newline (unless the comma is escaped)
case ',':
sb.append(current);
if (last != '\\') {
sb.append('\n');
addIndentBlank(sb, indent);
}
break;
default:
sb.append(current);
}
}
return sb.toString();
}
/**
* Decode \uXXXX unicode escapes (e.g. Chinese characters) in JSON returned by HTTP requests
*/
public static String decodeUnicode(String theString) {
char aChar;
int len = theString.length();
StringBuilder outBuffer = new StringBuilder(len);
for (int x = 0; x < len; ) {
aChar = theString.charAt(x++);
if (aChar == '\\') {
aChar = theString.charAt(x++);
if (aChar == 'u') {
int value = 0;
for (int i = 0; i < 4; i++) {
aChar = theString.charAt(x++);
switch (aChar) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
value = (value << 4) + aChar - '0';
break;
case 'a':
case 'b':
case 'c':
case 'd':
case 'e':
case 'f':
value = (value << 4) + 10 + aChar - 'a';
break;
case 'A':
case 'B':
case 'C':
case 'D':
case 'E':
case 'F':
value = (value << 4) + 10 + aChar - 'A';
break;
default:
throw new IllegalArgumentException(
"Malformed \\uxxxx encoding.");
}
}
outBuffer.append((char) value);
} else {
if (aChar == 't') {
aChar = '\t';
} else if (aChar == 'r') {
aChar = '\r';
} else if (aChar == 'n') {
aChar = '\n';
} else if (aChar == 'f') {
aChar = '\f';
}
outBuffer.append(aChar);
}
} else {
outBuffer.append(aChar);
}
}
return outBuffer.toString();
}
/**
* Append indentation
*/
private static void addIndentBlank(StringBuilder sb, int indent) {
for (int i = 0; i < indent; i++) {
sb.append('\t');
}
}
/**
* Read a local JSON file from assets
*/
public static String readLocalJson(Context context, String fileName) {
try {
InputStream inputStream = context.getResources().getAssets().open(fileName);
byte[] buffer = new byte[inputStream.available()];
inputStream.read(buffer);
inputStream.close();
return new String(buffer, "UTF-8");
} catch (Exception e) {
return null;
}
}
}
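// Illustrative round trips (a sketch; Foo is a hypothetical model class):
// String json = JsonUtil.objectToString(new Foo());
// Foo foo = JsonUtil.fromJson(json, Foo.class);
// ArrayList<Foo> list = JsonUtil.fromJsonList("[{},{}]", Foo.class);
// String pretty = JsonUtil.formatJson(json);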
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_WIN_SRC_APP_CONTAINER_H_
#define SANDBOX_WIN_SRC_APP_CONTAINER_H_
#include "base/files/file_path.h"
#include "base/win/scoped_handle.h"
#include "base/win/sid.h"
#include "base/win/windows_types.h"
#include "sandbox/win/src/acl.h"
namespace sandbox {
enum AppContainerType { kNone, kDerived, kProfile, kLowbox };
class AppContainer {
public:
// Increments the reference count of this object. The reference count must
// be incremented if this interface is given to another component.
virtual void AddRef() = 0;
// Decrements the reference count of this object. When the reference count
// is zero the object is automatically destroyed.
// Indicates that the caller is done with this interface. After calling
// release no other method should be called.
virtual void Release() = 0;
// Get a handle to a registry key for this package.
virtual bool GetRegistryLocation(REGSAM desired_access,
base::win::ScopedHandle* key) = 0;
// Get a folder path to a location for this package.
virtual bool GetFolderPath(base::FilePath* file_path) = 0;
// Get a pipe name usable by this AC.
virtual bool GetPipePath(const wchar_t* pipe_name,
base::FilePath* pipe_path) = 0;
// Do an access check based on this profile for a named object. If method
// returns true then access_status reflects whether access was granted and
// granted_access gives the final access rights. The object_type can be one of
// kFile or kRegistry which correspond to SE_FILE_OBJECT or SE_REGISTRY_KEY.
// See ::GetNamedSecurityInfo for more information about how the enumeration
// is used and what format object_name needs to be.
virtual bool AccessCheck(const wchar_t* object_name,
SecurityObjectType object_type,
DWORD desired_access,
DWORD* granted_access,
BOOL* access_status) = 0;
// Adds a capability by name to this profile.
virtual bool AddCapability(const wchar_t* capability_name) = 0;
// Adds a capability from a known list.
virtual bool AddCapability(base::win::WellKnownCapability capability) = 0;
// Adds a capability from a SID
virtual bool AddCapabilitySddl(const wchar_t* sddl_sid) = 0;
// Adds an impersonation capability by name to this profile.
virtual bool AddImpersonationCapability(const wchar_t* capability_name) = 0;
// Adds an impersonation capability from a known list.
virtual bool AddImpersonationCapability(
base::win::WellKnownCapability capability) = 0;
// Adds an impersonation capability from a SID
virtual bool AddImpersonationCapabilitySddl(const wchar_t* sddl_sid) = 0;
// Enable Low Privilege AC.
virtual void SetEnableLowPrivilegeAppContainer(bool enable) = 0;
virtual bool GetEnableLowPrivilegeAppContainer() = 0;
virtual AppContainerType GetAppContainerType() = 0;
};
} // namespace sandbox
#endif // SANDBOX_WIN_SRC_APP_CONTAINER_H_
// PropertyName.cpp
#include "StdAfx.h"
#include "../../../Common/IntToString.h"
#include "LangUtils.h"
#include "PropertyName.h"
UString GetNameOfProperty(PROPID propID, const wchar_t *name)
{
if (propID < 1000)
{
UString s = LangString(1000 + propID);
if (!s.IsEmpty())
return s;
}
if (name)
return name;
wchar_t temp[16];
ConvertUInt32ToString(propID, temp);
return temp;
}
package def.dom;
public class WebGLRenderbuffer extends WebGLObject {
public static WebGLRenderbuffer prototype;
public WebGLRenderbuffer(){}
}
#include "sequencer.h"
#include "imgui.h"
#include "imgui_internal.h"
#include <cstdlib>
namespace ImSequencer
{
static bool SequencerAddDelButton(ImDrawList* draw_list, ImVec2 pos, bool add = true)
{
ImGuiIO& io = ImGui::GetIO();
ImRect delRect(pos, ImVec2(pos.x + 16, pos.y + 16));
bool overDel = delRect.Contains(io.MousePos);
int delColor = overDel ? 0xFFAAAAAA : 0x50000000;
float midy = pos.y + 16 / 2 - 0.5f;
float midx = pos.x + 16 / 2 - 0.5f;
draw_list->AddRect(delRect.Min, delRect.Max, delColor, 4);
draw_list->AddLine(ImVec2(delRect.Min.x + 3, midy), ImVec2(delRect.Max.x - 3, midy), delColor, 2);
if (add)
draw_list->AddLine(ImVec2(midx, delRect.Min.y + 3), ImVec2(midx, delRect.Max.y - 3), delColor, 2);
return overDel;
}
static int min(int a, int b) { return (a < b) ? a : b; }
static int max(int a, int b) { return (a > b) ? a : b; }
bool Sequencer(SequenceInterface *sequence, int *currentFrame, bool *expanded, int *selectedEntry, int *firstFrame, int sequenceOptions)
{
bool ret = false;
ImGuiIO& io = ImGui::GetIO();
int cx = (int)(io.MousePos.x);
int cy = (int)(io.MousePos.y);
int framePixelWidth = 10;
int legendWidth = 200;
static int movingEntry = -1;
static int movingPos = -1;
static int movingPart = -1;
int delEntry = -1;
int dupEntry = -1;
int ItemHeight = 20;
bool popupOpened = false;
ImGui::BeginGroup();
ImDrawList* draw_list = ImGui::GetWindowDrawList();
ImVec2 canvas_pos = ImGui::GetCursorScreenPos(); // ImDrawList API uses screen coordinates!
ImVec2 canvas_size = ImGui::GetContentRegionAvail(); // Resize canvas to what's available
static const int scrollBarHeight = 14;
int firstFrameUsed = firstFrame ? *firstFrame : 0;
int sequenceCount = sequence->GetItemCount();
int controlHeight = (sequenceCount + 1) * ItemHeight;
int frameCount = sequence->GetFrameCount();
if (expanded && !*expanded)
{
ImGui::InvisibleButton("canvas", ImVec2(canvas_size.x - canvas_pos.x, (float)ItemHeight));
draw_list->AddRectFilled(canvas_pos, ImVec2(canvas_size.x + canvas_pos.x, canvas_pos.y + ItemHeight), 0xFF3D3837, 0);
char tmps[512];
snprintf(tmps, sizeof(tmps), "%d Frames / %d entries", frameCount, sequenceCount);
draw_list->AddText(ImVec2(canvas_pos.x + 26, canvas_pos.y + 2), 0xFFFFFFFF, tmps);
}
else
{
bool hasScrollBar(false);
int framesPixelWidth = frameCount * framePixelWidth;
if ((framesPixelWidth + legendWidth) >= canvas_size.x)
{
hasScrollBar = true;
controlHeight += scrollBarHeight;
}
ImRect backgroundRect(canvas_pos, ImVec2(canvas_pos.x + canvas_size.x, canvas_pos.y + controlHeight));
ImGui::InvisibleButton("canvas", ImVec2(canvas_size.x, (float)controlHeight));
// full background
draw_list->AddRectFilled(backgroundRect.Min, backgroundRect.Max, 0xFF262222, 0);
// current frame top
ImRect topRect(ImVec2(canvas_pos.x + legendWidth, canvas_pos.y), ImVec2(canvas_size.x, canvas_pos.y + ItemHeight));
if (sequenceOptions&SEQUENCER_CHANGE_FRAME && currentFrame && *currentFrame >= 0 && topRect.Contains(io.MousePos) && io.MouseDown[0])
{
*currentFrame = (int)(io.MousePos.x - topRect.Min.x) / framePixelWidth;
if (*currentFrame < 0)
*currentFrame = 0;
if (*currentFrame >= frameCount)
*currentFrame = frameCount - 1;
}
//header
draw_list->AddRectFilled(canvas_pos, ImVec2(canvas_size.x + canvas_pos.x, canvas_pos.y + ItemHeight), 0xFF3D3837, 0);
if (sequenceOptions&SEQUENCER_ADD)
{
if (SequencerAddDelButton(draw_list, ImVec2(canvas_pos.x + legendWidth - ItemHeight, canvas_pos.y + 2), true) && io.MouseReleased[0])
ImGui::OpenPopup("addEntry");
if (ImGui::BeginPopup("addEntry"))
{
for (int i = 0; i < sequence->GetItemTypeCount(); i++)
if (ImGui::Selectable(sequence->GetItemTypeName(i)))
{
sequence->Add(i);
*selectedEntry = sequence->GetItemCount() - 1;
}
ImGui::EndPopup();
popupOpened = true;
}
}
for (int i = 0; i < sequenceCount; i++)
{
int type;
sequence->Get(i, NULL, NULL, &type, NULL);
ImVec2 tpos(canvas_pos.x + 3, canvas_pos.y + (i + 1) * ItemHeight + 2);
draw_list->AddText(tpos, 0xFFFFFFFF, sequence->GetItemLabel(i));
if (sequenceOptions&SEQUENCER_DEL)
{
bool overDel = SequencerAddDelButton(draw_list, ImVec2(canvas_pos.x + legendWidth - ItemHeight + 2 - 10, tpos.y + 2), false);
if (overDel && io.MouseReleased[0])
delEntry = i;
bool overDup = SequencerAddDelButton(draw_list, ImVec2(canvas_pos.x + legendWidth - ItemHeight - ItemHeight + 2 - 10, tpos.y + 2), true);
if (overDup && io.MouseReleased[0])
dupEntry = i;
}
}
// clipping rect so item bars are not visible in the legend on the left when scrolled
draw_list->PushClipRect(ImVec2(canvas_pos.x + legendWidth, canvas_pos.y), ImVec2(canvas_pos.x + canvas_size.x, canvas_pos.y + controlHeight));
// slots background
for (int i = 0; i < sequenceCount; i++)
{
unsigned int col = (i & 1) ? 0xFF3A3636 : 0xFF413D3D;
ImVec2 pos = ImVec2(canvas_pos.x + legendWidth, canvas_pos.y + ItemHeight * (i + 1) + 1);
ImVec2 sz = ImVec2(canvas_size.x + canvas_pos.x, pos.y + ItemHeight - 1);
if (!popupOpened && cy >= pos.y && cy < pos.y + ItemHeight && movingEntry == -1 && cx>canvas_pos.x && cx < canvas_pos.x + canvas_size.x)
{
col += 0x80201008;
pos.x -= legendWidth;
}
draw_list->AddRectFilled(pos, sz, col, 0);
}
for (int i = 0; i <= frameCount; i++)
{
bool baseIndex = ((i % 10) == 0) || (i == frameCount);
bool halfIndex = (i % 5) == 0;
int px = (int)canvas_pos.x + i * framePixelWidth + legendWidth - firstFrameUsed * framePixelWidth;
int tiretStart = baseIndex ? 4 : (halfIndex ? 10 : 14);
int tiretEnd = baseIndex ? controlHeight : ItemHeight;
if (px <= (canvas_size.x + canvas_pos.x) && px >= (canvas_pos.x + legendWidth))
{
draw_list->AddLine(ImVec2((float)px, canvas_pos.y + (float)tiretStart), ImVec2((float)px, canvas_pos.y + (float)tiretEnd - 1), 0xFF606060, 1);
draw_list->AddLine(ImVec2((float)px, canvas_pos.y + (float)ItemHeight), ImVec2((float)px, canvas_pos.y + (float)controlHeight - 1), 0x30606060, 1);
}
if (baseIndex)
{
char tmps[512];
snprintf(tmps, sizeof(tmps), "%d", (i == frameCount) ? i : (i / 10));
draw_list->AddText(ImVec2((float)px + 3.f, canvas_pos.y), 0xFFBBBBBB, tmps);
}
}
draw_list->AddLine(canvas_pos, ImVec2(canvas_pos.x, canvas_pos.y + controlHeight), 0xFF000000, 1);
draw_list->AddLine(ImVec2(canvas_pos.x, canvas_pos.y + ItemHeight), ImVec2(canvas_size.x, canvas_pos.y + ItemHeight), 0xFF000000, 1);
// selection
bool selected = selectedEntry && (*selectedEntry >= 0);
if (selected)
{
draw_list->AddRectFilled(ImVec2(canvas_pos.x, canvas_pos.y + ItemHeight * (*selectedEntry + 1)), ImVec2(canvas_pos.x + canvas_size.x, canvas_pos.y + ItemHeight * (*selectedEntry + 2)), 0x801080FF, 1.f);
}
// slots
for (int i = 0; i < sequenceCount; i++)
{
int *start, *end;
unsigned int color;
sequence->Get(i, &start, &end, NULL, &color);
ImVec2 pos = ImVec2(canvas_pos.x + legendWidth - firstFrameUsed * framePixelWidth, canvas_pos.y + ItemHeight * (i + 1) + 1);
ImVec2 slotP1(pos.x + *start * framePixelWidth, pos.y + 2);
ImVec2 slotP2(pos.x + *end * framePixelWidth + framePixelWidth, pos.y + ItemHeight - 2);
unsigned int slotColor = color | 0xFF000000;
if (slotP1.x <= (canvas_size.x + canvas_pos.x) && slotP1.x >= (canvas_pos.x + legendWidth))
{
draw_list->AddRectFilled(slotP1, slotP2, slotColor, 2);
}
ImRect rects[3] = { ImRect(slotP1, ImVec2(slotP1.x + framePixelWidth / 2, slotP2.y))
, ImRect(ImVec2(slotP2.x - framePixelWidth / 2, slotP1.y), slotP2)
, ImRect(slotP1, slotP2) };
const unsigned int quadColor[] = { 0xFFFFFFFF, 0xFFFFFFFF, slotColor + (selected ? 0 : 0x202020) };
if (movingEntry == -1 && (sequenceOptions&SEQUENCER_EDIT_STARTEND) && backgroundRect.Contains(io.MousePos))
{
for (int j = 2; j >= 0; j--)
{
ImRect& rc = rects[j];
if (!rc.Contains(io.MousePos))
continue;
draw_list->AddRectFilled(rc.Min, rc.Max, quadColor[j], 2);
}
for (int j = 0; j < 3; j++)
{
ImRect& rc = rects[j];
if (!rc.Contains(io.MousePos))
continue;
if (io.MouseDown[0])
{
movingEntry = i;
movingPos = cx;
movingPart = j + 1;
break;
}
}
}
}
//ImGui::PopClipRect();
// moving
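// movingPart is a bitmask derived from the hovered sub-rect:
// 1 = left handle, 2 = right handle, 3 = whole slot (both ends move together).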
if (backgroundRect.Contains(io.MousePos) && movingEntry >= 0)
{
ImGui::CaptureMouseFromApp();
int diffFrame = (cx - movingPos) / framePixelWidth;
if (std::abs(diffFrame) > 0)
{
int *start, *end;
sequence->Get(movingEntry, &start, &end, NULL, NULL);
int & l = *start;
int & r = *end;
if (movingPart & 1)
l += diffFrame;
if (movingPart & 2)
r += diffFrame;
if (l < 0)
{
if (movingPart & 2)
r -= l;
l = 0;
}
if (movingPart & 1 && l > r)
l = r;
if (movingPart & 2 && r < l)
r = l;
movingPos += diffFrame * framePixelWidth;
}
if (!io.MouseDown[0])
{
// single select
if (!diffFrame && movingPart && selectedEntry)
{
*selectedEntry = movingEntry;
ret = true;
}
movingEntry = -1;
}
}
// cursor
if (currentFrame && *currentFrame >= 0)
{
float cursorOffset = canvas_pos.x + legendWidth + *currentFrame * framePixelWidth + framePixelWidth / 2;
draw_list->AddLine(ImVec2(cursorOffset, canvas_pos.y), ImVec2(cursorOffset, canvas_pos.y + controlHeight), 0x402A2AFF, 4);
}
draw_list->PopClipRect();
// copy paste
if (sequenceOptions&SEQUENCER_COPYPASTE)
{
ImRect rectCopy(ImVec2(canvas_pos.x + 100, canvas_pos.y + 2)
, ImVec2(canvas_pos.x + 100 + 30, canvas_pos.y + ItemHeight - 2));
bool inRectCopy = rectCopy.Contains(io.MousePos);
unsigned int copyColor = inRectCopy ? 0xFF1080FF : 0xFF000000;
draw_list->AddText(rectCopy.Min, copyColor, "Copy");
ImRect rectPaste(ImVec2(canvas_pos.x + 140, canvas_pos.y + 2)
, ImVec2(canvas_pos.x + 140 + 30, canvas_pos.y + ItemHeight - 2));
bool inRectPaste = rectPaste.Contains(io.MousePos);
unsigned int pasteColor = inRectPaste ? 0xFF1080FF : 0xFF000000;
draw_list->AddText(rectPaste.Min, pasteColor, "Paste");
if (inRectCopy && io.MouseReleased[0])
{
sequence->Copy();
}
if (inRectPaste && io.MouseReleased[0])
{
sequence->Paste();
}
}
//
if (hasScrollBar)
{
int scrollBarStartHeight = controlHeight - scrollBarHeight;
// ratio = number of frames visible in control / total number of frames
int visibleFrameCount = (int)floorf((canvas_size.x - legendWidth) / framePixelWidth);
float barWidthRatio = visibleFrameCount / (float)frameCount;
float barWidthInPixels = barWidthRatio * (canvas_size.x - legendWidth);
float startFrameOffset = ((float)firstFrameUsed / (float)frameCount) * (canvas_size.x - legendWidth);
ImVec2 scrollBarA(canvas_pos.x + legendWidth, canvas_pos.y + scrollBarStartHeight);
ImVec2 scrollBarB(canvas_pos.x + legendWidth + canvas_size.x, canvas_pos.y + controlHeight);
draw_list->AddRectFilled(scrollBarA, scrollBarB, 0xFF222222, 0);
ImRect scrollBarRect(scrollBarA, scrollBarB);
bool inScrollBar = scrollBarRect.Contains(io.MousePos);
if (inScrollBar && io.MouseDown[0] && firstFrame)
{
*firstFrame = (int)(frameCount * ((io.MousePos.x - (float)legendWidth - canvas_pos.x) / (canvas_size.x - legendWidth)));
*firstFrame = max(min(*firstFrame - visibleFrameCount / 2, frameCount - visibleFrameCount), 0);
}
ImVec2 scrollBarC(canvas_pos.x + legendWidth + startFrameOffset, canvas_pos.y + scrollBarStartHeight + 2);
ImVec2 scrollBarD(canvas_pos.x + legendWidth + barWidthInPixels + startFrameOffset, canvas_pos.y + controlHeight - 2);
draw_list->AddRectFilled(scrollBarC, scrollBarD, inScrollBar ? 0xFF606060 : 0xFF505050, 2);
}
}
ImGui::EndGroup();
if (expanded)
{
bool overExpanded = SequencerAddDelButton(draw_list, ImVec2(canvas_pos.x + 2, canvas_pos.y + 2), !*expanded);
if (overExpanded && io.MouseReleased[0])
*expanded = !*expanded;
}
if (delEntry != -1)
{
sequence->Del(delEntry);
if (selectedEntry && (*selectedEntry == delEntry || *selectedEntry >= sequence->GetItemCount()))
*selectedEntry = -1;
}
if (dupEntry != -1)
{
sequence->Duplicate(dupEntry);
}
return ret;
}
}
//
// ImSequencer demo interface
//
//
static const char* SequencerItemTypeNames[] = { "Camera","Music", "ScreenEffect", "FadeIn", "Animation" };
struct MySequence : public ImSequencer::SequenceInterface
{
// interface with sequencer
virtual int GetFrameCount() const { return mFrameCount; }
virtual int GetItemCount() const { return (int)myItems.size(); }
virtual int GetItemTypeCount() const { return sizeof(SequencerItemTypeNames)/sizeof(char*); }
virtual const char *GetItemTypeName(int typeIndex) const { return SequencerItemTypeNames[typeIndex]; }
virtual const char *GetItemLabel(int index) const
{
static char tmps[512];
snprintf(tmps, sizeof(tmps), "[%02d] %s", index, SequencerItemTypeNames[myItems[index].mType]);
return tmps;
}
virtual void Get(int index, int** start, int** end, int *type, unsigned int *color)
{
MySequenceItem &item = myItems[index];
if (color)
*color = 0xFFAA8080; // same color for everyone, return color based on type
if (start)
*start = &item.mFrameStart;
if (end)
*end = &item.mFrameEnd;
if (type)
*type = item.mType;
}
virtual void Add(int type) { myItems.push_back(MySequenceItem{ type, 0, 10 }); };
virtual void Del(int index) { myItems.erase(myItems.begin() + index); }
virtual void Duplicate(int index) { myItems.push_back(myItems[index]); }
// my data
MySequence() : mFrameCount(0) {}
int mFrameCount;
struct MySequenceItem
{
int mType;
int mFrameStart, mFrameEnd;
};
std::vector<MySequenceItem> myItems;
};
void sequencer_demo() {
// sequence with default values
static MySequence mySequence;
if( !mySequence.mFrameCount ) {
mySequence.mFrameCount = 100;
mySequence.myItems.push_back(MySequence::MySequenceItem{ 0, 10, 30 });
mySequence.myItems.push_back(MySequence::MySequenceItem{ 1, 20, 30 });
mySequence.myItems.push_back(MySequence::MySequenceItem{ 3, 12, 60 });
mySequence.myItems.push_back(MySequence::MySequenceItem{ 2, 61, 90 });
mySequence.myItems.push_back(MySequence::MySequenceItem{ 4, 90, 99 });
}
// let's create the sequencer
static int selectedEntry = -1;
static int firstFrame = 0;
static bool expanded = true;
//ImGui::SetNextWindowPos(ImVec2(10, 350));
//ImGui::SetNextWindowSize(ImVec2(740, 380));
if( ImGui::Begin("Sequencer") ) {
ImGui::InputInt("Frame count", &mySequence.mFrameCount);
Sequencer(&mySequence, NULL, &expanded, &selectedEntry, &firstFrame, ImSequencer::SEQUENCER_EDIT_STARTEND | ImSequencer::SEQUENCER_ADD | ImSequencer::SEQUENCER_DEL | ImSequencer::SEQUENCER_COPYPASTE);
// add a UI to edit that particular item
if (selectedEntry != -1)
{
MySequence::MySequenceItem &item = mySequence.myItems[selectedEntry];
ImGui::Text("I am a %s, please edit me", SequencerItemTypeNames[item.mType]);
// switch (type) ....
}
}
ImGui::End();
}
#if 0
// using nem0's would be:
static bool show = 1;
ImGui::Begin("Timeline", &show);
const float timescale = 600.f;
ImGui::BeginTimeline("Hello", timescale);
static float v1[] = {25, 75}; ImGui::TimelineEvent("spawn particle", v1);
static float v2[] = {50, 75}; ImGui::TimelineEvent("run script", v2);
static float v3[] = {15, 55}; ImGui::TimelineEvent("play sound##1", v3);
static float v4[] = {35, 95}; ImGui::TimelineEvent("play bgm##2", v4);
static float time[] = {0, 1};
ImGui::EndTimeline( timeloop(60) /*, time*/ );
auto t = timeloop(60);
// if( t >= v3[0] && t <= v3[1] ) audio_sfx();
// if( t >= v4[0] && t <= v4[1] ) audio_bgm();
ImGui::End();
#endif
| 10,319 |
1,738 | /*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.
#include "StdAfx.h"
#include "MaterialHelpers.h"
#include "StringHelpers.h"
#include "PathHelpers.h"
#include "properties.h"
MaterialHelpers::MaterialInfo::MaterialInfo()
{
this->id = -1;
this->name = "";
this->physicalize = "None";
this->diffuseTexture = "";
this->diffuseColor[0] = this->diffuseColor[1] = this->diffuseColor[2] = 1.0f;
this->specularColor[0] = this->specularColor[1] = this->specularColor[2] = 1.0f;
this->emissiveColor[0] = this->emissiveColor[1] = this->emissiveColor[2] = 0.0f;
}
std::string MaterialHelpers::PhysicsIDToString(const int physicsID)
{
switch (physicsID)
{
case 1:
return "Default";
break;
case 2:
return "ProxyNoDraw";
break;
case 3:
return "NoCollide";
break;
case 4:
return "Obstruct";
break;
default:
return "None";
break;
}
}
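// Serializes the material list as a CryEngine-style multi-material (.mtl)
// XML file; returns false when the file cannot be opened for writing.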
bool MaterialHelpers::WriteMaterials(const std::string& filename, const std::vector<MaterialInfo>& materialList)
{
FILE* materialFile = fopen(filename.c_str(), "w");
if (materialFile)
{
fprintf(materialFile, "<Material MtlFlags=\"524544\" >\n");
fprintf(materialFile, " <SubMaterials>\n");
for (size_t i = 0; i < materialList.size(); i++)
{
const MaterialInfo& material = materialList[i];
fprintf(materialFile, " <Material Name=\"%s\" ", material.name.c_str());
if (strcmp(material.physicalize.c_str(), "ProxyNoDraw") == 0)
{
fprintf(materialFile, "MtlFlags=\"1152\" Shader=\"Nodraw\" GenMask=\"0\" ");
}
else
{
fprintf(materialFile, "MtlFlags=\"524416\" Shader=\"Illum\" GenMask=\"100000000\" ");
}
fprintf(materialFile, "SurfaceType=\"\" MatTemplate=\"\" ");
fprintf(materialFile, "Diffuse=\"%f,%f,%f\" ", material.diffuseColor[0], material.diffuseColor[1], material.diffuseColor[2]);
fprintf(materialFile, "Specular=\"%f,%f,%f\" ", material.specularColor[0], material.specularColor[1], material.specularColor[2]);
fprintf(materialFile, "Emissive=\"%f,%f,%f\" ", material.emissiveColor[0], material.emissiveColor[1], material.emissiveColor[2]);
fprintf(materialFile, "Shininess=\"10\" ");
fprintf(materialFile, "Opacity=\"1\" ");
fprintf(materialFile, ">\n");
fprintf(materialFile, " <Textures>\n");
// Write out diffuse texture.
if (material.diffuseTexture.length() > 0)
{
//fprintf( materialFile, " <Texture Map=\"Diffuse\" File=\"%s\" >\n", ProcessTexturePath( material.diffuseTexture ).c_str() );
fprintf(materialFile, " <Texture Map=\"Diffuse\" File=\"%s\" >\n", material.diffuseTexture.c_str());
fprintf(materialFile, " <TexMod />\n");
fprintf(materialFile, " </Texture>\n");
}
fprintf(materialFile, " </Textures>\n");
fprintf(materialFile, " </Material>\n");
}
fprintf(materialFile, " </SubMaterials>\n");
fprintf(materialFile, "</Material>\n");
fclose(materialFile);
return true;
}
else
{
return false;
}
} | 1,731 |
2,171 | <filename>src/test/unit/io/cmd_line_test.cpp
#include <stan/io/cmd_line.hpp>
#include <gtest/gtest.h>
#include <test/unit/util.hpp>
TEST(io_cmd_line, cmd_line_0) {
int argc = 0;
const char* argv[1];
argv[0] = "foo";
stan::io::cmd_line cl(argc, argv);
EXPECT_FALSE(cl.has_key("bar"));
std::string y;
EXPECT_FALSE(cl.val<std::string>("bar", y));
}
TEST(io_cmd_line, cmd_line_string) {
int argc = 2;
const char* argv[2];
argv[0] = "prog";
argv[1] = "--foo=bar";
stan::io::cmd_line cl(argc, argv);
std::string x;
EXPECT_TRUE(cl.val<std::string>("foo", x));
EXPECT_EQ("bar", x);
}
TEST(io_cmd_line, cmd_line_int) {
int argc = 6;
const char* argv[6];
argv[0] = "prog.exe";
argv[1] = "--foo=bar";
argv[2] = "--bz";
argv[3] = "--bing=3";
argv[4] = "ahoy";
argv[5] = "17";
stan::io::cmd_line cl(argc, argv);
EXPECT_EQ("prog.exe", cl.command());
std::string x;
EXPECT_TRUE(cl.val<std::string>("foo", x));
EXPECT_EQ("bar", x);
unsigned int y;
EXPECT_TRUE(cl.val<unsigned int>("bing", y));
EXPECT_EQ(3U, y);
EXPECT_TRUE(cl.has_flag("bz"));
EXPECT_FALSE(cl.has_flag("bza"));
EXPECT_FALSE(cl.val<unsigned int>("bongo", y));
EXPECT_EQ(2U, cl.bare_size());
std::string z;
EXPECT_TRUE(cl.bare<std::string>(0, z));
EXPECT_EQ("ahoy", z);
EXPECT_TRUE(cl.bare<unsigned int>(1, y));
EXPECT_EQ(17U, y);
double aaa;
EXPECT_FALSE(cl.bare<double>(2, aaa));
}
TEST(io_cmd_line, spaces) {
int argc = 3;
const char* argv[3];
argv[0] = "prog";
argv[1] = "arg 1";
argv[2] = "--foo=arg 2";
stan::io::cmd_line cl(argc, argv);
std::string x;
EXPECT_EQ(1U, cl.bare_size());
EXPECT_TRUE(cl.bare<std::string>(0, x));
EXPECT_EQ("arg 1", x);
std::string y;
EXPECT_TRUE(cl.val<std::string>("foo", y));
EXPECT_EQ("arg 2", y);
}
TEST(io_cmd_line, pad_help_option) {
stan::test::capture_std_streams();
EXPECT_NO_THROW(stan::io::pad_help_option(0, "foo", 2));
std::stringstream out;
EXPECT_NO_THROW(stan::io::pad_help_option(&out, "foo", 2));
EXPECT_EQ(" foo\n ", out.str());
stan::test::reset_std_streams();
EXPECT_EQ("", stan::test::cout_ss.str());
EXPECT_EQ("", stan::test::cerr_ss.str());
}
| 1,058 |
835 | from ..._protos.public.registry import RegistryService_pb2
from . import _LockLevel
class Open(_LockLevel):
"""
Changes to the model version are allowed.
Examples
--------
.. code-block:: python
from verta.registry import lock
reg_model = client.create_registered_model("My Model", workspace="my-org")
model_ver = reg_model.create_version("My Model v0", lock_level=lock.Open())
# equivalently:
# reg_model.create_version("My Model v0", lock_level=lock.open)
"""
_LOCK_LEVEL = RegistryService_pb2.ModelVersionLockLevelEnum.ModelVersionLockLevel.OPEN
| 225 |
743 | package pl.allegro.tech.hermes.integration;
import com.jayway.awaitility.Duration;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import pl.allegro.tech.hermes.api.OAuthProvider;
import pl.allegro.tech.hermes.api.Subscription;
import pl.allegro.tech.hermes.api.SubscriptionOAuthPolicy;
import pl.allegro.tech.hermes.api.Topic;
import pl.allegro.tech.hermes.test.helper.builder.SubscriptionBuilder;
import pl.allegro.tech.hermes.test.helper.oauth.server.OAuthTestServer;
import javax.ws.rs.core.Response;
import java.io.IOException;
import static javax.ws.rs.core.Response.Status.CREATED;
import static pl.allegro.tech.hermes.api.SubscriptionOAuthPolicy.clientCredentialsGrantOAuthPolicy;
import static pl.allegro.tech.hermes.api.SubscriptionOAuthPolicy.passwordGrantOAuthPolicy;
import static pl.allegro.tech.hermes.integration.test.HermesAssertions.assertThat;
import static pl.allegro.tech.hermes.test.helper.builder.OAuthProviderBuilder.oAuthProvider;
public class OAuthIntegrationTest extends IntegrationTest {
private OAuthTestServer oAuthTestServer;
SubscriptionOAuthPolicy usernamePasswordOAuthPolicy = passwordGrantOAuthPolicy("provider1")
.withUsername("testUser1")
.withPassword("<PASSWORD>")
.build();
SubscriptionOAuthPolicy clientCredentialsOAuthPolicy = clientCredentialsGrantOAuthPolicy("provider1")
.build();
@BeforeClass
public void initialize() throws IOException {
oAuthTestServer = new OAuthTestServer();
oAuthTestServer.start();
oAuthTestServer.registerClient("client1", "secret1");
oAuthTestServer.registerResourceOwner("testUser1", "password1");
OAuthProvider provider = oAuthProvider("provider1")
.withTokenEndpoint(oAuthTestServer.getTokenEndpoint())
.withClientId("client1")
.withClientSecret("secret1")
.withRequestTimeout(10 * 1000)
.withTokenRequestInitialDelay(100)
.withTokenRequestMaxDelay(10000)
.build();
operations.createOAuthProvider(provider);
}
@AfterClass
public void tearDown() {
oAuthTestServer.stop();
}
@BeforeMethod
public void initializeAlways() {
oAuthTestServer.revokeAllTokens();
oAuthTestServer.clearResourceAccessCounters();
oAuthTestServer.clearTokenIssueCounters();
}
@Test
public void shouldSendMessageToUsernamePasswordGrantOAuthSecuredEndpoint() {
// given
Topic topic = operations.buildTopic("publishAndConsumeOAuthGroup", "topic1");
Subscription subscription = SubscriptionBuilder.subscription(topic, "subscription1")
.withEndpoint(oAuthTestServer.getUsernamePasswordSecuredResourceEndpoint("testUser1"))
.withOAuthPolicy(usernamePasswordOAuthPolicy).build();
operations.createSubscription(topic, subscription);
// when
Response response = publisher.publish(topic.getQualifiedName(), "hello world");
assertThat(response).hasStatus(CREATED);
// then
wait.awaitAtMost(Duration.TEN_SECONDS).until(() -> oAuthTestServer.getResourceAccessCount("testUser1") == 1);
}
@Test
public void shouldInvalidateRevokedAndRequestNewTokenForUsernamePasswordGrantOAuthSecuredEndpoint() {
// given
Topic topic = operations.buildTopic("publishAndConsumeOAuthGroup2", "topic2");
Subscription subscription = SubscriptionBuilder.subscription(topic, "subscription2")
.withEndpoint(oAuthTestServer.getUsernamePasswordSecuredResourceEndpoint("testUser1"))
.withOAuthPolicy(usernamePasswordOAuthPolicy).build();
operations.createSubscription(topic, subscription);
// when
Response response = publisher.publish(topic.getQualifiedName(), "hello world");
assertThat(response).hasStatus(CREATED);
// then
wait.awaitAtMost(Duration.TEN_SECONDS).until(() -> oAuthTestServer.getResourceAccessCount("testUser1") == 1);
// and when
oAuthTestServer.revokeAllTokens();
response = publisher.publish(topic.getQualifiedName(), "hello again");
assertThat(response).hasStatus(CREATED);
// then
wait.awaitAtMost(Duration.TEN_SECONDS).until(() -> oAuthTestServer.getResourceAccessCount("testUser1") == 2);
}
@Test
public void shouldSendMessageToClientCredentialsGrantOAuthSecuredEndpoint() {
// given
Topic topic = operations.buildTopic("publishAndConsumeOAuthGroup", "topic3");
Subscription subscription = SubscriptionBuilder.subscription(topic, "subscription3")
.withEndpoint(oAuthTestServer.getClientCredentialsSecuredResourceEndpoint("client1"))
.withOAuthPolicy(clientCredentialsOAuthPolicy).build();
operations.createSubscription(topic, subscription);
// when
Response response = publisher.publish(topic.getQualifiedName(), "hello world");
assertThat(response).hasStatus(CREATED);
// then
wait.awaitAtMost(Duration.TEN_SECONDS).until(() -> oAuthTestServer.getResourceAccessCount("client1") == 1);
}
@Test
public void shouldInvalidateRevokedAndRequestNewTokenForClientCredentialsGrantSecuredEndpoint() {
// given
Topic topic = operations.buildTopic("publishAndConsumeOAuthGroup", "topic4");
Subscription subscription = SubscriptionBuilder.subscription(topic, "subscription4")
.withEndpoint(oAuthTestServer.getClientCredentialsSecuredResourceEndpoint("client1"))
.withOAuthPolicy(clientCredentialsOAuthPolicy).build();
operations.createSubscription(topic, subscription);
// when
Response response = publisher.publish(topic.getQualifiedName(), "hello world");
assertThat(response).hasStatus(CREATED);
// then
wait.awaitAtMost(Duration.TEN_SECONDS).until(() -> oAuthTestServer.getResourceAccessCount("client1") == 1);
// and when
oAuthTestServer.revokeAllTokens();
response = publisher.publish(topic.getQualifiedName(), "hello again");
assertThat(response).hasStatus(CREATED);
// then
wait.awaitAtMost(Duration.TEN_SECONDS).until(() -> oAuthTestServer.getResourceAccessCount("client1") == 2);
}
}
| 2,423 |
839 | <filename>rt/frontend/jaxws/src/main/java/org/apache/cxf/jaxws/support/JaxWsImplementorInfo.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.jaxws.support;
import java.lang.annotation.Annotation;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.List;
import java.util.ResourceBundle;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.jws.WebService;
import javax.xml.namespace.QName;
import javax.xml.ws.BindingType;
import javax.xml.ws.Provider;
import javax.xml.ws.Service;
import javax.xml.ws.ServiceMode;
import javax.xml.ws.WebServiceException;
import javax.xml.ws.WebServiceProvider;
import javax.xml.ws.soap.SOAPBinding;
import org.apache.cxf.common.classloader.ClassLoaderUtils;
import org.apache.cxf.common.logging.LogUtils;
import org.apache.cxf.common.util.PackageUtils;
import org.apache.cxf.common.util.StringUtils;
import org.apache.cxf.jaxb.JAXBEncoderDecoder;
public class JaxWsImplementorInfo {
private static final Logger LOG = LogUtils.getL7dLogger(JaxWsImplementorInfo.class);
private static final ResourceBundle BUNDLE = LOG.getResourceBundle();
private Class<?> implementorClass;
private Class<?> seiClass;
private ParameterizedType seiType;
private List<WebService> wsAnnotations = new ArrayList<>(2);
private WebServiceProvider wsProviderAnnotation;
public JaxWsImplementorInfo(Class<?> ic) {
implementorClass = ic;
initialize();
}
public Class<?> getSEIClass() {
return seiClass;
}
public ParameterizedType getSEIType() {
return seiType;
}
public Class<?> getImplementorClass() {
return implementorClass;
}
public Class<?> getEndpointClass() {
Class<?> endpointInterface = getSEIClass();
if (null == endpointInterface) {
endpointInterface = getImplementorClass();
}
return endpointInterface;
}
public String getWsdlLocation() {
for (WebService service : wsAnnotations) {
if (!StringUtils.isEmpty(service.wsdlLocation())) {
return service.wsdlLocation();
}
}
if (null != wsProviderAnnotation
&& !StringUtils.isEmpty(wsProviderAnnotation.wsdlLocation())) {
return wsProviderAnnotation.wsdlLocation();
}
return null;
}
/**
* See use of targetNamespace in {@link WebService}.
*
* @return the qualified name of the service.
*/
public QName getServiceName() {
String serviceName = null;
String namespace = null;
// serviceName cannot be specified on SEI so check impl class only
if (!wsAnnotations.isEmpty()) {
int offset = 1;
if (seiClass == null) {
offset = 0;
}
//traverse up the parent impl classes for this info as well, but
//not the last one which would be the sei annotation
for (int x = 0; x < wsAnnotations.size() - offset; x++) {
if (StringUtils.isEmpty(serviceName)) {
serviceName = wsAnnotations.get(x).serviceName();
}
if (StringUtils.isEmpty(namespace)) {
namespace = wsAnnotations.get(x).targetNamespace();
}
}
}
if ((serviceName == null || namespace == null)
&& wsProviderAnnotation != null) {
serviceName = wsProviderAnnotation.serviceName();
namespace = wsProviderAnnotation.targetNamespace();
}
if (StringUtils.isEmpty(serviceName)) {
serviceName = implementorClass.getSimpleName() + "Service";
}
if (StringUtils.isEmpty(namespace)) {
namespace = getDefaultNamespace(implementorClass);
}
return new QName(namespace, serviceName);
}
/**
* See use of targetNamespace in {@link WebService}.
*
* @return the qualified name of the endpoint.
*/
public QName getEndpointName() {
String portName = null;
String namespace = null;
String name = null;
// portName cannot be specified on SEI so check impl class only
if (!wsAnnotations.isEmpty()) {
int offset = 1;
if (seiClass == null) {
offset = 0;
}
//traverse up the parent impl classes for this info as well, but
//not the last one which would be the sei annotation
for (int x = 0; x < wsAnnotations.size() - offset; x++) {
if (StringUtils.isEmpty(portName)) {
portName = wsAnnotations.get(x).portName();
}
if (StringUtils.isEmpty(namespace)) {
namespace = wsAnnotations.get(x).targetNamespace();
}
if (StringUtils.isEmpty(name)) {
name = wsAnnotations.get(x).name();
}
}
}
if ((portName == null || namespace == null)
&& wsProviderAnnotation != null) {
portName = wsProviderAnnotation.portName();
namespace = wsProviderAnnotation.targetNamespace();
}
if (StringUtils.isEmpty(portName)
&& !StringUtils.isEmpty(name)) {
portName = name + "Port";
}
if (StringUtils.isEmpty(portName)) {
portName = implementorClass.getSimpleName() + "Port";
}
if (StringUtils.isEmpty(namespace)) {
namespace = getDefaultNamespace(implementorClass);
}
return new QName(namespace, portName);
}
public QName getInterfaceName() {
String name = null;
String namespace = null;
if (seiClass != null) {
WebService service = seiClass.getAnnotation(WebService.class);
if (!StringUtils.isEmpty(service.name())) {
name = service.name();
}
if (!StringUtils.isEmpty(service.targetNamespace())) {
namespace = service.targetNamespace();
}
} else {
for (WebService service : wsAnnotations) {
if (!StringUtils.isEmpty(service.name()) && name == null) {
name = service.name();
}
if (!StringUtils.isEmpty(service.targetNamespace()) && namespace == null) {
namespace = service.targetNamespace();
}
}
}
if (name == null) {
if (seiClass != null) {
name = seiClass.getSimpleName();
} else if (implementorClass != null) {
name = implementorClass.getSimpleName();
}
}
if (namespace == null) {
if (seiClass != null) {
namespace = getDefaultNamespace(seiClass);
} else if (implementorClass != null) {
namespace = getDefaultNamespace(implementorClass);
}
}
return new QName(namespace, name);
}
private String getDefaultNamespace(Class<?> clazz) {
String pkg = PackageUtils.getNamespace(PackageUtils.getPackageName(clazz));
return StringUtils.isEmpty(pkg) ? "http://unknown.namespace/" : pkg;
}
private String getWSInterfaceName(Class<?> implClz) {
if (implClz.isInterface()
&& implClz.getAnnotation(WebService.class) != null) {
return implClz.getName();
}
Class<?>[] clzs = implClz.getInterfaces();
for (Class<?> clz : clzs) {
if (null != clz.getAnnotation(WebService.class)) {
return clz.getName();
}
}
return null;
}
private String getImplementorClassName() {
for (WebService service : wsAnnotations) {
if (!StringUtils.isEmpty(service.endpointInterface())) {
return service.endpointInterface();
}
}
return null;
}
protected static boolean ifAnnotationLoadedByOtherClassLoader(Class<?> cls,
Class<? extends Annotation> annotationClass) {
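        // An annotation with the right name may have been loaded by a different
        // classloader; getAnnotation() then returns null even though the type
        // name matches, so compare type names instead of Class identity.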
for (Annotation an : cls.getAnnotations()) {
if (an.annotationType() != null
&& annotationClass.getName().equals(an.annotationType().getName())) {
return true;
}
}
return false;
}
private void initialize() {
Class<?> cls = implementorClass;
while (cls != null) {
WebService annotation = cls.getAnnotation(WebService.class);
if (annotation != null) {
wsAnnotations.add(annotation);
if (cls.isInterface()) {
cls = null;
}
} else {
                // check whether an annotation with the same name as WebService was loaded by another classloader
if (ifAnnotationLoadedByOtherClassLoader(cls, WebService.class)) {
LOG.log(Level.WARNING,
"WEBSERVICE_ANNOTATIONS_IS_LOADED_BY_OTHER_CLASSLOADER",
WebService.class.getName());
}
}
if (cls != null) {
cls = cls.getSuperclass();
}
}
String sei = getImplementorClassName();
boolean seiFromWsAnnotation = true;
if (StringUtils.isEmpty(sei)) {
seiFromWsAnnotation = false;
sei = getWSInterfaceName(implementorClass);
}
if (!StringUtils.isEmpty(sei)) {
try {
seiClass = ClassLoaderUtils.loadClass(sei, implementorClass);
} catch (ClassNotFoundException ex) {
throw new WebServiceException(BUNDLE.getString("SEI_LOAD_FAILURE_MSG"), ex);
}
WebService seiAnnotation = seiClass.getAnnotation(WebService.class);
if (null == seiAnnotation) {
throw new WebServiceException(BUNDLE.getString("SEI_WITHOUT_WEBSERVICE_ANNOTATION_EXC"));
}
if (seiFromWsAnnotation
&& (!StringUtils.isEmpty(seiAnnotation.portName())
|| !StringUtils.isEmpty(seiAnnotation.serviceName())
|| !StringUtils.isEmpty(seiAnnotation.endpointInterface()))) {
String expString = BUNDLE.getString("ILLEGAL_ATTRIBUTE_IN_SEI_ANNOTATION_EXC");
throw new WebServiceException(expString);
}
wsAnnotations.add(seiAnnotation);
for (int x = implementorClass.getInterfaces().length - 1; x >= 0; x--) {
if (seiClass.equals(implementorClass.getInterfaces()[x])) {
Type type = implementorClass.getGenericInterfaces()[x];
if (type instanceof ParameterizedType) {
seiType = (ParameterizedType)type;
}
}
}
}
wsProviderAnnotation = getWebServiceProviderAnnotation(implementorClass);
}
private static WebServiceProvider getWebServiceProviderAnnotation(Class<?> cls) {
if (cls == null) {
return null;
}
WebServiceProvider ann = cls.getAnnotation(WebServiceProvider.class);
if (null != ann) {
return ann;
}
if (ifAnnotationLoadedByOtherClassLoader(cls, WebServiceProvider.class)) {
LOG.log(Level.WARNING,
"WEBSERVICE_ANNOTATIONS_IS_LOADED_BY_OTHER_CLASSLOADER",
WebServiceProvider.class.getName());
}
for (Class<?> inf : cls.getInterfaces()) {
if (null != inf.getAnnotation(WebServiceProvider.class)) {
return inf.getAnnotation(WebServiceProvider.class);
}
if (ifAnnotationLoadedByOtherClassLoader(cls, WebServiceProvider.class)) {
LOG.log(Level.WARNING,
"WEBSERVICE_ANNOTATIONS_IS_LOADED_BY_OTHER_CLASSLOADER",
WebServiceProvider.class.getName());
}
}
return getWebServiceProviderAnnotation(cls.getSuperclass());
}
public boolean isWebServiceProvider() {
return Provider.class.isAssignableFrom(implementorClass);
}
public WebServiceProvider getWsProvider() {
return wsProviderAnnotation;
}
public Service.Mode getServiceMode() {
ServiceMode m = implementorClass.getAnnotation(ServiceMode.class);
if (m != null && m.value() != null) {
return m.value();
}
return Service.Mode.PAYLOAD;
}
public Class<?> getProviderParameterType() {
return doGetProviderParameterType(implementorClass);
}
private static Class<?> doGetProviderParameterType(Class<?> c) {
while (c != null) {
Type[] intfTypes = c.getGenericInterfaces();
for (Type t : intfTypes) {
Class<?> clazz = JAXBEncoderDecoder.getClassFromType(t);
if (Provider.class.isAssignableFrom(clazz)) {
if (Provider.class == clazz) {
Type[] paramTypes = ((ParameterizedType)t).getActualTypeArguments();
return JAXBEncoderDecoder.getClassFromType(paramTypes[0]);
}
return doGetProviderParameterType(clazz);
}
}
c = c.getSuperclass();
}
return null;
}
public String getBindingType() {
BindingType bType = implementorClass.getAnnotation(BindingType.class);
if (bType != null) {
return bType.value();
}
return SOAPBinding.SOAP11HTTP_BINDING;
}
}
| 6,656 |
335 | <reponame>Safal08/Hacktoberfest-1<gh_stars>100-1000
{
"word": "Omission",
"definitions": [
"Someone or something that has been left out or excluded.",
"The action of excluding or leaving out someone or something.",
"A failure to fulfil a moral or legal obligation."
],
"parts-of-speech": "Noun"
} | 127 |
325 | package com.box.l10n.mojito.smartling;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
@Configuration
@ConfigurationProperties("test.l10n.smartling")
public class SmartlingTestConfig {
public String projectId = null;
public String fileUri = null;
public String getProjectId() {
return projectId;
}
public void setProjectId(String projectId) {
this.projectId = projectId;
}
public String getFileUri() {
return fileUri;
}
public void setFileUri(String fileUri) {
this.fileUri = fileUri;
}
}
| 235 |
4,879 | <gh_stars>1000+
typedef NS_ENUM(NSUInteger, MWMRouterResultCode) {
MWMRouterResultCodeNoError = 0,
MWMRouterResultCodeCancelled = 1,
MWMRouterResultCodeNoCurrentPosition = 2,
MWMRouterResultCodeInconsistentMWMandRoute = 3,
MWMRouterResultCodeRouteFileNotExist = 4,
MWMRouterResultCodeStartPointNotFound = 5,
MWMRouterResultCodeEndPointNotFound = 6,
MWMRouterResultCodePointsInDifferentMWM = 7,
MWMRouterResultCodeRouteNotFound = 8,
MWMRouterResultCodeNeedMoreMaps = 9,
MWMRouterResultCodeInternalError = 10,
MWMRouterResultCodeFileTooOld = 11,
MWMRouterResultCodeIntermediatePointNotFound = 12,
MWMRouterResultCodeTransitRouteNotFoundNoNetwork = 13,
MWMRouterResultCodeTransitRouteNotFoundTooLongPedestrian = 14,
MWMRouterResultCodeRouteNotFoundRedressRouteError = 15,
MWMRouterResultCodeHasWarnings = 16
} NS_SWIFT_NAME(RouterResultCode);
| 287 |
2,167 | <gh_stars>1000+
from flask import Flask
from oso import NotFoundError
from .models import users_db, Repository
from .oso import oso
app = Flask(__name__)
def serialize(r):
return str(r)
# implemented here so as not to pollute the code samples in model.md
class User:
@staticmethod
def get_current_user():
return users_db["larry"]
# docs: begin-show-route
@app.route("/repo/<name>")
def repo_show(name):
repo = Repository.get_by_name(name)
try:
# docs: begin-authorize
oso.authorize(User.get_current_user(), "read", repo)
# docs: end-authorize
return f"<h1>A Repo</h1><p>Welcome to repo {repo.name}</p>", 200
except NotFoundError:
return f"<h1>Whoops!</h1><p>Repo named {name} was not found</p>", 404
# docs: end-show-route
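# Note (sketch, not part of the docs sample): per oso's documented behavior,
# authorize raises NotFoundError when the user may not even read the resource
# and ForbiddenError when the user can read it but lacks the requested action,
# so a write route would typically map the latter to a 403 rather than a 404.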
| 332 |
1,481 | package apoc.redis;
import apoc.util.MissingDependencyException;
import apoc.util.Util;
import java.lang.reflect.Constructor;
import java.nio.charset.Charset;
import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import static io.lettuce.core.RedisURI.DEFAULT_TIMEOUT;
public class RedisConfig {
public enum Codec {
STRING(StringRedisConnection.class), BYTE_ARRAY(ByteArrayRedisConnection.class);
private Class<? extends RedisConnection> redisConnectionClass;
Codec(Class<? extends RedisConnection> redisConnectionClass) {
this.redisConnectionClass = redisConnectionClass;
}
public RedisConnection getRedisConnection(String uri, Map<String, Object> config) {
try {
RedisConfig redisConfig = new RedisConfig(config);
Constructor<?> constructor = redisConnectionClass.getConstructor(String.class, RedisConfig.class);
return (RedisConnection) constructor.newInstance(uri, redisConfig);
} catch (NoClassDefFoundError e) {
throw new MissingDependencyException("Cannot find the Redis client jar. \n" +
"Please put the lettuce-core-6.1.1.RELEASE.jar into plugin folder. \n" +
"See the documentation: https://neo4j.com/labs/apoc/4.1/database-integration/redis/");
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
private final Charset charset;
private final Duration timeout;
private final boolean autoReconnect;
private final boolean right;
private final Charset scriptCharset;
private final Codec codec;
public RedisConfig(Map<String, Object> config) {
if (config == null) config = Collections.emptyMap();
this.charset = Charset.forName((String) config.getOrDefault("charset", "UTF-8"));
this.timeout = Duration.ofSeconds((long) config.getOrDefault("timeout", DEFAULT_TIMEOUT));
this.scriptCharset = Charset.forName((String) config.getOrDefault("scriptCharset", "UTF-8"));
this.autoReconnect = Util.toBoolean(config.getOrDefault("autoReconnect", true));
this.right = Util.toBoolean(config.getOrDefault("right", true));
this.codec = Codec.valueOf((config.getOrDefault("codec", Codec.STRING.name()).toString().toUpperCase()));
}
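    // Example (hypothetical values): new RedisConfig(Map.of("codec", "BYTE_ARRAY",
    // "timeout", 30L)) selects the byte-array connection with a 30 second timeout,
    // while a null map keeps every default above.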
public boolean isRight() {
return right;
}
public boolean isAutoReconnect() {
return autoReconnect;
}
public Charset getScriptCharset() {
return scriptCharset;
}
public Charset getCharset() {
return charset;
}
public Duration getTimeout() {
return timeout;
}
public Codec getCodec() {
return codec;
}
}
| 1,129 |
1,825 | #import <Foundation/Foundation.h>
typedef NSString *AVAudioSessionLocation;
typedef NSString *NSNotificationName;
const AVAudioSessionLocation AVAudioSessionPolarPatternSubcardioid = @"Subcardioid";
const AVAudioSessionLocation AVAudioSessionOrientationTop = @"Top";
const AVAudioSessionLocation AVAudioSessionOrientationBottom = @"Bottom";
const AVAudioSessionLocation AVAudioSessionOrientationFront = @"Front";
const AVAudioSessionLocation AVAudioSessionOrientationBack = @"Back";
const AVAudioSessionLocation AVAudioSessionPolarPatternOmnidirectional = @"Omnidirectional";
const AVAudioSessionLocation AVAudioSessionPolarPatternCardioid = @"Cardioid";
const NSNotificationName AVAudioSessionRouteChangeNotification = @"AVAudioSessionRouteChangeNotification";
typedef NSString *AVCaptureSessionPreset;
const AVCaptureSessionPreset AVCaptureSessionPreset1280x720 = @"AVCaptureSessionPreset1280x720";
| 280 |
382 | <gh_stars>100-1000
package com.fangxu.dota2helper.util;
import android.content.Context;
import android.widget.Toast;
/**
* Created by lenov0 on 2016/4/13.
*/
public class ToastUtil {
public static void showToast(Context context, String message) {
if (context != null) {
Toast.makeText(context.getApplicationContext(), message, Toast.LENGTH_SHORT).show();
}
}
public static void showToast(Context context, int strResId) {
String str = getStringRes(context, strResId);
if (str != null) {
Toast.makeText(context.getApplicationContext(), str, Toast.LENGTH_SHORT).show();
}
}
private static String getStringRes(Context context, int strResId) {
if (context != null) {
return context.getApplicationContext().getResources().getString(strResId);
}
return null;
}
}
| 348 |
4,095 | <reponame>Tengboy/SeeWeather
package com.xiecc.seeWeather.modules.main.adapter;
import android.support.v4.content.ContextCompat;
import android.view.View;
import com.xiecc.seeWeather.R;
import java.util.HashMap;
import java.util.Map;
public class CardCityHelper {
public static final int SUNNY_CODE = 100;
public static final int RAINY_CODE = 300;
public static final int CLOUDY_CODE = 500;
private static final String SHANG_HAI = "上海";
private static final String BEI_JING = "北京";
private static final String SU_ZHOU = "苏州";
private static final String OTHER = "其他";
private static Map<WeatherInfo, Integer> sMap = new HashMap<>();
static {
        // Shanghai
sMap.put(new WeatherInfo(SUNNY_CODE, SHANG_HAI), R.mipmap.city_shanghai_sunny);
sMap.put(new WeatherInfo(RAINY_CODE, SHANG_HAI), R.mipmap.city_shanghai_rainy);
sMap.put(new WeatherInfo(CLOUDY_CODE, SHANG_HAI), R.mipmap.city_shanghai_cloudy);
        // Beijing
sMap.put(new WeatherInfo(SUNNY_CODE, BEI_JING), R.mipmap.city_beijing_sunny);
sMap.put(new WeatherInfo(RAINY_CODE, BEI_JING), R.mipmap.city_beijing_rainy);
sMap.put(new WeatherInfo(CLOUDY_CODE, BEI_JING), R.mipmap.city_beijing_cloudy);
        // Suzhou
sMap.put(new WeatherInfo(SUNNY_CODE, SU_ZHOU), R.mipmap.city_suzhou_sunny);
sMap.put(new WeatherInfo(RAINY_CODE, SU_ZHOU), R.mipmap.city_suzhou_rain);
sMap.put(new WeatherInfo(CLOUDY_CODE, SU_ZHOU), R.mipmap.city_suzhou_cloudy);
        // Other
sMap.put(new WeatherInfo(SUNNY_CODE, OTHER), R.mipmap.city_other_sunny);
sMap.put(new WeatherInfo(RAINY_CODE, OTHER), R.mipmap.city_other_rainy);
sMap.put(new WeatherInfo(CLOUDY_CODE, OTHER), R.mipmap.city_other_cloudy);
}
void applyStatus(int code, String city, View view) {
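        // Bucket the weather condition code (assumption: HeWeather numbering):
        // 100 = sunny, 101-299 = cloudy variants, 300-407 = rain variants.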
if (code >= 300 && code < 408) {
code = RAINY_CODE;
} else if (code > 100 && code < 300) {
code = CLOUDY_CODE;
} else {
code = SUNNY_CODE;
}
if (!city.matches(String.format("(?:%s|%s|%s)", SU_ZHOU, SHANG_HAI, BEI_JING))) {
city = OTHER;
}
Integer mipRes = sMap.get(new WeatherInfo(code, city));
if (mipRes != null) {
view.setBackground(ContextCompat.getDrawable(view.getContext(), mipRes));
}
}
private static class WeatherInfo {
int weatherCode;
String city;
public WeatherInfo(int weatherCode, String city) {
this.weatherCode = weatherCode;
this.city = city;
}
private String code() {
return String.valueOf(String.format("%s%s", weatherCode, city));
}
@Override
public int hashCode() {
return 31 * 17 + code().hashCode();
}
@Override
public boolean equals(Object o) {
if (o instanceof WeatherInfo) {
return this.code().equals(((WeatherInfo) o).code());
}
return super.equals(o);
}
}
}
| 1,462 |
703 | <reponame>Tekh-ops/ezEngine
#pragma once
#include <Core/ResourceManager/ResourceHandle.h>
#include <PhysXPlugin/Components/PxComponent.h>
struct ezMsgPhysicsAddImpulse;
struct ezMsgPhysicsAddForce;
namespace physx
{
class PxArticulationLink;
class PxArticulation;
class PxAggregate;
class PxSphericalJoint;
struct PxFilterData;
} // namespace physx
using ezSurfaceResourceHandle = ezTypedResourceHandle<class ezSurfaceResource>;
//////////////////////////////////////////////////////////////////////////
class EZ_PHYSXPLUGIN_DLL ezPxRopeComponentManager : public ezComponentManager<class ezPxRopeComponent, ezBlockStorageType::Compact>
{
public:
ezPxRopeComponentManager(ezWorld* pWorld);
~ezPxRopeComponentManager();
virtual void Initialize() override;
private:
void Update(const ezWorldModule::UpdateContext& context);
};
//////////////////////////////////////////////////////////////////////////
class EZ_PHYSXPLUGIN_DLL ezPxRopeComponent : public ezPxComponent
{
EZ_DECLARE_COMPONENT_TYPE(ezPxRopeComponent, ezPxComponent, ezPxRopeComponentManager);
//////////////////////////////////////////////////////////////////////////
// ezComponent
public:
virtual void SerializeComponent(ezWorldWriter& stream) const override;
virtual void DeserializeComponent(ezWorldReader& stream) override;
virtual void OnSimulationStarted() override;
virtual void OnActivated() override;
virtual void OnDeactivated() override;
//////////////////////////////////////////////////////////////////////////
// ezPxRopeComponent
public:
ezPxRopeComponent();
~ezPxRopeComponent();
bool GetDisableGravity() const { return m_bDisableGravity; } // [ property ]
void SetDisableGravity(bool b); // [ property ]
void SetSurfaceFile(const char* szFile); // [ property ]
const char* GetSurfaceFile() const; // [ property ]
ezUInt8 m_uiCollisionLayer = 0; // [ property ]
ezUInt16 m_uiPieces = 16; // [ property ]
float m_fThickness = 0.05f; // [ property ]
float m_fSlack = 0.3f; // [ property ]
bool m_bAttachToOrigin = true; // [ property ]
bool m_bAttachToAnchor = true; // [ property ]
ezAngle m_MaxBend = ezAngle::Degree(30); // [ property ]
ezAngle m_MaxTwist = ezAngle::Degree(15); // [ property ]
void SetAnchorReference(const char* szReference); // [ property ]
void SetAnchor(ezGameObjectHandle hActor);
void AddForceAtPos(ezMsgPhysicsAddForce& msg);
void AddImpulseAtPos(ezMsgPhysicsAddImpulse& msg);
private:
void CreateRope();
ezResult CreateSegmentTransforms(ezDynamicArray<ezTransform>& transforms, float& out_fPieceLength) const;
void DestroyPhysicsShapes();
void Update();
void SendPreviewPose();
void CreateFilterData(physx::PxFilterData& filter);
physx::PxMaterial* GetPxMaterial();
physx::PxJoint* CreateJoint(const ezGameObjectHandle& hTarget, const ezTransform& location, physx::PxRigidBody* pLink, const ezTransform& linkOffset);
void UpdatePreview();
ezSurfaceResourceHandle m_hSurface;
ezGameObjectHandle m_hAnchor;
float m_fTotalMass = 1.0f;
float m_fMaxForcePerFrame = 0.0f;
float m_fBendStiffness = 0.0f;
float m_fBendDamping = 50.0f;
float m_fTwistStiffness = 0.0f;
float m_fTwistDamping = 50.0f;
ezUInt32 m_uiShapeID = ezInvalidIndex;
bool m_bSelfCollision = false;
bool m_bDisableGravity = false;
ezVec3 m_vPreviewRefPos = ezVec3::ZeroVector();
physx::PxAggregate* m_pAggregate = nullptr;
physx::PxArticulation* m_pArticulation = nullptr;
physx::PxJoint* m_pJointOrigin = nullptr;
physx::PxJoint* m_pJointAnchor = nullptr;
ezDynamicArray<physx::PxArticulationLink*> m_ArticulationLinks;
ezUInt32 m_uiUserDataIndex = ezInvalidIndex;
private:
const char* DummyGetter() const { return nullptr; }
};
| 1,389 |
2,261 | img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
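# Train/test pipelines for a DRRG-style text detector (see the DRRGTargets
# step below); the normalization above uses the common ImageNet statistics.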
train_pipeline = [
dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
dict(
type='LoadTextAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomScaling', size=800, scale=(0.75, 2.5)),
dict(
type='RandomCropFlip', crop_ratio=0.5, iter_num=1, min_area_ratio=0.2),
dict(
type='RandomCropPolyInstances',
instance_key='gt_masks',
crop_ratio=0.8,
min_side_ratio=0.3),
dict(
type='RandomRotatePolyInstances',
rotate_ratio=0.5,
max_angle=60,
pad_with_fixed_color=False),
dict(type='SquareResizePad', target_size=800, pad_ratio=0.6),
dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'),
dict(type='DRRGTargets'),
dict(type='Pad', size_divisor=32),
dict(
type='CustomFormatBundle',
keys=[
'gt_text_mask', 'gt_center_region_mask', 'gt_mask',
'gt_top_height_map', 'gt_bot_height_map', 'gt_sin_map',
'gt_cos_map', 'gt_comp_attribs'
],
visualize=dict(flag=False, boundary_key='gt_text_mask')),
dict(
type='Collect',
keys=[
'img', 'gt_text_mask', 'gt_center_region_mask', 'gt_mask',
'gt_top_height_map', 'gt_bot_height_map', 'gt_sin_map',
'gt_cos_map', 'gt_comp_attribs'
])
]
test_pipeline = [
dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 640),
flip=False,
transforms=[
dict(type='Resize', img_scale=(1024, 640), keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
| 1,043 |
640 | <gh_stars>100-1000
/* DCT/CCC - display Drive Code Table data */
/*
Converted to z88dk by <NAME> from a MISOSYS C example
Original code was commented out rather than removed
To build:
zcc +trs80 -create-app -subtype=disk -O3 -DAMALLOC dct.c
zcc +trs80 -create-app -subtype=disk -DAMALLOC -compiler=sdcc --max-allocs-per-node200000 dct.c
** LDOS is required, change the GTDCT address if you have a Model 4 **
If the file output redirection is not needed,
add "-lndos" to reduce the program size
*/
#include <stdio.h>
//#include <z80regs.h>
//#option INLIB
#define GTDCT 0x478f // Model I, III
// #define GTDCT 81 // Model 4
#define MAXDRIVES 8
#define JP 0xC3
#define RET 0xC9
#define enabled(dct) (dct->dct_instr == JP)
typedef unsigned char uchar;
struct dct
{ uchar dct_instr; /* JP or RET instr */
char *dct_driver_addr; /* driver address */
/* DCT byte 4 flags: */
unsigned dct_inhibit: 1; /* 7: inhibit @CKDRV */
unsigned dct_ddc : 1; /* 6: 1=double den controller */
unsigned dct_2sid : 1; /* 5: floppy:1=2-sided drive */
#define dct_dbl dct_2sid /* 5: hard:double cyl count */
unsigned dct_al : 1; /* 4: 1=alien disk controller */
unsigned dct_faddr : 4; /* 3-0: floppy drive addr (1,2,4,8) */
/* DCT byte 3 flags: */
unsigned dct_wp : 1; /* 7: 1=software write protect */
unsigned dct_dden : 1; /* 6: 1=DDEN / 0=SDEN oper */
unsigned dct_8in : 1; /* 5: 1=8" / 0=5.25" drive */
unsigned dct_side : 1; /* 4: side select 1/0 */
unsigned dct_hard : 1; /* 3: 1=hard / 0=floppy */
unsigned dct_dly : 1; /* 2: 1=0.5 / 0=1.0 sec delay */
#define dct_fixd dct_dly /* 2: hard:1=fixed/0=removable */
unsigned dct_step : 2; /* 1-0: FDC step rate (0,1,2,3) */
#define dct_haddr dct_step /* 1-0: hard drive address */
uchar dct_curr; /* current cylinder pos */
uchar dct_high; /* highest cylinder */
/* DCT byte 8 allocation data: */
unsigned dct_gpc : 3; /* 7-5: granules per cyl (0-7) */
unsigned dct_spg : 5; /* 4-0: sectors per gran (0-31) */
/* DCT byte 7 allocation data: */
unsigned dct_head : 3; /* 7-5: hard: # of heads (0-7) */
unsigned dct_spc : 5; /* 4-0: sectors per cyl (0-31) */
uchar dct_dir; /* directory cylinder */
} *dct;
int i, dblbit;
//printf(", %s ms step", step[dct->dct_8in][dct->dct_step]);
//union REGS regs;
//char *step[2][4] = { "6", "12", "20", "30", "3", "6", "10", "15" };
//char *delay[2][2] = { "1.0", "0.5", "1.0", "0.0" };
char *step[2][4] = { {"6", "12", "20", "30"}, {"3", "6", "10", "15"} };
char *delay[2][2] = { {"1.0", "0.5"}, {"1.0", "0.0"} };
int get_dct(int i)
{
    // SCCZ80 accepts both #asm and __asm; we use the latter to stay compatible with SDCC
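    // LDOS @GTDCT expects the logical drive number in register C and returns the
    // DCT address in IY; z88dk returns int in HL, hence the push iy / pop hl pair.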
__asm
pop hl
pop bc ; regs.C = i;
push bc
push hl
call GTDCT ; call(GTDCT, ®s);
push iy
pop hl ; regs.IY
__endasm;
}
main()
{ for (i = 0; i < MAXDRIVES; ++i)
{ //regs.C = i;
//call(GTDCT, ®s);
//dct = (struct dct *) regs.IY;
dct = (struct dct *)get_dct(i); // <-- replaces the original "call"
printf("Drive %d ", i);
if (enabled(dct))
{ printf("enabled:\n");
printf("\t%srite-protected\n", dct->dct_wp ? "W" : "Not w");
printf("\t%s #", dct->dct_hard ? "Rigid" : "Floppy");
printf("%d", dct->dct_hard ? dct->dct_haddr : dct->dct_faddr);
printf(", %s\"", dct->dct_8in ? "8" : "5");
if (dct->dct_hard)
printf(", %d head(s)", dct->dct_head + 1);
else
{ printf(", %sD", dct->dct_dden ? "D" : "S");
printf(", %sS", dct->dct_2sid ? "D" : "S");
printf(", %s sec delay", delay[dct->dct_8in][dct->dct_dly]);
printf(", %s ms step", step[dct->dct_8in][dct->dct_step]);
}
printf("\n");
printf("\t@CKDRV %sinhibited\n", dct->dct_inhibit ? "" : "not ");
dblbit = dct->dct_hard ? dct->dct_dbl : 0;
printf("\tCylinders %u, directory cylinder %u\n",
(dct->dct_high + 1) << dblbit, dct->dct_dir << dblbit);
if (! dct->dct_hard)
dblbit = dct->dct_2sid;
printf("\tSectors per cylinder %d\n",
(dct->dct_spc + 1) << dblbit);
printf("\tGranules per cylinder %d\n",
(dct->dct_gpc + 1) << dblbit);
printf("\tSectors per granule %d\n", dct->dct_spg + 1);
}
else
printf("disabled\n");
printf("\n");
}
}
| 2,033 |
4,816 | <filename>src/llvmir2hll/support/valid_state.cpp
/**
* @file src/llvmir2hll/support/valid_state.cpp
* @brief Implementation of ValidState.
* @copyright (c) 2017 Avast Software, licensed under the MIT license
*/
#include "retdec/llvmir2hll/support/valid_state.h"
namespace retdec {
namespace llvmir2hll {
/**
* @brief Constructs a new valid state.
*/
ValidState::ValidState(): validState(true) {}
/**
* @brief Returns @c true if the object is in a valid state, @c false otherwise.
*/
bool ValidState::isInValidState() const {
return validState;
}
/**
* @brief Sets the object's state to invalid.
*/
void ValidState::invalidateState() {
validState = false;
}
/**
* @brief Sets the object's state to valid.
*/
void ValidState::validateState() {
validState = true;
}
} // namespace llvmir2hll
} // namespace retdec
| 282 |
2,144 | <filename>pinot-segment-local/src/main/java/org/apache/pinot/segment/local/utils/nativefst/automaton/Automaton.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.segment.local.utils.nativefst.automaton;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
/**
* Finite-state automaton with regular expression operations.
* <p>
* Class invariants:
* <ul>
* <li> An automaton is either represented explicitly (with {@link State} and {@link Transition} objects)
* or with a singleton string ({@link #expandSingleton()}) in case
* the automaton is known to accept exactly one string.
* (Implicitly, all states and transitions of an automaton are reachable from its initial state.)
* <li> Automata are always reduced (see {@link #reduce()})
* and have no transitions to dead states (see {@link #removeDeadTransitions()}).
* <li> Automata provided as input to operations are generally assumed to be disjoint.
* </ul>
* <p>
*/
public class Automaton implements Serializable, Cloneable {
/**
* Minimize using Huffman's O(n<sup>2</sup>) algorithm.
* This is the standard text-book algorithm.
*/
public static final int MINIMIZE_HUFFMAN = 0;
/**
* Minimize using Brzozowski's O(2<sup>n</sup>) algorithm.
* This algorithm uses the reverse-determinize-reverse-determinize trick, which has a bad
* worst-case behavior but often works very well in practice
* (even better than Hopcroft's!).
*/
public static final int MINIMIZE_BRZOZOWSKI = 1;
/**
* Minimize using Hopcroft's O(n log n) algorithm.
*/
public static final int MINIMIZE_HOPCROFT = 2;
/**
* Minimize using Valmari's O(n + m log m) algorithm.
*/
public static final int MINIMIZE_VALMARI = 3;
/** Minimize always flag. */
public static boolean _minimizeAlways = false;
/** Selects whether operations may modify the input automata (default: <code>false</code>). */
public static boolean _allowMutation = false;
/** Selects minimization algorithm (default: <code>MINIMIZE_HOPCROFT</code>). */
public static int _minimization = MINIMIZE_HOPCROFT;
/** Initial state of this automaton. */
State _initial;
/** If true, then this automaton is definitely deterministic
(i.e., there are no choices for any run, but a run may crash). */
boolean _deterministic;
/** Hash code. Recomputed by {@link #minimize()}. */
int _hashCode;
/** Singleton string. Null if not applicable. */
String _singleton;
/**
* Constructs a new automaton that accepts the empty language.
* Using this constructor, automata can be constructed manually from
* {@link State} and {@link Transition} objects.
* @see #setInitialState(State)
* @see State
* @see Transition
*/
public Automaton() {
_initial = new State();
_deterministic = true;
_singleton = null;
}
/**
* Sets or resets allow mutate flag.
* If this flag is set, then all automata operations may modify automata given as input;
* otherwise, operations will always leave input automata languages unmodified.
* By default, the flag is not set.
* @param flag if true, the flag is set
* @return previous value of the flag
*/
static public boolean setAllowMutate(boolean flag) {
boolean b = _allowMutation;
_allowMutation = flag;
return b;
}
/**
* Assigns consecutive numbers to the given states.
*/
static void setStateNumbers(Set<State> states) {
if (states.size() == Integer.MAX_VALUE) {
throw new IllegalArgumentException("number of states exceeded Integer.MAX_VALUE");
}
int number = 0;
for (State s : states) {
s._number = number++;
}
}
/**
* Returns a sorted array of transitions for each state (and sets state numbers).
*/
static Transition[][] getSortedTransitions(Set<State> states) {
setStateNumbers(states);
Transition[][] transitions = new Transition[states.size()][];
for (State s : states) {
transitions[s._number] = s.getSortedTransitionArray(false);
}
return transitions;
}
/**
* See {@link MinimizationOperations#minimize(Automaton)}.
* Returns the automaton being given as argument.
*/
public static Automaton minimize(Automaton a) {
a.minimize();
return a;
}
void checkMinimizeAlways() {
if (_minimizeAlways) {
minimize();
}
}
boolean isSingleton() {
return _singleton != null;
}
/**
* Gets initial state.
* @return state
*/
public State getInitialState() {
expandSingleton();
return _initial;
}
/**
* Sets initial state.
* @param s state
*/
public void setInitialState(State s) {
_initial = s;
_singleton = null;
}
/**
* Returns the set of states that are reachable from the initial state.
* @return set of {@link State} objects
*/
public Set<State> getStates() {
expandSingleton();
    Set<State> visited = new HashSet<>();
LinkedList<State> worklist = new LinkedList<State>();
worklist.add(_initial);
visited.add(_initial);
while (!worklist.isEmpty()) {
State s = worklist.removeFirst();
      Collection<Transition> tr = s._transitionSet;
for (Transition t : tr) {
if (!visited.contains(t._to)) {
visited.add(t._to);
worklist.add(t._to);
}
}
}
return visited;
}
/**
* Returns the set of reachable accept states.
* @return set of {@link State} objects
*/
public Set<State> getAcceptStates() {
expandSingleton();
HashSet<State> accepts = new HashSet<>();
    Set<State> visited = new HashSet<>();
LinkedList<State> worklist = new LinkedList<State>();
worklist.add(_initial);
visited.add(_initial);
while (!worklist.isEmpty()) {
State s = worklist.removeFirst();
if (s._accept) {
accepts.add(s);
}
      Collection<Transition> tr = s._transitionSet;
for (Transition t : tr) {
if (!visited.contains(t._to)) {
visited.add(t._to);
worklist.add(t._to);
}
}
}
return accepts;
}
/**
* Adds transitions to explicit crash state to ensure that transition function is total.
*/
void totalize() {
State s = new State();
s._transitionSet.add(new Transition(Character.MIN_VALUE, Character.MAX_VALUE, s));
for (State p : getStates()) {
int maxi = Character.MIN_VALUE;
for (Transition t : p.getSortedTransitions(false)) {
if (t._min > maxi) {
p._transitionSet.add(new Transition((char) maxi, (char) (t._min - 1), s));
}
if (t._max + 1 > maxi) {
maxi = t._max + 1;
}
}
if (maxi <= Character.MAX_VALUE) {
p._transitionSet.add(new Transition((char) maxi, Character.MAX_VALUE, s));
}
}
}
/**
* Reduces this automaton.
* An automaton is "reduced" by combining overlapping and adjacent edge intervals with same destination.
*/
public void reduce() {
if (isSingleton()) {
return;
}
Set<State> states = getStates();
setStateNumbers(states);
for (State s : states) {
List<Transition> st = s.getSortedTransitions(true);
s.resetTransitions();
State p = null;
int min = -1;
int max = -1;
for (Transition t : st) {
if (p == t._to) {
if (t._min <= max + 1) {
if (t._max > max) {
max = t._max;
}
} else {
if (p != null) {
s._transitionSet.add(new Transition((char) min, (char) max, p));
}
min = t._min;
max = t._max;
}
} else {
if (p != null) {
s._transitionSet.add(new Transition((char) min, (char) max, p));
}
p = t._to;
min = t._min;
max = t._max;
}
}
if (p != null) {
s._transitionSet.add(new Transition((char) min, (char) max, p));
}
}
clearHashCode();
}
/**
* Returns sorted array of all interval start points.
*/
char[] getStartPoints() {
//TODO: move to bitsets
Set<Character> pointset = new HashSet<Character>();
pointset.add(Character.MIN_VALUE);
for (State s : getStates()) {
for (Transition t : s._transitionSet) {
pointset.add(t._min);
if (t._max < Character.MAX_VALUE) {
pointset.add((char) (t._max + 1));
}
}
}
char[] points = new char[pointset.size()];
int n = 0;
for (Character m : pointset) {
points[n++] = m;
}
// Remove once move to bitsets
Arrays.sort(points);
return points;
}
private Set<State> getLiveStates(Set<State> states) {
HashMap<State, Set<State>> map = new HashMap<State, Set<State>>();
for (State s : states) {
map.put(s, new HashSet<>());
}
for (State s : states) {
for (Transition t : s._transitionSet) {
map.get(t._to).add(s);
}
}
Set<State> live = new HashSet<>(getAcceptStates());
LinkedList<State> worklist = new LinkedList<State>(live);
while (!worklist.isEmpty()) {
State s = worklist.removeFirst();
for (State p : map.get(s)) {
if (!live.contains(p)) {
live.add(p);
worklist.add(p);
}
}
}
return live;
}
/**
* Removes transitions to dead states and calls {@link #reduce()} and {@link #clearHashCode()}.
* (A state is "dead" if no accept state is reachable from it.)
*/
public void removeDeadTransitions() {
clearHashCode();
if (isSingleton()) {
return;
}
Set<State> states = getStates();
Set<State> live = getLiveStates(states);
for (State s : states) {
Set<Transition> st = s._transitionSet;
s.resetTransitions();
for (Transition t : st) {
if (live.contains(t._to)) {
s._transitionSet.add(t);
}
}
}
reduce();
}
/**
* Expands singleton representation to normal representation.
* Does nothing if not in singleton representation.
*/
public void expandSingleton() {
if (isSingleton()) {
State p = new State();
_initial = p;
for (int i = 0; i < _singleton.length(); i++) {
State q = new State();
q._number = i;
p._transitionSet.add(new Transition(_singleton.charAt(i), q));
p = q;
}
p._accept = true;
_deterministic = true;
_singleton = null;
}
}
/**
* Returns the number of states in this automaton.
*/
public int getNumberOfStates() {
if (isSingleton()) {
return _singleton.length() + 1;
}
return getStates().size();
}
/**
* Returns the number of transitions in this automaton. This number is counted
* as the total number of edges, where one edge may be a character interval.
*/
public int getNumberOfTransitions() {
if (isSingleton()) {
return _singleton.length();
}
int c = 0;
for (State s : getStates()) {
c += s._transitionSet.size();
}
return c;
}
/**
* Returns true if the language of this automaton is equal to the language
* of the given automaton. Implemented using <code>hashCode</code> and
* <code>subsetOf</code>.
*/
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof Automaton)) {
return false;
}
Automaton a = (Automaton) obj;
if (isSingleton() && a.isSingleton()) {
return _singleton.equals(a._singleton);
}
return hashCode() == a.hashCode() && subsetOf(a) && a.subsetOf(this);
}
/**
* Returns hash code for this automaton. The hash code is based on the
* number of states and transitions in the minimized automaton.
* Invoking this method may involve minimizing the automaton.
*/
@Override
public int hashCode() {
if (_hashCode == 0) {
minimize();
}
return _hashCode;
}
/**
* Recomputes the hash code.
* The automaton must be minimal when this operation is performed.
*/
void recomputeHashCode() {
_hashCode = getNumberOfStates() * 3 + getNumberOfTransitions() * 2;
if (_hashCode == 0) {
_hashCode = 1;
}
}
/**
* Must be invoked when the stored hash code may no longer be valid.
*/
void clearHashCode() {
_hashCode = 0;
}
/**
* Returns a string representation of this automaton.
*/
@Override
public String toString() {
StringBuilder b = new StringBuilder();
if (isSingleton()) {
b.append("singleton: ");
for (char c : _singleton.toCharArray()) {
Transition.appendCharString(c, b);
}
b.append("\n");
} else {
Set<State> states = getStates();
setStateNumbers(states);
b.append("initial state: ").append(_initial._number).append("\n");
for (State s : states) {
b.append(s.toString());
}
}
return b.toString();
}
/**
* Returns a clone of this automaton, expands if singleton.
*/
Automaton cloneExpanded() {
Automaton a = clone();
a.expandSingleton();
return a;
}
/**
* Returns a clone of this automaton unless <code>allow_mutation</code> is set, expands if singleton.
*/
Automaton cloneExpandedIfRequired() {
if (_allowMutation) {
expandSingleton();
return this;
} else {
return cloneExpanded();
}
}
/**
* Returns a clone of this automaton.
*/
@Override
public Automaton clone() {
try {
Automaton a = (Automaton) super.clone();
if (!isSingleton()) {
HashMap<State, State> m = new HashMap<State, State>();
Set<State> states = getStates();
for (State s : states) {
m.put(s, new State());
}
for (State s : states) {
State p = m.get(s);
p._accept = s._accept;
if (s == _initial) {
a._initial = p;
}
for (Transition t : s._transitionSet) {
p._transitionSet.add(new Transition(t._min, t._max, m.get(t._to)));
}
}
}
return a;
} catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
}
}
/**
* Returns a clone of this automaton, or this automaton itself if <code>allow_mutation</code> flag is set.
*/
Automaton cloneIfRequired() {
if (_allowMutation) {
return this;
} else {
return clone();
}
}
/**
* See {@link BasicOperations#optional(Automaton)}.
*/
public Automaton optional() {
return BasicOperations.optional(this);
}
/**
* See {@link BasicOperations#repeat(Automaton)}.
*/
public Automaton repeat() {
return BasicOperations.repeat(this);
}
/**
* See {@link BasicOperations#repeat(Automaton, int)}.
*/
public Automaton repeat(int min) {
return BasicOperations.repeat(this, min);
}
/**
* See {@link BasicOperations#repeat(Automaton, int, int)}.
*/
public Automaton repeat(int min, int max) {
return BasicOperations.repeat(this, min, max);
}
/**
* See {@link BasicOperations#complement(Automaton)}.
*/
public Automaton complement() {
return BasicOperations.complement(this);
}
/**
* See {@link BasicOperations#minus(Automaton, Automaton)}.
*/
public Automaton minus(Automaton a) {
return BasicOperations.minus(this, a);
}
/**
* See {@link BasicOperations#intersection(Automaton, Automaton)}.
*/
public Automaton intersection(Automaton a) {
return BasicOperations.intersection(this, a);
}
/**
* See {@link BasicOperations#subsetOf(Automaton, Automaton)}.
*/
public boolean subsetOf(Automaton a) {
return BasicOperations.subsetOf(this, a);
}
/**
* See {@link BasicOperations#determinize(Automaton)}.
*/
public void determinize() {
BasicOperations.determinize(this);
}
/**
* See {@link BasicOperations#addEpsilons(Automaton, Collection)}.
*/
public void addEpsilons(Collection<StatePair> pairs) {
BasicOperations.addEpsilons(this, pairs);
}
/**
* See {@link BasicOperations#isEmptyString(Automaton)}.
*/
public boolean isEmptyString() {
return BasicOperations.isEmptyString(this);
}
/**
* See {@link BasicOperations#isEmpty(Automaton)}.
*/
public boolean isEmpty() {
return BasicOperations.isEmpty(this);
}
/**
* See {@link BasicOperations#run(Automaton, String)}.
*/
public boolean run(String s) {
return BasicOperations.run(this, s);
}
/**
* See {@link MinimizationOperations#minimize(Automaton)}.
*/
public void minimize() {
MinimizationOperations.minimize(this);
}
}
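/**
 * Hedged usage sketch, not part of the original Pinot source: builds the
 * automaton for the single string "ab" manually from {@link State} and
 * {@link Transition} objects, as the class javadoc describes. The direct
 * field access (_transitionSet, _accept) compiles only because this example
 * lives in the same package; treat it as an illustration, not API.
 */
class AutomatonUsageExample {
  public static void main(String[] args) {
    Automaton a = new Automaton(); // accepts the empty language
    State s1 = new State();
    State s2 = new State();
    a.getInitialState()._transitionSet.add(new Transition('a', s1));
    s1._transitionSet.add(new Transition('b', s2));
    s2._accept = true;
    System.out.println(a.run("ab"));           // expected: true
    System.out.println(a.run("abb"));          // expected: false
    System.out.println(a.getNumberOfStates()); // expected: 3
  }
}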
| 6,736 |
1,375 | <gh_stars>1000+
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Command-line tool to generate boilerplate DV testbench.
The generated objects are extended from dv_lib / cip_lib.
"""
import argparse
import logging as log
import re
import sys
import gen_agent
import gen_env
VENDOR_DEFAULT = "lowrisc"
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"name",
metavar="[ip/block name]",
help="""Name of the ip/block for which the UVM TB is being generated.
This should just name the block, not the path to it.""")
parser.add_argument(
"-a",
"--gen-agent",
action='store_true',
help="Generate UVM agent code extended from DV library")
parser.add_argument(
"-s",
"--has-separate-host-device-driver",
action='store_true',
help=
"""IP / block agent creates a separate driver for host and device modes.
(Ignored if -a switch is not passed.)""")
parser.add_argument("-e",
"--gen-env",
action='store_true',
help="Generate testbench UVM env code")
parser.add_argument(
"-c",
"--is-cip",
action='store_true',
help=
"""Is comportable IP - this will result in code being extended from CIP
library. If switch is not passed, the code will be extended from DV
library instead. (Ignored if -e switch is not passed.)""")
parser.add_argument(
"-hr",
"--has-ral",
default=False,
action='store_true',
help="""Specify whether the DUT has CSRs and thus needs a UVM RAL model.
        This option is required if either --is-cip or --has-interrupts
        is enabled.""")
parser.add_argument(
"-hi",
"--has-interrupts",
default=False,
action='store_true',
help="""CIP has interrupts. Create interrupts interface in tb""")
parser.add_argument(
"-ha",
"--has-alerts",
default=False,
action='store_true',
help="""CIP has alerts. Create alerts interface in tb""")
parser.add_argument(
"-he",
"--has-edn",
default=False,
action='store_true',
help="""CIP has EDN connection. Create edn pull interface in tb""")
parser.add_argument(
"-ea",
"--env-agents",
nargs="+",
metavar="agt1 agt2",
help="""Env creates an interface agent specified here. They are
assumed to already exist. Note that the list is space-separated,
and not comma-separated. (ignored if -e switch is not passed)"""
)
parser.add_argument(
"-ao",
"--agent-outdir",
metavar="[hw/dv/sv]",
help="""Path to place the agent code. A directory called <name>_agent is
created at this location. (default set to './<name>')""")
parser.add_argument(
"-eo",
"--env-outdir",
metavar="[hw/ip/<ip>]",
help="""Path to place the full testbench code. It creates 3 directories
- dv, data, and doc. The DV doc and the testplan Hjson files are placed
in the doc and data directories respectively. These are to be merged
into the IP's root directory (with the existing data and doc
directories). Under dv, it creates 3 sub-directories - env, tb, and
tests - to place all of the testbench sources. (default set to
'./<name>'.)""")
parser.add_argument(
"-v",
"--vendor",
default=VENDOR_DEFAULT,
help=
"""Name of the vendor / entity developing the testbench. This is used
to set the VLNV of the FuseSoC core files.""")
args = parser.parse_args()
# The name should be alphanumeric.
if re.search(r"\W", args.name):
log.error("The block name '%s' contains non-alphanumeric characters.",
args.name)
sys.exit(1)
if not args.agent_outdir:
args.agent_outdir = args.name
if not args.env_outdir:
args.env_outdir = args.name
# The has_ral option must be set if either is_cip or has_interrupts is set,
# as both require use of a RAL model. As such, it is disallowed to not have
# has_ral set if one of these options is set.
if not args.has_ral and (args.is_cip or args.has_interrupts):
args.has_ral = True
print("NOTE: --has_ral switch is enabled since either "
"--is_cip or --has_interrupts is set.")
if args.gen_agent:
gen_agent.gen_agent(args.name, args.has_separate_host_device_driver,
args.agent_outdir, args.vendor)
if args.gen_env:
if not args.env_agents:
args.env_agents = []
gen_env.gen_env(args.name, args.is_cip, args.has_ral,
                        args.has_interrupts, args.has_alerts, args.has_edn,
args.env_agents, args.env_outdir, args.vendor)
if __name__ == '__main__':
main()
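# ---------------------------------------------------------------------------
# Hedged usage notes (not part of the original lowRISC script). The flag names
# match the parser above; the script name "gen.py" and the block/agent names
# are illustrative only:
#
#   ./gen.py i2c -a -e -c -hi -ea i2c_agent tl_agent
#
# generates an i2c UVM agent plus a CIP-based env with an interrupt interface
# and two pre-existing agents; --has-ral is forced on because --is-cip is set.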
| 2,322 |
568 | <reponame>JanStoltman/CompositeAndroid<gh_stars>100-1000
package com.pascalwelsch.compositeandroid.core;
public interface Removable {
void remove();
}
| 56 |
362 | <reponame>lostways/sncli
# Copyright (c) 2014 <NAME>
# Licensed under the MIT License
import urwid
class UserInput(urwid.Edit):
def __init__(self, config, caption, edit_text, callback_func, args):
self.config = config
self.callback_func = callback_func
self.callback_func_args = args
super(UserInput, self).__init__(caption=caption,
edit_text=edit_text,
wrap='clip')
def keypress(self, size, key):
size = (size[0],) # if this isn't here then urwid freaks out...
if key == 'esc':
self.callback_func(self.callback_func_args, None)
elif key == 'enter':
self.callback_func(self.callback_func_args, self.edit_text)
else:
return super(UserInput, self).keypress(size, key)
return None
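# Hedged usage sketch (not part of the original sncli source): the widget
# reports back through callback_func(args, text), where text is None when the
# user pressed escape and the edit contents when they pressed enter. A
# hypothetical consumer looks like:
#
#   def on_search_done(args, text):
#       if text is not None:
#           run_search(text)
#
#   prompt = UserInput(config, 'Search: ', '', on_search_done, None)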
| 418 |
742 | <reponame>nuclearsandwich/fastrtps-debian
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file HeartbeatResponseDelay.h
*
*/
#ifndef HEARTBEATRESPONSEDELAY_H_
#define HEARTBEATRESPONSEDELAY_H_
#ifndef DOXYGEN_SHOULD_SKIP_THIS_PUBLIC
#include "../../resources/TimedEvent.h"
#include "../../common/CDRMessage_t.h"
#include "../../messages/RTPSMessageGroup.h"
namespace eprosima {
namespace fastrtps{
namespace rtps {
class StatefulReader;
class WriterProxy;
/**
* Class HeartbeatResponseDelay, TimedEvent used to delay the response to a specific HB.
* @ingroup READER_MODULE
*/
class HeartbeatResponseDelay:public TimedEvent
{
public:
virtual ~HeartbeatResponseDelay();
/**
* @param p_WP
* @param interval
*/
HeartbeatResponseDelay(WriterProxy* p_WP,double interval);
/**
* Method invoked when the event occurs
*
* @param code Code representing the status of the event
* @param msg Message associated to the event
*/
void event(EventCode code, const char* msg= nullptr);
//!Pointer to the WriterProxy associated with this specific event.
WriterProxy* mp_WP;
//!CDRMessage_t used in the response.
RTPSMessageGroup_t m_cdrmessages;
};
}
} /* namespace rtps */
} /* namespace eprosima */
#endif
#endif /* HEARTBEATRESPONSEDELAY_H_ */
| 785 |
316 | from ipaddress import IPv4Network
from arango.typings import Json
from core.db.model import QueryModel
from core.query.model import FunctionTerm
def has_desired_change(cursor: str, fn: FunctionTerm) -> str:
return (
f"{cursor}.desired.{fn.property_path}!=null && "
+ f"{cursor}.reported.{fn.property_path}!={cursor}.desired.{fn.property_path}"
)
def in_subnet(cursor: str, bind_vars: Json, fn: FunctionTerm, model: QueryModel) -> str:
"""
Assumptions and requirements:
    - this function only works with IPv4 addresses
    - IP addresses are stored as dotted-quad strings (4 octets) in the reported section
- one argument is given which defines the ip/mask
:param cursor: the cursor to read from
:param bind_vars: the bind_vars to send to arango
:param fn: the function definition
:param model: the related query model.
:return: the AQL filter statement
"""
if len(fn.args) != 1:
raise AttributeError("Function in_subnet expects exactly one argument. Example: 1.2.3.4/24")
network = IPv4Network(fn.args[0], strict=False)
mask = int(network.netmask)
expected = int(network.network_address) & mask
length = str(len(bind_vars))
bind_vars[length] = expected
return f"BIT_AND(IPV4_TO_NUMBER({cursor}.{fn.property_path}), {mask}) == @{length}"
def has_key(cursor: str, bind_vars: Json, fn: FunctionTerm, model: QueryModel) -> str:
assert (
len(fn.args) == 1
), "has_key(path.to.property, name_of_prop) or has_key(path.to.property, [name_of_prop_a, name_of_prop_b])"
args = [fn.args[0]] if isinstance(fn.args[0], str) else fn.args[0]
for arg in args:
assert isinstance(arg, str), f"has_key: argument must be string, but got: {arg}"
prop = f"fn{len(bind_vars)}"
if len(args) == 0:
return "true"
elif len(args) == 1:
bind_vars[prop] = fn.args[0]
return f"HAS({cursor}.{fn.property_path}, @{prop})"
else:
bind_vars[prop] = args
return f"@{prop} ALL IN ATTRIBUTES({cursor}.{fn.property_path}, true)"
def as_arangodb_function(cursor: str, bind_vars: Json, fn: FunctionTerm, model: QueryModel) -> str:
if fn.fn == "has_key":
return has_key(cursor, bind_vars, fn, model)
if fn.fn == "in_subnet":
return in_subnet(cursor, bind_vars, fn, model)
if fn.fn == "has_desired_change":
return has_desired_change(cursor, fn)
else:
raise AttributeError(f"Function {fn} does not exist!")
| 1,009 |
460 | <filename>trunk/mac/BumpTop/VisualPhysicsActorAnimation.h
/*
* Copyright 2012 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BUMPTOP_VISUALPHYSICSACTORANIMATION_H_
#define BUMPTOP_VISUALPHYSICSACTORANIMATION_H_
#if defined(OS_WIN)
#include <boost/cstdint.hpp>
using boost::int64_t;
#endif
#include "CppTweener.h"
struct BumpPose;
class RoomSurface;
class VisualPhysicsActor;
class VisualPhysicsActorAnimation : public QObject, public tween::TweenerListener {
Q_OBJECT
public:
explicit VisualPhysicsActorAnimation(VisualPhysicsActor* visual_physics_actor,
int64_t length,
Ogre::Vector3 final_position,
Ogre::Quaternion final_orientation,
RoomSurface* final_surface = NULL,
Ogre::Real final_alpha_modulation_factor = 1,
Ogre::Real final_actor_size_factor = 1,
int16_t transition_style = tween::CUBIC);
// TODO: when I changed this to virtual it seemed
// to cause bad accesses
~VisualPhysicsActorAnimation();
virtual tween::TweenerParam* tweener_param();
virtual VisualPhysicsActor* visual_physics_actor();
virtual bool is_pending_deletion();
virtual void start();
virtual void moveToFinalPositionAndEndAnimation();
virtual void endAnimation();
virtual BumpPose final_pose();
virtual Ogre::Vector3 final_scale();
// Implementations of interfaces from TweenerListener
void onStart(const tween::TweenerParam& param);
void onStep(const tween::TweenerParam& param);
void onComplete(const tween::TweenerParam& param);
signals:
void onAnimationComplete(VisualPhysicsActorAnimation* animation);
protected:
Ogre::Real position_x_;
Ogre::Real position_y_;
Ogre::Real position_z_;
Ogre::Real orientation_w_;
Ogre::Real orientation_x_;
Ogre::Real orientation_y_;
Ogre::Real orientation_z_;
Ogre::Real final_alpha_modulation_factor_;
Ogre::Real alpha_modulation_factor_;
Ogre::Vector3 final_scale_;
Ogre::Real final_actor_scale_factor_;
Ogre::Real actor_scale_x_;
Ogre::Real actor_scale_y_;
Ogre::Real actor_scale_z_;
Ogre::Vector3 original_actor_scale_;
Ogre::Vector3 final_position_;
Ogre::Quaternion final_orientation_;
RoomSurface* final_surface_;
bool previous_physics_enabled_;
bool pending_deletion_;
bool is_set_pose_enabled_;
VisualPhysicsActor* visual_physics_actor_;
tween::TweenerParam param_;
};
#endif // BUMPTOP_VISUALPHYSICSACTORANIMATION_H_
| 1,120 |
713 | <reponame>pferraro/infinispan
package org.infinispan.server.configuration;
import java.util.Map;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.configuration.attributes.ConfigurationElement;
import org.infinispan.server.Server;
import org.infinispan.server.configuration.endpoint.SinglePortServerConfigurationBuilder;
import org.infinispan.server.core.configuration.ProtocolServerConfigurationBuilder;
import org.infinispan.server.network.NetworkAddress;
public class SocketBindingsConfiguration extends ConfigurationElement<SocketBindingsConfiguration> {
static final AttributeDefinition<Integer> PORT_OFFSET = AttributeDefinition.builder(Attribute.PORT_OFFSET, null, Integer.class).build();
static final AttributeDefinition<String> DEFAULT_INTERFACE = AttributeDefinition.builder(Attribute.DEFAULT_INTERFACE, null, String.class).build();
private final Map<String, SocketBindingConfiguration> socketBindings;
static AttributeSet attributeDefinitionSet() {
return new AttributeSet(SocketBindingsConfiguration.class, PORT_OFFSET, DEFAULT_INTERFACE);
}
SocketBindingsConfiguration(AttributeSet attributes, Map<String, SocketBindingConfiguration> socketBindings) {
super(Element.SOCKET_BINDINGS, attributes);
this.socketBindings = socketBindings;
}
public Integer offset() {
return attributes.attribute(SocketBindingsConfiguration.PORT_OFFSET).get();
}
Map<String, SocketBindingConfiguration> socketBindings() {
return socketBindings;
}
public void applySocketBinding(String bindingName, ProtocolServerConfigurationBuilder builder, SinglePortServerConfigurationBuilder singlePort) {
if (!socketBindings.containsKey(bindingName)) {
throw Server.log.unknownSocketBinding(bindingName);
}
SocketBindingConfiguration binding = socketBindings.get(bindingName);
NetworkAddress networkAddress = binding.interfaceConfiguration().getNetworkAddress();
String host = networkAddress.getAddress().getHostAddress();
int port = binding.port() + offset();
if (builder != singlePort) {
// Ensure we are using a different socket binding than the one used by the single-port endpoint
if (builder.startTransport() && singlePort.host().equals(host) && singlePort.port() == port) {
throw Server.log.protocolCannotUseSameSocketBindingAsEndpoint();
}
}
builder.socketBinding(bindingName).host(host).port(port);
}
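   // Hedged worked example (not part of the original Infinispan source): with a
   // binding declared as <socket-binding name="default" port="11222"/> and
   // port-offset="100", applySocketBinding() above configures the builder with
   // port 11322 on the bound interface's address (names are illustrative).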
}
| 770 |
790 | """
25. Reverse lookups
This demonstrates the reverse lookup features of the database API.
"""
from django.db import models
class User(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class Poll(models.Model):
question = models.CharField(max_length=200)
creator = models.ForeignKey(User)
def __unicode__(self):
return self.question
class Choice(models.Model):
name = models.CharField(max_length=100)
poll = models.ForeignKey(Poll, related_name="poll_choice")
related_poll = models.ForeignKey(Poll, related_name="related_choice")
def __unicode__(self):
return self.name
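# Hedged illustration (not part of the original test module): with the
# related_name values above, the reverse lookups read
#
#   user.poll_set.all()        # default reverse name for Poll.creator
#   poll.poll_choice.all()     # Choices reached via Choice.poll
#   poll.related_choice.all()  # Choices reached via Choice.related_poll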
| 239 |
2,943 | /*
* Copyright 2015 LinkedIn Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.secbro.qark.intentsniffer.services;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
public class BootReceiver extends BroadcastReceiver {
@Override
public void onReceive(Context context, Intent intent) {
Intent service = new Intent(context, BroadcastStealerService.class);
context.startService(service);
}
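    // Hedged note (not part of the original QARK source): a boot receiver only
    // fires if it is registered in AndroidManifest.xml, roughly like:
    //
    //   <uses-permission android:name="android.permission.RECEIVE_BOOT_COMPLETED"/>
    //   <receiver android:name=".intentsniffer.services.BootReceiver">
    //     <intent-filter>
    //       <action android:name="android.intent.action.BOOT_COMPLETED"/>
    //     </intent-filter>
    //   </receiver>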
} | 248 |
3,486 | package com.thinkaurelius.titan.graphdb.types.vertices;
import com.thinkaurelius.titan.core.Cardinality;
import com.thinkaurelius.titan.core.PropertyKey;
import com.thinkaurelius.titan.graphdb.transaction.StandardTitanTx;
import com.thinkaurelius.titan.graphdb.types.TypeDefinitionCategory;
import org.apache.tinkerpop.gremlin.structure.Direction;
public class PropertyKeyVertex extends RelationTypeVertex implements PropertyKey {
public PropertyKeyVertex(StandardTitanTx tx, long id, byte lifecycle) {
super(tx, id, lifecycle);
}
@Override
public Class<?> dataType() {
return getDefinition().getValue(TypeDefinitionCategory.DATATYPE,Class.class);
}
@Override
public Cardinality cardinality() {
return super.multiplicity().getCardinality();
}
@Override
public final boolean isPropertyKey() {
return true;
}
@Override
public final boolean isEdgeLabel() {
return false;
}
@Override
public boolean isUnidirected(Direction dir) {
return dir==Direction.OUT;
}
}
| 395 |
601 | <filename>samples/js-display-list/config/config.json
{
"$schema": "https://developer.microsoft.com/json-schemas/spfx-build/config.2.0.schema.json",
"version": "2.0",
"externals": {
"@microsoft/sp-client-base": "node_modules/@microsoft/sp-client-base/dist/sp-client-base.js",
"@microsoft/sp-client-preview": "node_modules/@microsoft/sp-client-preview/dist/sp-client-preview.js",
"@microsoft/sp-lodash-subset": "node_modules/@microsoft/sp-lodash-subset/dist/sp-lodash-subset.js",
"office-ui-fabric-react": "node_modules/office-ui-fabric-react/dist/office-ui-fabric-react.js",
"react": "node_modules/react/dist/react.min.js",
"react-dom": "node_modules/react-dom/dist/react-dom.min.js",
"react-dom/server": "node_modules/react-dom/dist/react-dom-server.min.js"
},
"localizedResources": {
"jsDisplayListStrings": "lib/webparts/jsDisplayList/loc/{locale}.js"
},
"bundles": {
"js-display-list-web-part": {
"components": [
{
"entrypoint": "./lib/webparts/jsDisplayList/JsDisplayListWebPart.js",
"manifest": "./src/webparts/jsDisplayList/JsDisplayListWebPart.manifest.json"
}
]
}
}
}
| 504 |
16,461 | <gh_stars>1000+
#import <Foundation/Foundation.h>
typedef NS_ENUM(NSInteger, DevMenuRNGestureHandlerDirection) {
DevMenuRNGestureHandlerDirectionRight = 1,
DevMenuRNGestureHandlerDirectionLeft = 2,
DevMenuRNGestureHandlerDirectionUp = 4,
DevMenuRNGestureHandlerDirectionDown = 8,
};
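// Hedged note (not part of the original source): the enum values are distinct
// powers of two, so directions can be OR-ed into a mask, e.g.
//   NSInteger horizontalMask = DevMenuRNGestureHandlerDirectionRight |
//                              DevMenuRNGestureHandlerDirectionLeft; // == 3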
| 114 |
329 | // Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.cloudera.api.v11;
import com.cloudera.api.model.ApiUserSessionList;
import com.cloudera.api.v1.UsersResource;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
@Consumes({MediaType.APPLICATION_JSON})
@Produces({MediaType.APPLICATION_JSON})
public interface UsersResourceV11 extends UsersResource {
/**
* Return a list of the sessions associated with interactive authenticated
* users in Cloudera Manager.
* <p>
* Note that these sessions are only associated with users who log into the
* web interface. API users will not appear.
*
* @return A list of user sessions
*/
@GET
@Path("/sessions")
public ApiUserSessionList getSessions();
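  // Hedged usage note (not part of the original Cloudera source): over HTTP
  // this maps to something like
  //   GET /api/v11/users/sessions
  // and returns a JSON list of the active interactive sessions.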
}
| 463 |
369 | /*
* Copyright © 2014 <NAME>, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package io.cdap.cdap.internal.io;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.reflect.TypeToken;
import io.cdap.cdap.internal.asm.ClassDefinition;
import io.cdap.cdap.internal.lang.Fields;
import java.lang.reflect.Method;
import java.lang.reflect.Type;
/**
* A {@link FieldAccessorFactory} that uses ASM to generate a specific {@link FieldAccessor} class
* for each field. The resulting {@link FieldAccessor} instance will be cached and reused.
*/
public final class ASMFieldAccessorFactory implements FieldAccessorFactory {
private final LoadingCache<FieldEntry, FieldAccessor> fieldAccessorCache;
public ASMFieldAccessorFactory() {
this.fieldAccessorCache = CacheBuilder.newBuilder().build(new FieldAccessorLoader());
}
@Override
public FieldAccessor getFieldAccessor(TypeToken<?> type, String fieldName) {
return fieldAccessorCache.getUnchecked(new FieldEntry(type, fieldName));
}
/**
* The {@link CacheLoader} for generating instance of {@link FieldAccessor} instance.
*/
private static final class FieldAccessorLoader extends CacheLoader<FieldEntry, FieldAccessor> {
    // See if we are able to use the "defineClass" method in the ClassLoader of the field class.
private final Method defineClass;
FieldAccessorLoader() {
Method defineClass = null;
try {
defineClass = ClassLoader.class.getDeclaredMethod("defineClass", String.class,
byte[].class, int.class, int.class);
defineClass.setAccessible(true);
} catch (Exception e) {
// ok to ignore this exception, it will resort to the slow reflection way.
}
this.defineClass = defineClass;
}
@Override
public FieldAccessor load(FieldEntry key) throws Exception {
      // Generate the FieldAccessor class bytecode, reusing the "defineClass"
      // method looked up in the constructor; when it is unavailable, the
      // generator falls back to the slower reflection-based accessor.
ClassDefinition classDef = new FieldAccessorGenerator()
.generate(key.getType().getRawType(),
Fields.findField(key.getType().getType(), key.getFieldName()),
defineClass == null);
return createAccessor(key.getType(), classDef);
}
private FieldAccessor createAccessor(TypeToken<?> type, ClassDefinition classDef) throws Exception {
// Must use the same classloader as the type.
ClassLoader classLoader = type.getRawType().getClassLoader();
String className = classDef.getClassName();
Method findLoadedClass = ClassLoader.class.getDeclaredMethod("findLoadedClass", String.class);
findLoadedClass.setAccessible(true);
Class<?> result = (Class<?>) findLoadedClass.invoke(classLoader, className);
if (result == null) {
// Try to define the class from the same classloader of the given type.
byte[] bytecode = classDef.getBytecode();
result = (Class<?>) defineClass.invoke(classLoader, className, bytecode, 0, bytecode.length);
}
return (FieldAccessor) result.getConstructor(Type.class).newInstance(type.getType());
}
}
}
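// Hedged usage sketch (not part of the original CDAP source): callers obtain a
// cached accessor per (type, field) pair; "Person" and "name" are examples,
// and the get(...) call assumes the accessor exposes a getter of that shape.
//
//   FieldAccessorFactory factory = new ASMFieldAccessorFactory();
//   FieldAccessor accessor = factory.getFieldAccessor(TypeToken.of(Person.class), "name");
//   Object name = accessor.get(person);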
| 1,512 |
3,083 | typedef int (func)(const int *a, const int *b);
static func g;
/*
OUTPUT:
{
"includes": [],
"skipped_ranges": [],
"usr2func": [{
"usr": 8105378401105136463,
"detailed_name": "static int g(const int *, const int *)",
"qual_name_offset": 11,
"short_name": "g",
"bases": [],
"vars": [],
"callees": [],
"kind": 12,
"parent_kind": 0,
"storage": 0,
"declarations": ["2:13-2:14|2:1-2:14|1|-1"],
"derived": [],
"uses": []
}],
"usr2type": [{
"usr": 10383876566159302459,
"detailed_name": "typedef int (func)(const int *, const int *)",
"qual_name_offset": 12,
"short_name": "func",
"spell": "1:14-1:18|1:1-1:47|2|-1",
"bases": [],
"funcs": [],
"types": [],
"vars": [],
"alias_of": 0,
"kind": 252,
"parent_kind": 0,
"declarations": [],
"derived": [],
"instances": [],
"uses": ["2:8-2:12|4|-1"]
}],
"usr2var": []
}
*/ | 540 |
3,084 | /*++
Copyright (c) 2005 Microsoft Corporation
All rights reserved.
THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
PARTICULAR PURPOSE.
File Name:
porientdata.h
Abstract:
PageOrientation data structure definition. This provides a convenient
description of the PrintSchema PageOrientation feature.
--*/
#pragma once
#include "porientschema.h"
namespace XDPrintSchema
{
namespace PageOrientation
{
struct PageOrientationData
{
PageOrientationData() :
orientation(Landscape)
{
}
EOrientationOption orientation;
};
}
}
| 348 |
689 | <filename>code/Behavior/Command/DrawCommand.h
#pragma once
#include "Drawable.h"
#include "ICommand.h"
class DrawCommand : public ICommand
{
public:
DrawCommand(int x, int y, Drawable *drawable) : m_x(x), m_y(y), m_drawable(drawable) {}
virtual void execute() override
{
m_drawable->draw(m_x, m_y);
}
private:
int m_x;
int m_y;
Drawable *m_drawable;
}; | 171 |
310 | package org.seasar.doma.internal.jdbc.scalar;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.util.function.Supplier;
import org.seasar.doma.wrapper.ArrayWrapper;
import org.seasar.doma.wrapper.BigDecimalWrapper;
import org.seasar.doma.wrapper.BigIntegerWrapper;
import org.seasar.doma.wrapper.BlobWrapper;
import org.seasar.doma.wrapper.BooleanWrapper;
import org.seasar.doma.wrapper.ByteWrapper;
import org.seasar.doma.wrapper.BytesWrapper;
import org.seasar.doma.wrapper.ClobWrapper;
import org.seasar.doma.wrapper.DateWrapper;
import org.seasar.doma.wrapper.DoubleWrapper;
import org.seasar.doma.wrapper.EnumWrapper;
import org.seasar.doma.wrapper.FloatWrapper;
import org.seasar.doma.wrapper.IntegerWrapper;
import org.seasar.doma.wrapper.LocalDateTimeWrapper;
import org.seasar.doma.wrapper.LocalDateWrapper;
import org.seasar.doma.wrapper.LocalTimeWrapper;
import org.seasar.doma.wrapper.LongWrapper;
import org.seasar.doma.wrapper.NClobWrapper;
import org.seasar.doma.wrapper.ObjectWrapper;
import org.seasar.doma.wrapper.PrimitiveBooleanWrapper;
import org.seasar.doma.wrapper.PrimitiveByteWrapper;
import org.seasar.doma.wrapper.PrimitiveDoubleWrapper;
import org.seasar.doma.wrapper.PrimitiveFloatWrapper;
import org.seasar.doma.wrapper.PrimitiveIntWrapper;
import org.seasar.doma.wrapper.PrimitiveLongWrapper;
import org.seasar.doma.wrapper.PrimitiveShortWrapper;
import org.seasar.doma.wrapper.SQLXMLWrapper;
import org.seasar.doma.wrapper.ShortWrapper;
import org.seasar.doma.wrapper.StringWrapper;
import org.seasar.doma.wrapper.TimeWrapper;
import org.seasar.doma.wrapper.TimestampWrapper;
import org.seasar.doma.wrapper.UtilDateWrapper;
public final class BasicScalarSuppliers {
public static Supplier<Scalar<Array, Array>> ofArray() {
return () -> new BasicScalar<>(new ArrayWrapper());
}
public static Supplier<Scalar<BigDecimal, BigDecimal>> ofBigDecimal() {
return () -> new BasicScalar<>(new BigDecimalWrapper());
}
public static Supplier<Scalar<BigInteger, BigInteger>> ofBigInteger() {
return () -> new BasicScalar<>(new BigIntegerWrapper());
}
public static Supplier<Scalar<Blob, Blob>> ofBlob() {
return () -> new BasicScalar<>(new BlobWrapper());
}
public static Supplier<Scalar<Boolean, Boolean>> ofBoolean() {
return () -> new BasicScalar<>(new BooleanWrapper());
}
public static Supplier<Scalar<Byte, Byte>> ofByte() {
return () -> new BasicScalar<>(new ByteWrapper());
}
public static Supplier<Scalar<byte[], byte[]>> ofBytes() {
return () -> new BasicScalar<>(new BytesWrapper());
}
public static Supplier<Scalar<Clob, Clob>> ofClob() {
return () -> new BasicScalar<>(new ClobWrapper());
}
public static Supplier<Scalar<Date, Date>> ofDate() {
return () -> new BasicScalar<>(new DateWrapper());
}
public static Supplier<Scalar<Double, Double>> ofDouble() {
return () -> new BasicScalar<>(new DoubleWrapper());
}
public static <E extends Enum<E>> Supplier<Scalar<E, E>> ofEnum(Class<E> enumClass) {
return () -> new BasicScalar<>(new EnumWrapper<>(enumClass));
}
public static Supplier<Scalar<Float, Float>> ofFloat() {
return () -> new BasicScalar<>(new FloatWrapper());
}
public static Supplier<Scalar<Integer, Integer>> ofInteger() {
return () -> new BasicScalar<>(new IntegerWrapper());
}
public static Supplier<Scalar<LocalDate, LocalDate>> ofLocalDate() {
return () -> new BasicScalar<>(new LocalDateWrapper());
}
public static Supplier<Scalar<LocalDateTime, LocalDateTime>> ofLocalDateTime() {
return () -> new BasicScalar<>(new LocalDateTimeWrapper());
}
public static Supplier<Scalar<LocalTime, LocalTime>> ofLocalTime() {
return () -> new BasicScalar<>(new LocalTimeWrapper());
}
public static Supplier<Scalar<Long, Long>> ofLong() {
return () -> new BasicScalar<>(new LongWrapper());
}
public static Supplier<Scalar<NClob, NClob>> ofNClob() {
return () -> new BasicScalar<>(new NClobWrapper());
}
public static Supplier<Scalar<Object, Object>> ofObject() {
return () -> new BasicScalar<>(new ObjectWrapper());
}
public static Supplier<Scalar<Boolean, Boolean>> ofPrimitiveBoolean() {
return () -> new BasicScalar<>(new PrimitiveBooleanWrapper());
}
public static Supplier<Scalar<Byte, Byte>> ofPrimitiveByte() {
return () -> new BasicScalar<>(new PrimitiveByteWrapper());
}
public static Supplier<Scalar<Double, Double>> ofPrimitiveDouble() {
return () -> new BasicScalar<>(new PrimitiveDoubleWrapper());
}
public static Supplier<Scalar<Float, Float>> ofPrimitiveFloat() {
return () -> new BasicScalar<>(new PrimitiveFloatWrapper());
}
public static Supplier<Scalar<Integer, Integer>> ofPrimitiveInt() {
return () -> new BasicScalar<>(new PrimitiveIntWrapper());
}
public static Supplier<Scalar<Long, Long>> ofPrimitiveLong() {
return () -> new BasicScalar<>(new PrimitiveLongWrapper());
}
public static Supplier<Scalar<Short, Short>> ofPrimitiveShort() {
return () -> new BasicScalar<>(new PrimitiveShortWrapper());
}
public static Supplier<Scalar<Short, Short>> ofShort() {
return () -> new BasicScalar<>(new ShortWrapper());
}
public static Supplier<Scalar<SQLXML, SQLXML>> ofSQLXML() {
return () -> new BasicScalar<>(new SQLXMLWrapper());
}
public static Supplier<Scalar<String, String>> ofString() {
return () -> new BasicScalar<>(new StringWrapper());
}
public static Supplier<Scalar<Time, Time>> ofTime() {
return () -> new BasicScalar<>(new TimeWrapper());
}
public static Supplier<Scalar<Timestamp, Timestamp>> ofTimestamp() {
return () -> new BasicScalar<>(new TimestampWrapper());
}
public static Supplier<Scalar<java.util.Date, java.util.Date>> ofUtilDate() {
return () -> new BasicScalar<>(new UtilDateWrapper());
}
}
| 2,147 |
841 | <filename>resteasy-core/src/main/java/org/jboss/resteasy/plugins/server/servlet/HttpResponseFactory.java
package org.jboss.resteasy.plugins.server.servlet;
import org.jboss.resteasy.spi.HttpResponse;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
/**
* @author <a href="mailto:<EMAIL>"><NAME></a>
* @version $Revision: 1 $
*/
public interface HttpResponseFactory
{
HttpResponse createResteasyHttpResponse(HttpServletResponse response, HttpServletRequest request);
}
| 173 |
948 | /*
* Copyright (c) 2013, ADVANSEE - http://www.advansee.com/
* <NAME> <<EMAIL>>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* \addtogroup cc2538-adc
* @{
*
* \file
* Implementation of the cc2538 ADC driver
*/
#include "contiki.h"
#include "dev/soc-adc.h"
#include "dev/cctest.h"
#include "dev/rfcore-xreg.h"
#include "dev/adc.h"
#include "reg.h"
#include <stdint.h>
/*---------------------------------------------------------------------------*/
void
adc_init(void)
{
/* Start conversions only manually */
REG(SOC_ADC_ADCCON1) |= SOC_ADC_ADCCON1_STSEL;
}
/*---------------------------------------------------------------------------*/
int16_t
adc_get(uint8_t channel, uint8_t ref, uint8_t div)
{
uint32_t cctest_tr0, rfcore_xreg_atest;
int16_t res;
/* On-chip temperature sensor */
if(channel == SOC_ADC_ADCCON_CH_TEMP) {
/* Connect the temperature sensor to the ADC */
cctest_tr0 = REG(CCTEST_TR0);
REG(CCTEST_TR0) = cctest_tr0 | CCTEST_TR0_ADCTM;
/* Enable the temperature sensor */
rfcore_xreg_atest = REG(RFCORE_XREG_ATEST);
REG(RFCORE_XREG_ATEST) = (rfcore_xreg_atest & ~RFCORE_XREG_ATEST_ATEST_CTRL) |
RFCORE_XREG_ATEST_ATEST_CTRL_TEMP;
}
/* Start a single extra conversion with the given parameters */
REG(SOC_ADC_ADCCON3) = (REG(SOC_ADC_ADCCON3) &
~(SOC_ADC_ADCCON3_EREF | SOC_ADC_ADCCON3_EDIV | SOC_ADC_ADCCON3_ECH)) |
ref | div | channel;
/* Poll until end of conversion */
while(!(REG(SOC_ADC_ADCCON1) & SOC_ADC_ADCCON1_EOC));
/* Read conversion result, reading SOC_ADC_ADCH last to clear
* SOC_ADC_ADCCON1.EOC */
res = REG(SOC_ADC_ADCL) & 0xfc;
res |= REG(SOC_ADC_ADCH) << 8;
/* On-chip temperature sensor */
if(channel == SOC_ADC_ADCCON_CH_TEMP) {
/* Restore the initial temperature sensor state and connection (better for
* power consumption) */
REG(RFCORE_XREG_ATEST) = rfcore_xreg_atest;
REG(CCTEST_TR0) = cctest_tr0;
}
/* Return conversion result */
return res;
}
/** @} */
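/*
 * Hedged usage sketch (not part of the original Contiki driver): read the
 * on-chip temperature sensor with the internal reference and 512-sample
 * decimation. The SOC_ADC_ADCCON_REF_INT / SOC_ADC_ADCCON_DIV_512 constants
 * are assumed to come from dev/soc-adc.h.
 *
 *   adc_init();
 *   int16_t raw = adc_get(SOC_ADC_ADCCON_CH_TEMP,
 *                         SOC_ADC_ADCCON_REF_INT,
 *                         SOC_ADC_ADCCON_DIV_512);
 */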
| 1,308 |
348 | {"nom":"Sarrazac","circ":"3ème circonscription","dpt":"Dordogne","inscrits":299,"abs":99,"votants":200,"blancs":26,"nuls":7,"exp":167,"res":[{"nuance":"MDM","nom":"<NAME>","voix":93},{"nuance":"SOC","nom":"<NAME>","voix":74}]} | 93 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Essey-lès-Nancy","circ":"1ère circonscription","dpt":"Meurthe-et-Moselle","inscrits":6129,"abs":3191,"votants":2938,"blancs":20,"nuls":18,"exp":2900,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":980},{"nuance":"DVD","nom":"<NAME>","voix":594},{"nuance":"SOC","nom":"Mme <NAME>","voix":417},{"nuance":"FN","nom":"Mme <NAME>","voix":326},{"nuance":"FI","nom":"M. <NAME>","voix":250},{"nuance":"UDI","nom":"M. <NAME>","voix":114},{"nuance":"ECO","nom":"M. <NAME>","voix":56},{"nuance":"DLF","nom":"M. <NAME>","voix":40},{"nuance":"COM","nom":"Mme <NAME>","voix":31},{"nuance":"DIV","nom":"M. <NAME>","voix":23},{"nuance":"EXD","nom":"M. Pierre-<NAME>","voix":20},{"nuance":"DIV","nom":"M. <NAME>","voix":17},{"nuance":"ECO","nom":"Mme <NAME>","voix":13},{"nuance":"EXG","nom":"Mme <NAME>","voix":12},{"nuance":"DVG","nom":"M. <NAME>","voix":7}]} | 370 |
3,428 | {"id":"00411","group":"spam-2","checksum":{"type":"MD5","value":"e606c6408dbcda1a60be16896197bace"},"text":"From <EMAIL> Mon Jun 24 17:05:40 2002\nReturn-Path: [email protected]\nDelivery-Date: Tue May 21 16:36:12 2002\nReceived: from mandark.labs.netnoteinc.com ([213.105.180.140]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g4LFZje14967 for\n <<EMAIL>>; Tue, 21 May 2002 16:35:47 +0100\nReceived: from ridus.researchnet.co.kr ([203.251.80.158]) by\n mandark.labs.netnoteinc.com (8.11.2/8.11.2) with ESMTP id g4LFZWD26639 for\n <<EMAIL>>; Tue, 21 May 2002 16:35:38 +0100\nReceived: from nmonline.com.cn (213-97-186-96.uc.nombres.ttd.es\n [213.97.186.96]) by ridus.researchnet.co.kr with SMTP (Microsoft Exchange\n Internet Mail Service Version 5.5.2448.0) id L2R7Q3H7; Wed, 22 May 2002\n 00:35:30 +0900\nX-Priority: 3\nTo: <EMAIL>\nSubject: Great Idea for You byrdshot\nX-Mailer: Microsoft Outlook Express 5.57.4141.2408\nMessage-Id: <<EMAIL>._<EMAIL>>\nX-Msmail-Priority: Normal\nDate: Tue, 21 May 2002 08:38:23 -0700\nCc: <EMAIL>, <EMAIL>, <EMAIL>,\n <EMAIL>, <EMAIL>, <EMAIL>,\n <EMAIL>, <EMAIL>, <EMAIL>\nFrom: _rebeccallewellyn1__@<EMAIL>.<EMAIL>\nReceived: from nmonline.com.cn by WY860DYR3F8.nmonline.com.cn with SMTP\n for <EMAIL>; Tue, 21 May 2002 08:38:23 -0700\nX-Keywords: \nContent-Type: text/html; charset=us-ascii\nContent-Transfer-Encoding: 7BIT\n\n<HTML><P ALIGN=CENTER><FONT SIZE=5 PTSIZE=18 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"><B>Mortgage Rates Are About To Rise</FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=2 PTSIZE=10 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"></B><BR>\n</FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=5 PTSIZE=18 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"><B>Cash In Now!</FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=2 PTSIZE=10 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"></B><BR>\n<P ALIGN=LEFT><BR>\n<P ALIGN=CENTER></FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=4 PTSIZE=14 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\">Our programs will help you with:</FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=2 PTSIZE=10 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"><BR>\n<P ALIGN=LEFT><BR>\n<P ALIGN=CENTER></FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=3 PTSIZE=12 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\">-Debt Consolidation<BR>\n<BR>\n-2nd Mortgage<BR>\n<BR>\n-Refianance<BR>\n<BR>\n-Home Improvement</FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=2 PTSIZE=10 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"><BR>\n<P ALIGN=LEFT><BR>\n<P ALIGN=CENTER></FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=4 PTSIZE=14 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\">Our free no obligation quite has already helped <BR>\nthousands of homeowners, just like you.</FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=2 PTSIZE=10 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"><BR>\n<P ALIGN=LEFT><BR>\n<BR>\n<P ALIGN=CENTER></FONT><FONT COLOR=\"#0000ff\" BACK=\"#ffffff\" style=\"BACKGROUND-COLOR: #ffffff\" SIZE=5 PTSIZE=18 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"><A HREF=\"http://www.mortgageloanfreequotes.com\">Click Here to start saving</A></FONT><FONT COLOR=\"#000000\" BACK=\"#ffffff\" 
style=\"BACKGROUND-COLOR: #ffffff\" SIZE=2 PTSIZE=10 FAMILY=\"SANSSERIF\" FACE=\"Arial\" LANG=\"0\"><BR>\n<P ALIGN=LEFT><BR>\n<BR>\n<BR>\n<BR>\n<P ALIGN=CENTER>If you would rather not be included in our future mailings, click <A HREF=\"mailto:<EMAIL>\">here</A>.</P></P></P></P></P></P></P></P></P></P></P></FONT></HTML>\n\n"} | 1,625 |
17,702 | <filename>Source/Readers/HTKDeserializers/MLFUtils.h
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once
#include <unordered_map>
#include <vector>
#include <string>
#include <boost/algorithm/string.hpp>
#include <boost/noncopyable.hpp>
#include <boost/range/iterator_range_core.hpp>
namespace CNTK {
// Representation of a state list table.
// The table is preserved in memory, the number of states is only expected to be a couple of thousands,
// so it is fine to keep all in memory.
class StateTable : boost::noncopyable
{
public:
void ReadStateList(const std::wstring& stateListPath);
const std::vector<bool>& SilStateMask() const
{
return m_silStateMask;
}
const std::unordered_map<std::string, size_t>& States() const
{
return m_stateTable;
}
private:
bool IsSilState(const std::string& stateName) const
{
return stateName.size() > 3 && !strncmp(stateName.c_str(), "sil", 3);
}
static std::vector<boost::iterator_range<char*>> ReadNonEmptyLines(const std::wstring& path, std::vector<char>& buffer);
std::vector<bool> m_silStateMask; // [state index] => true if is sil state (cached)
std::unordered_map<std::string, size_t> m_stateTable; // for state <=> index
};
typedef std::shared_ptr<StateTable> StateTablePtr;
typedef unsigned short ClassIdType;
// Representation of an MLF range.
class MLFFrameRange
{
static const double s_htkTimeToFrame;
uint32_t m_firstFrame; // start frame
uint32_t m_numFrames; // number of frames
ClassIdType m_classId; // numeric state id
public:
// Parses format with original HTK state align MLF format and state list and builds an MLFFrameRange.
void Build(const std::vector<boost::iterator_range<char*>>& tokens, const std::unordered_map<std::string, size_t>& stateTable, size_t byteOffset);
ClassIdType ClassId() const { return m_classId; }
uint32_t FirstFrame() const { return m_firstFrame; }
uint32_t NumFrames() const { return m_numFrames; }
// Note: preserving logic of the old speech reader.
// Parse the time range.
// There are two formats:
// - original HTK
// - Dong's hacked format: ts te senonename senoneid
static std::pair<size_t, size_t> ParseFrameRange(const std::vector<boost::iterator_range<char*>>& tokens, size_t byteOffset);
void Save(unsigned int firstFrame, unsigned int numFrames, size_t uid);
private:
void VerifyAndSaveRange(const std::pair<size_t, size_t>& frameRange, size_t uid, size_t byteOffset);
};
// Utility class for parsing an MLF utterance.
class MLFUtteranceParser
{
const StateTablePtr m_states;
public:
MLFUtteranceParser(const StateTablePtr& states) : m_states(states)
{}
bool Parse(const boost::iterator_range<char*>& utteranceData, std::vector<MLFFrameRange>& result, size_t sequenceOffset);
};
}
| 1,266 |
2,550 | //
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "create_samples_mean.h"
// Creates pairs of samples of differentially private means.
// Each sample-pair replicates a unique scenario constructed in the proto for
// BoundedMeanDpTest.java, available here:
// https://github.com/google/differential-privacy/blob/main/proto/testing/bounded_mean_dp_test_cases.textproto.
namespace differential_privacy {
namespace testing {
const std::string mean_samples_folder = "../statisticaltester/boundedmeansamples";
double DiscretizeMean(double true_value, double granularity) {
  if (granularity > 0) {
    double abs_value = std::abs(true_value);
    double scaled_sample = true_value/granularity;
    if (abs_value >= 1L << 54) {
      // Doubles at this magnitude are already integral, so rounding the
      // scaled sample would be a no-op; skip it.
      double discretized_value = scaled_sample * granularity;
      return discretized_value;
    } else {
      double discretized_value = std::round(scaled_sample) * granularity;
      return discretized_value;
    }
  }
  std::cout << "Granularity must be positive." << std::endl;
  return true_value;
}
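// For example, with granularity 0.25 a raw mean of 0.37 is scaled to 1.48,
// rounded to 1, and mapped back to 0.25; 0.40 would round up to 0.50.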
// Construct the BoundedMean algorithm.
double DPMean(std::vector<double> values, double granularity,
double epsilon, int max_partitions, int max_contributions, int lower, int upper) {
std::unique_ptr<BoundedMean<double>> boundedmean =
BoundedMean<double>::Builder()
.SetEpsilon(epsilon)
.SetMaxPartitionsContributed(max_partitions)
.SetMaxContributionsPerPartition(max_contributions)
.SetLower(lower)
.SetUpper(upper)
.Build()
.ValueOrDie();
base::StatusOr<Output> result = boundedmean->Result(values.begin(),
values.end());
Output obj = result.ValueOrDie();
return GetValue<double>(obj);
}
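// Illustrative call (parameters mirror scenario 1 below; the output is
// random by construction):
//
//   std::vector<double> v{0};
//   double dp = DPMean(v, 0.0078125, std::log(3), 1, 1, 0, 1);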
// Construct the BoundedMean algorithm for large values.
// Additional parameters enable large values to be added
// to the algorithm one by one.
double DPLargeMean(double initial_value, double extra_values_length,
double extra_value, double granularity, double epsilon, int max_partitions,
int max_contributions, int lower, int upper) {
std::unique_ptr<BoundedMean<double>> boundedmean =
BoundedMean<double>::Builder()
.SetEpsilon(epsilon)
.SetMaxPartitionsContributed(max_partitions)
.SetMaxContributionsPerPartition(max_contributions)
.SetLower(lower)
.SetUpper(upper)
.Build()
.ValueOrDie();
  // Add the initial value as a single entry (AddEntry takes one value).
  boundedmean->AddEntry(initial_value);
// Add entry with subsequent values
for (int i=0; i<extra_values_length; i++) {
boundedmean->AddEntry(extra_value);
}
return GetValue<double>(boundedmean->PartialResult().ValueOrDie());
}
// Creates a folder to contain all samples with a particular ratio value
// (e.g., R95). Every folder contains 22 subfolders for each unique sample-pair.
// Every subfolder contains seven runs of each sample-pair (14 files in total).
void CreateSingleScenarioMean(int scenario, std::vector<double>valuesA,
std::vector<double>valuesB, double granularity, double epsilon,
int max_partitions, int max_contributions, int lower, int upper,
int number_of_samples, double ratio, double initial_value,
double extra_values_length, double extra_value) {
double implemented_epsilon = epsilon / ratio;
std::string filepath = mean_samples_folder+"/R"
+std::to_string(static_cast<int>(ratio*100))+"/Scenario"
+std::to_string(scenario);
mkdir(filepath.c_str(), 0777);
for (int i=0; i<7; i++) {
std::ofstream samplefileA;
std::ofstream samplefileB;
samplefileA.open(filepath+"/TestCase"+std::to_string(i)+"A.txt");
samplefileB.open(filepath+"/TestCase"+std::to_string(i)+"B.txt");
if (extra_values_length == 0) {
      for (int j=0; j<number_of_samples; j++) {
double outputA = DPMean(valuesA, granularity,
implemented_epsilon, max_partitions, max_contributions, lower, upper);
double discretized_outputA = DiscretizeMean(outputA, granularity);
samplefileA << discretized_outputA << "\n";
double outputB = DPMean(valuesB, granularity,
implemented_epsilon, max_partitions, max_contributions, lower, upper);
double discretized_outputB = DiscretizeMean(outputB, granularity);
samplefileB << discretized_outputB << "\n";
}
samplefileA.close();
samplefileB.close();
}
else {
      for (int j=0; j<number_of_samples; j++) {
double outputA = DPMean(valuesA, granularity,
implemented_epsilon, max_partitions, max_contributions, lower, upper);
double discretized_outputA = DiscretizeMean(outputA, granularity);
samplefileA << discretized_outputA << "\n";
double outputB = DPLargeMean(initial_value, extra_values_length,
extra_value, granularity, implemented_epsilon, max_partitions,
max_contributions, lower, upper);
double discretized_outputB = DiscretizeMean(outputB, granularity);
samplefileB << discretized_outputB << "\n";
}
samplefileA.close();
samplefileB.close();
}
}
}
// Runs each sample-pair with parameters that replicate those specified in:
// https://github.com/google/differential-privacy/blob/main/proto/testing/bounded_mean_dp_test_cases.textproto.
void GenerateAllScenariosMean(double ratio) {
const int num_of_samples = 100;
double small_epsilon = 0.1;
double default_epsilon = std::log(3);
double large_epsilon = 2*std::log(3);
// Laplace noise, empty mean, default parameters
std::vector<double>zero_vector{0};
std::vector<double>valuesB1{1000};
CreateSingleScenarioMean(1,zero_vector,valuesB1,0.0078125,default_epsilon,1,1,0,1,num_of_samples,ratio);
// Laplace noise, empty mean, many partitions contributed
std::vector<double>valuesB2(1000,25);
CreateSingleScenarioMean(2,zero_vector,valuesB2,0.25,default_epsilon,25,1,0,1,num_of_samples,ratio);
// Laplace noise, empty mean, many contributions per partition
std::vector<double>valuesB3(1000,10);
CreateSingleScenarioMean(3,zero_vector,valuesB3,0.25,default_epsilon,1,10,0,1,num_of_samples,ratio);
// Laplace noise, empty mean, large bounds
std::vector<double>valuesB4{-50000};
CreateSingleScenarioMean(4,zero_vector,valuesB4,0.25,default_epsilon,1,1,-50,50,num_of_samples,ratio);
// Laplace noise, empty mean, small epsilon
std::vector<double>valuesB5{1000};
CreateSingleScenarioMean(5,zero_vector,valuesB5,0.0625,small_epsilon,1,1,0,1,num_of_samples,ratio);
// Laplace noise, empty mean, large epsilon
std::vector<double>valuesB6{1000};
CreateSingleScenarioMean(6,zero_vector,valuesB6,0.03125,large_epsilon,1,1,0,1,num_of_samples,ratio);
// Laplace noise, small positive mean, default parameters
std::vector<double>valuesA7{1};
std::vector<double>valuesB7{1,-1000};
CreateSingleScenarioMean(7,valuesA7,valuesB7,0.015625,default_epsilon,1,1,0,1,num_of_samples,ratio);
// Laplace noise, small positive mean, many partitions contributed
CreateSingleScenarioMean(8,zero_vector,zero_vector,0.25,default_epsilon,25,1,0,1,num_of_samples,ratio,
1,25,-1000);
// Laplace noise, small positive mean, many contributions per partition
CreateSingleScenarioMean(9,zero_vector,zero_vector,0.25,default_epsilon,1,10,0,1,num_of_samples,ratio,
1,10,-1000);
// Laplace noise, small positive mean, small epsilon
std::vector<double>valuesA10{1};
std::vector<double>valuesB10{1,-1000};
CreateSingleScenarioMean(10,valuesA10,valuesB10,0.0625,small_epsilon,1,1,0,1,num_of_samples,ratio);
// Laplace noise, small positive mean, large epsilon
std::vector<double>valuesA11{1};
std::vector<double>valuesB11{1,-1000};
CreateSingleScenarioMean(11,valuesA11,valuesB11,0.0625,large_epsilon,1,1,0,1,num_of_samples,ratio);
// Laplace noise, small positive mean, multiple entries
std::vector<double>valuesA12{0.64872,0.12707,0.00128,0.14684,0.86507};
std::vector<double>valuesB12{0.64872,0.12707,0.00128,0.14684,0.86507,1000};
CreateSingleScenarioMean(12,valuesA12,valuesB12,0.015625,default_epsilon,1,1,0,1,num_of_samples,ratio);
// Laplace noise, large positive mean, default parameters
std::vector<double>valuesA13{50};
std::vector<double>valuesB13{50,-1000};
CreateSingleScenarioMean(13,valuesA13,valuesB13,0.25,default_epsilon,1,1,0,50,num_of_samples,ratio);
// Laplace noise, large positive mean, many partitions contributed
CreateSingleScenarioMean(14,zero_vector,zero_vector,2,default_epsilon,25,1,0,50,
num_of_samples,ratio,50,25,-1000);
// Laplace noise, large positive mean, many contributions per partition
CreateSingleScenarioMean(15,zero_vector,zero_vector,1,default_epsilon,1,10,0,50,
num_of_samples,ratio,50,10,-1000);
// Laplace noise, large positive mean, small epsilon
std::vector<double>valuesA16{50};
std::vector<double>valuesB16{50,-1000};
CreateSingleScenarioMean(16,valuesA16,valuesB16,2,small_epsilon,1,1,0,50,num_of_samples,ratio);
// Laplace noise, large positive mean, large epsilon
std::vector<double>valuesA17{50};
std::vector<double>valuesB17{50,-1000};
CreateSingleScenarioMean(17,valuesA17,valuesB17,0.5,large_epsilon,1,1,0,50,num_of_samples,ratio);
// Laplace noise, large positive mean, multiple entries
  std::vector<double>valuesA18{32.43606,35.35006,40.73424,32.53939,7.081785};
  std::vector<double>valuesB18{32.43606,35.35006,40.73424,32.53939,7.081785,-1000};
CreateSingleScenarioMean(18,valuesA18,valuesB18,0.25,default_epsilon,1,1,0,50,num_of_samples,ratio);
// Laplace noise, large mixed mean, default parameters
std::vector<double>valuesA19{-50};
std::vector<double>valuesB19{-50,50000};
CreateSingleScenarioMean(19,valuesA19,valuesB19,0.5,default_epsilon,1,1,-50,50,num_of_samples,ratio);
// Laplace noise, large mixed mean, many partitions contributed
CreateSingleScenarioMean(20,zero_vector,zero_vector,2,default_epsilon,25,1,-50,50,
num_of_samples,ratio,-50,25,50000);
// Laplace noise, large mixed mean, many contributions per partition
CreateSingleScenarioMean(21,zero_vector,zero_vector,2,default_epsilon,1,10,-50,50,
num_of_samples,ratio,-50,10,50000);
// Laplace noise, large mixed mean, multiple entries
std::vector<double>valuesA22{-32.43606,35.35006,-40.73424,-32.53939,7.081785};
std::vector<double>valuesB22{-32.43606,35.35006,-40.73424,-32.53939,7.081785,50000};
CreateSingleScenarioMean(22,valuesA22,valuesB22,0.5,default_epsilon,1,1,-50,50,num_of_samples,ratio);
}
} // testing
} // differential_privacy
| 4,049 |
4,994 | <filename>examples/ssd_detection/test.cpp<gh_stars>1000+
/*
Copyright (c) 2013, <NAME> and the respective contributors
All rights reserved.
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file.
*/
#include <cmath>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <vector>
#include "tiny_dnn/tiny_dnn.h"
#define INPUT_SIZE 300
#define N_ANCHORS 8732
#define N_CLASSES 21
#define BG_CLASS_ID 0
#define NMS_THRESHOLD 0.5
#define MEAN_B 123
#define MEAN_G 117
#define MEAN_R 104
void convert_image(const std::string& imagefilename,
int w,
int h,
tiny_dnn::vec_t& data) {
const int MEAN_BGR[] = {MEAN_B, MEAN_G, MEAN_R};
tiny_dnn::image<> img(imagefilename, tiny_dnn::image_type::bgr);
tiny_dnn::image<> resized = resize_image(img, w, h);
data = resized.to_vec();
size_t spatial_size = resized.height() * resized.width();
for (size_t c = 0; c < resized.depth(); ++c) {
for (size_t i = 0; i < spatial_size; ++i) {
data[c * spatial_size + i] -= MEAN_BGR[c];
}
}
}
void concat_hwc_features(tiny_dnn::vec_t& collections,
size_t n_collection_items,
tiny_dnn::vec_t& feature,
size_t in_spatial_size,
size_t in_channels) {
tiny_dnn::vec_t t_feature;
t_feature.resize(feature.size());
// transpose features to HxWxC
for (size_t i = 0; i < in_spatial_size; ++i) {
for (size_t j = 0; j < in_channels; ++j) {
t_feature[i * in_channels + j] = feature[j * in_spatial_size + i];
}
}
// Append features to vectors
for (size_t i = 0; i < t_feature.size(); ++i) {
collections[n_collection_items + i] = t_feature[i];
}
}
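// For example, a 2-channel feature over 2 spatial positions stored
// channel-major as {c0p0, c0p1, c1p0, c1p1} is appended in HWC order as
// {c0p0, c1p0, c0p1, c1p1}.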
void inline_softmax(tiny_dnn::vec_t& confidences) {
for (size_t i = 0; i < N_ANCHORS; ++i) {
float sum = 0;
for (size_t j = 0; j < N_CLASSES; ++j) {
sum += exp(confidences[i * N_CLASSES + j]);
}
for (size_t j = 0; j < N_CLASSES; ++j) {
confidences[i * N_CLASSES + j] =
exp(confidences[i * N_CLASSES + j]) / sum;
}
}
}
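// Note: this direct softmax can overflow for large logits. A numerically
// safer variant (not used here) subtracts the per-anchor maximum first:
//
//   float m = *std::max_element(row, row + N_CLASSES);  // needs <algorithm>
//   sum += exp(row[j] - m);  // then divide exp(row[j] - m) by sum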
void save_default_boxes(tiny_dnn::vec_t& default_boxes,
size_t box_index,
float cx,
float cy,
float width,
float height) {
default_boxes[box_index * 4] = cx;
default_boxes[box_index * 4 + 1] = cy;
default_boxes[box_index * 4 + 2] = width;
default_boxes[box_index * 4 + 3] = height;
}
void init_default_boxes(tiny_dnn::vec_t& default_boxes) {
float steps[] = {8.0 / INPUT_SIZE, 16.0 / INPUT_SIZE, 32.0 / INPUT_SIZE,
64.0 / INPUT_SIZE, 100.0 / INPUT_SIZE, 300.0 / INPUT_SIZE};
float sizes[] = {30.0 / INPUT_SIZE, 60.0 / INPUT_SIZE, 111.0 / INPUT_SIZE,
162.0 / INPUT_SIZE, 213.0 / INPUT_SIZE, 264.0 / INPUT_SIZE,
315.0 / INPUT_SIZE};
size_t feature_map_sizes[] = {38, 19, 10, 5, 3, 1};
std::vector<std::vector<float>> aspect_ratios = {{2}, {2, 3}, {2, 3},
{2, 3}, {2}, {2}};
const size_t N_FEATURES = 6;
size_t box_index = 0;
for (size_t i = 0; i < N_FEATURES; ++i) {
size_t fm_size = feature_map_sizes[i];
for (size_t h = 0; h < fm_size; ++h) {
for (size_t w = 0; w < fm_size; ++w) {
float cx = (w + 0.5) * steps[i];
float cy = (h + 0.5) * steps[i];
float s = sizes[i];
save_default_boxes(default_boxes, box_index++, cx, cy, s, s);
s = sqrt(sizes[i] * sizes[i + 1]);
save_default_boxes(default_boxes, box_index++, cx, cy, s, s);
s = sizes[i];
for (float ar : aspect_ratios[i]) {
save_default_boxes(default_boxes, box_index++, cx, cy, s * sqrt(ar),
s / sqrt(ar));
save_default_boxes(default_boxes, box_index++, cx, cy, s / sqrt(ar),
s * sqrt(ar));
}
}
}
}
}
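// Sanity check on N_ANCHORS: each cell yields 2 square boxes plus 2 boxes
// per aspect ratio, i.e. 4 or 6 per cell, so
//   38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4
// = 5776 + 2166 + 600 + 150 + 36 + 4 = 8732.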
void construct_nets(std::vector<tiny_dnn::network<tiny_dnn::sequential>>& nets,
const std::string& modelFolder) {
using conv = tiny_dnn::convolutional_layer;
using pool = tiny_dnn::max_pooling_layer;
using relu = tiny_dnn::relu_layer;
using l2norm = tiny_dnn::l2_normalization_layer;
using pad = tiny_dnn::zero_pad_layer;
tiny_dnn::network<tiny_dnn::sequential> nn1;
nn1 << conv(300, 300, 3, 3, 64, tiny_dnn::padding::same) // vgg.0
<< relu() << conv(300, 300, 3, 64, 64, tiny_dnn::padding::same) // vgg.2
<< relu() << pool(300, 300, 64, 2, 2, false)
<< conv(150, 150, 3, 64, 128, tiny_dnn::padding::same) // vgg.5
<< relu()
<< conv(150, 150, 3, 128, 128, tiny_dnn::padding::same) // vgg.7
<< relu() << pool(150, 150, 128, 2, 2, false)
<< conv(75, 75, 3, 128, 256, tiny_dnn::padding::same) // vgg.10
<< relu() << conv(75, 75, 3, 256, 256, tiny_dnn::padding::same) // vgg.12
<< relu() << conv(75, 75, 3, 256, 256, tiny_dnn::padding::same) // vgg.14
<< relu() << pool(75, 75, 256, 2, 2, true)
<< conv(38, 38, 3, 256, 512, tiny_dnn::padding::same) // vgg.17
<< relu() << conv(38, 38, 3, 512, 512, tiny_dnn::padding::same) // vgg.19
<< relu() << conv(38, 38, 3, 512, 512, tiny_dnn::padding::same) // vgg.21
<< relu() << l2norm(38 * 38, 512, 1e-12, 20);
nets.push_back(nn1);
tiny_dnn::network<tiny_dnn::sequential> nn2;
nn2 << pool(38, 38, 512, 2, 2, false)
<< conv(19, 19, 3, 512, 512, tiny_dnn::padding::same) // vgg.24
<< relu() << conv(19, 19, 3, 512, 512, tiny_dnn::padding::same) // vgg.26
<< relu() << conv(19, 19, 3, 512, 512, tiny_dnn::padding::same) // vgg.28
<< relu() << pad(19, 19, 512, 1, 1) << pool(21, 21, 512, 3, 1, true)
<< pad(19, 19, 512, 6, 6)
<< conv(31, 31, 3, 512, 1024, tiny_dnn::padding::valid, true, 1, 1, 6,
6) // vgg.31
<< relu()
<< conv(19, 19, 1, 1024, 1024, tiny_dnn::padding::same) // vgg.33
<< relu();
nets.push_back(nn2);
tiny_dnn::network<tiny_dnn::sequential> nn3;
nn3 << conv(19, 19, 1, 1024, 256, tiny_dnn::padding::same) // extra.0
<< relu() << pad(19, 19, 256, 1, 1)
<< conv(21, 21, 3, 256, 512, tiny_dnn::padding::valid, true, 2,
2) // extra.1
<< relu();
nets.push_back(nn3);
tiny_dnn::network<tiny_dnn::sequential> nn4;
nn4 << conv(10, 10, 1, 512, 128, tiny_dnn::padding::same) // extra.2
<< relu() << pad(10, 10, 128, 1, 1)
<< conv(12, 12, 3, 128, 256, tiny_dnn::padding::valid, true, 2,
2) // extra.3
<< relu();
nets.push_back(nn4);
tiny_dnn::network<tiny_dnn::sequential> nn5;
nn5 << conv(5, 5, 1, 256, 128, tiny_dnn::padding::same) // extra.4
<< relu() << conv(5, 5, 3, 128, 256, tiny_dnn::padding::valid) // extra.5
<< relu();
nets.push_back(nn5);
tiny_dnn::network<tiny_dnn::sequential> nn6;
nn6 << conv(3, 3, 1, 256, 128, tiny_dnn::padding::same) // extra.6
<< relu() << conv(3, 3, 3, 128, 256, tiny_dnn::padding::valid) // extra.7
<< relu();
nets.push_back(nn6);
// Locations
tiny_dnn::network<tiny_dnn::sequential> nn7;
nn7 << conv(38, 38, 3, 512, 16, tiny_dnn::padding::same); // loc.0
nets.push_back(nn7);
tiny_dnn::network<tiny_dnn::sequential> nn8;
nn8 << conv(19, 19, 3, 1024, 24, tiny_dnn::padding::same); // loc.1
nets.push_back(nn8);
tiny_dnn::network<tiny_dnn::sequential> nn9;
nn9 << conv(10, 10, 3, 512, 24, tiny_dnn::padding::same); // loc.2
nets.push_back(nn9);
tiny_dnn::network<tiny_dnn::sequential> nn10;
nn10 << conv(5, 5, 3, 256, 24, tiny_dnn::padding::same); // loc.3
nets.push_back(nn10);
tiny_dnn::network<tiny_dnn::sequential> nn11;
nn11 << conv(3, 3, 3, 256, 16, tiny_dnn::padding::same); // loc.4
nets.push_back(nn11);
tiny_dnn::network<tiny_dnn::sequential> nn12;
nn12 << conv(1, 1, 3, 256, 16, tiny_dnn::padding::same); // loc.5
nets.push_back(nn12);
// Confidences
tiny_dnn::network<tiny_dnn::sequential> nn13;
nn13 << conv(38, 38, 3, 512, 4 * N_CLASSES,
tiny_dnn::padding::same); // conf.0
nets.push_back(nn13);
tiny_dnn::network<tiny_dnn::sequential> nn14;
nn14 << conv(19, 19, 3, 1024, 6 * N_CLASSES,
tiny_dnn::padding::same); // conf.1
nets.push_back(nn14);
tiny_dnn::network<tiny_dnn::sequential> nn15;
nn15 << conv(10, 10, 3, 512, 6 * N_CLASSES,
tiny_dnn::padding::same); // conf.2
nets.push_back(nn15);
tiny_dnn::network<tiny_dnn::sequential> nn16;
nn16 << conv(5, 5, 3, 256, 6 * N_CLASSES, tiny_dnn::padding::same); // conf.3
nets.push_back(nn16);
tiny_dnn::network<tiny_dnn::sequential> nn17;
nn17 << conv(3, 3, 3, 256, 4 * N_CLASSES, tiny_dnn::padding::same); // conf.4
nets.push_back(nn17);
tiny_dnn::network<tiny_dnn::sequential> nn18;
nn18 << conv(1, 1, 3, 256, 4 * N_CLASSES, tiny_dnn::padding::same); // conf.5
nets.push_back(nn18);
for (size_t i = 0; i < 18; ++i) {
std::ostringstream modelPath;
modelPath << modelFolder << std::setfill('0') << std::setw(2) << i + 1
<< ".weights";
std::ifstream ifs(modelPath.str());
if (ifs.fail()) {
std::cout << "Failed to load weights from " << modelPath.str()
<< std::endl;
} else {
std::cout << "Loading weights from " << modelPath.str() << std::endl;
}
ifs >> nets[i];
}
}
void detect(std::vector<tiny_dnn::network<tiny_dnn::sequential>>& nets,
const std::string& src_filename) {
// convert imagefile to vec_t
tiny_dnn::vec_t img;
convert_image(src_filename, INPUT_SIZE, INPUT_SIZE, img);
// multi-scale features
auto feature1 = nets[0].predict(img);
auto feature2 = nets[1].predict(feature1);
auto feature3 = nets[2].predict(feature2);
auto feature4 = nets[3].predict(feature3);
auto feature5 = nets[4].predict(feature4);
auto feature6 = nets[5].predict(feature5);
// locations
auto loc_feature1 = nets[6].predict(feature1); // 16x38x38
auto loc_feature2 = nets[7].predict(feature2); // 24x19x19
auto loc_feature3 = nets[8].predict(feature3); // 24x10x10
auto loc_feature4 = nets[9].predict(feature4); // 24x5x5
auto loc_feature5 = nets[10].predict(feature5); // 16x3x3
auto loc_feature6 = nets[11].predict(feature6); // 16x1x1
  // confidences
auto conf_feature1 = nets[12].predict(feature1); // (4*n_classes)x38x38
auto conf_feature2 = nets[13].predict(feature2); // (6*n_classes)x19x19
auto conf_feature3 = nets[14].predict(feature3); // (6*n_classes)x10x10
auto conf_feature4 = nets[15].predict(feature4); // (6*n_classes)x5x5
auto conf_feature5 = nets[16].predict(feature5); // (4*n_classes)x3x3
auto conf_feature6 = nets[17].predict(feature6); // (4*n_classes)x1x1
tiny_dnn::vec_t locations;
size_t n_location_items = 0;
locations.resize(N_ANCHORS * 4);
concat_hwc_features(locations, n_location_items, loc_feature1, 38 * 38, 16);
n_location_items += loc_feature1.size();
concat_hwc_features(locations, n_location_items, loc_feature2, 19 * 19, 24);
n_location_items += loc_feature2.size();
concat_hwc_features(locations, n_location_items, loc_feature3, 10 * 10, 24);
n_location_items += loc_feature3.size();
concat_hwc_features(locations, n_location_items, loc_feature4, 5 * 5, 24);
n_location_items += loc_feature4.size();
concat_hwc_features(locations, n_location_items, loc_feature5, 3 * 3, 16);
n_location_items += loc_feature5.size();
concat_hwc_features(locations, n_location_items, loc_feature6, 1 * 1, 16);
n_location_items += loc_feature6.size();
tiny_dnn::vec_t default_boxes;
default_boxes.resize(N_ANCHORS * 4);
init_default_boxes(default_boxes);
tiny_dnn::vec_t bounding_boxes;
bounding_boxes.resize(N_ANCHORS * 4);
for (size_t i = 0; i < N_ANCHORS; ++i) {
// regress center x and center y for bounding boxes
float cx =
locations[i * 4] * 0.1 * default_boxes[i * 4 + 2] + default_boxes[i * 4];
float cy = locations[i * 4 + 1] * 0.1 * default_boxes[i * 4 + 3] +
default_boxes[i * 4 + 1];
// regress width and height for bounding boxes
float width = exp(locations[i * 4 + 2] * 0.2) * default_boxes[i * 4 + 2];
float height = exp(locations[i * 4 + 3] * 0.2) * default_boxes[i * 4 + 3];
bounding_boxes[i * 4] = cx - width / 2;
bounding_boxes[i * 4 + 1] = cy - height / 2;
bounding_boxes[i * 4 + 2] = cx + width / 2;
bounding_boxes[i * 4 + 3] = cy + height / 2;
}
tiny_dnn::vec_t confidences;
size_t n_confidence_items = 0;
confidences.resize(N_ANCHORS * N_CLASSES);
concat_hwc_features(confidences, n_confidence_items, conf_feature1, 38 * 38,
4 * N_CLASSES);
n_confidence_items += conf_feature1.size();
concat_hwc_features(confidences, n_confidence_items, conf_feature2, 19 * 19,
6 * N_CLASSES);
n_confidence_items += conf_feature2.size();
concat_hwc_features(confidences, n_confidence_items, conf_feature3, 10 * 10,
6 * N_CLASSES);
n_confidence_items += conf_feature3.size();
concat_hwc_features(confidences, n_confidence_items, conf_feature4, 5 * 5,
6 * N_CLASSES);
n_confidence_items += conf_feature4.size();
concat_hwc_features(confidences, n_confidence_items, conf_feature5, 3 * 3,
4 * N_CLASSES);
n_confidence_items += conf_feature5.size();
concat_hwc_features(confidences, n_confidence_items, conf_feature6, 1 * 1,
4 * N_CLASSES);
n_confidence_items += conf_feature6.size();
// Softmax
inline_softmax(confidences);
// Get class labels for bounding boxes
std::vector<int> bounding_box_indexes;
std::vector<int> bounding_box_classes;
std::vector<float> bounding_box_confidences;
for (size_t i = 0; i < N_ANCHORS; ++i) {
int max_conf_id = -1;
float max_conf = -1;
for (size_t j = 0; j < N_CLASSES; ++j) {
float conf = confidences[i * N_CLASSES + j];
if (conf > max_conf) {
max_conf_id = j;
max_conf = conf;
}
}
if (max_conf_id != BG_CLASS_ID) {
bounding_box_indexes.push_back(i);
bounding_box_classes.push_back(max_conf_id);
bounding_box_confidences.push_back(max_conf);
}
}
// Get valid bounding boxes
std::vector<tiny_dnn::bounding_box> bounding_box_candidates;
for (size_t i = 0; i < bounding_box_indexes.size(); ++i) {
int bbox_index = bounding_box_indexes[i];
tiny_dnn::bounding_box bbox;
bbox.x_min = bounding_boxes[bbox_index * 4] * INPUT_SIZE;
bbox.y_min = bounding_boxes[bbox_index * 4 + 1] * INPUT_SIZE;
bbox.x_max = bounding_boxes[bbox_index * 4 + 2] * INPUT_SIZE;
bbox.y_max = bounding_boxes[bbox_index * 4 + 3] * INPUT_SIZE;
bbox.score = bounding_box_confidences[i];
bounding_box_candidates.push_back(bbox);
}
// Print coordinates of bounding boxes
bounding_box_indexes = tiny_dnn::nms(bounding_box_candidates, NMS_THRESHOLD);
if (bounding_box_indexes.size()) {
std::cout << "Bounding box coordinates:" << std::endl;
} else {
std::cout << "No targets detected." << std::endl;
}
for (size_t i = 0; i < bounding_box_indexes.size(); ++i) {
int bbox_index = bounding_box_indexes[i];
tiny_dnn::bounding_box bbox = bounding_box_candidates[bbox_index];
std::cout << "x_min = " << bbox.x_min << ", "
<< "x_max = " << bbox.x_max << ", "
<< "y_min = " << bbox.y_min << ", "
<< "y_max = " << bbox.y_max << ", "
<< "class = " << bounding_box_classes[bbox_index] << ", "
<< "score = " << bounding_box_confidences[bbox_index]
<< std::endl;
}
}
int main(int argc, char** argv) {
if (argc != 3) {
std::cout << "Usage: example_ssd_test model_folder_path img_file_path";
return -1;
}
std::vector<tiny_dnn::network<tiny_dnn::sequential>> nets;
construct_nets(nets, argv[1]);
detect(nets, argv[2]);
}
| 7,555 |
1,862 | #ifndef BLINKER_SERIAL_GPRS_H
#define BLINKER_SERIAL_GPRS_H
#if ARDUINO >= 100
#include <Arduino.h>
#else
#include <WProgram.h>
#endif
#include "../Blinker/BlinkerConfig.h"
#include "../Blinker/BlinkerDebug.h"
#include "../Blinker/BlinkerStream.h"
#include "../Blinker/BlinkerUtility.h"
#include "../Functions/BlinkerHTTPAIR202.h"
#ifndef ARDUINOJSON_VERSION_MAJOR
#include "../modules/ArduinoJson/ArduinoJson.h"
#endif
#include "../Functions/BlinkerMQTTAIR202.h"
// #if defined(ESP32)
// #include <HardwareSerial.h>
// HardwareSerial *HSerial;
// #else
// #include <SoftwareSerial.h>
// SoftwareSerial *SSerial;
// #endif
char* MQTT_HOST_GPRS;
char* MQTT_ID_GPRS;
char* MQTT_NAME_GPRS;
char* MQTT_KEY_GPRS;
char* MQTT_PRODUCTINFO_GPRS;
char* UUID_GPRS;
char* AUTHKEY_GPRS;
char* MQTT_DEVICEID_GPRS;
char* DEVICE_NAME_GPRS;
char* BLINKER_PUB_TOPIC_GPRS;
char* BLINKER_SUB_TOPIC_GPRS;
uint16_t MQTT_PORT_GPRS;
BlinkerMQTTAIR202* mqtt_GPRS;
class BlinkerSerialGPRS : public BlinkerStream
{
public :
BlinkerSerialGPRS()
: stream(NULL), isConnect(false)
{}
int connect();
int connected();
int mConnected();
void disconnect();
void ping();
int available();
void subscribe();
int timedRead();
char * lastRead() { if (isFresh_GPRS) return msgBuf_GPRS; return ""; }
void flush();
// int print(const String & s, bool needCheck = true);
int print(char * data, bool needCheck = true);
int toServer(char * data);
// int bPrint(char * name, const String & data);
// int aliPrint(const String & s);
// int duerPrint(const String & s);
// int aliPrint(const String & data);
// int duerPrint(const String & data, bool report = false);
void begin(const char* _deviceType, String _imei);
void initStream(Stream& s, bool state, blinker_callback_t func);
char * deviceName();
char * authKey() { return AUTHKEY_GPRS; }
char * token() { if (!isMQTTinit) return ""; else return MQTT_KEY_GPRS; }
int init() { return isMQTTinit; }
int reRegister() { return connectServer(); }
int deviceRegister() { return connectServer(); }
// int authCheck();
void freshAlive() { kaTime = millis(); isAlive = true; }
// int needFreshShare();
private :
bool isMQTTinit = false;
int connectServer();
void checkKA();
int checkCanPrint();
int checkPrintSpan();
protected :
Stream* stream;
// char* streamData;
char* msgBuf_GPRS;
// bool isFresh = false;
bool isFresh_GPRS = false;
bool isConnect;
bool isHWS = false;
char* imei;
uint8_t respTimes = 0;
uint32_t respTime = 0;
bool isAvail_GPRS = false;
uint8_t dataFrom_GPRS = BLINKER_MSG_FROM_MQTT;
// uint8_t _sharerFrom = BLINKER_MQTT_FROM_AUTHER;
const char* _deviceType;
bool isAlive = false;
uint32_t kaTime = 0;
uint32_t latestTime;
uint32_t printTime = 0;
int isJson(const String & data);
uint8_t reconnect_time = 0;
blinker_callback_t listenFunc = NULL;
};
int BlinkerSerialGPRS::connect()
{
if (!isMQTTinit) return false;
if (mqtt_GPRS->connected()) return true;
disconnect();
if ((millis() - latestTime) < BLINKER_MQTT_CONNECT_TIMESLOT && latestTime > 0)
{
yield();
return false;
}
BLINKER_LOG(BLINKER_F("Connecting to MQTT... "));
BLINKER_LOG_FreeHeap_ALL();
if (!mqtt_GPRS->connect())
{
BLINKER_LOG(BLINKER_F("Retrying MQTT connection in "), \
BLINKER_MQTT_CONNECT_TIMESLOT/1000, \
BLINKER_F(" seconds..."));
this->latestTime = millis();
reconnect_time += 1;
if (reconnect_time >= 12)
{
reRegister();
reconnect_time = 0;
}
return false;
}
reconnect_time = 0;
BLINKER_LOG(BLINKER_F("MQTT Connected!"));
BLINKER_LOG_FreeHeap();
this->latestTime = millis();
return true;
}
int BlinkerSerialGPRS::connected()
{
if (!isMQTTinit) return false;
return mqtt_GPRS->connected();
}
int BlinkerSerialGPRS::mConnected()
{
if (!isMQTTinit) return false;
else return mqtt_GPRS->connected();
}
void BlinkerSerialGPRS::disconnect()
{
if (isMQTTinit) mqtt_GPRS->disconnect();
}
void BlinkerSerialGPRS::ping()
{
BLINKER_LOG_ALL(BLINKER_F("MQTT Ping!"));
BLINKER_LOG_FreeHeap_ALL();
if (!isMQTTinit) return;
if (!mqtt_GPRS->connected())
{
disconnect();
// delay(100);
// connect();
}
else
{
this->latestTime = millis();
}
}
int BlinkerSerialGPRS::available()
{
if (isMQTTinit) {
checkKA();
// if (!mqtt_PRO->connected() || \
// (millis() - this->latestTime) > BLINKER_MQTT_PING_TIMEOUT)
if ((millis() - this->latestTime) > 30000)
{
ping();
}
else
{
subscribe();
}
}
if (isAvail_GPRS)
{
isAvail_GPRS = false;
return true;
}
else {
return false;
}
}
void BlinkerSerialGPRS::subscribe()
{
if (!isMQTTinit) return;
if (mqtt_GPRS->readSubscription())
{
BLINKER_LOG_ALL(BLINKER_F("Got: "), mqtt_GPRS->lastRead);
// DynamicJsonBuffer jsonBuffer;
// JsonObject& root = jsonBuffer.parseObject(String(mqtt_GPRS->lastRead));
DynamicJsonDocument jsonBuffer(1024);
DeserializationError error = deserializeJson(jsonBuffer, String(mqtt_GPRS->lastRead));
JsonObject root = jsonBuffer.as<JsonObject>();
String _uuid = root["fromDevice"];
String dataGet = root["data"];
BLINKER_LOG_ALL(BLINKER_F("data: "), dataGet);
BLINKER_LOG_ALL(BLINKER_F("fromDevice: "), _uuid);
if (strcmp(_uuid.c_str(), UUID_GPRS) == 0)
{
BLINKER_LOG_ALL(BLINKER_F("Authority uuid"));
kaTime = millis();
isAvail_GPRS = true;
isAlive = true;
// _sharerFrom = BLINKER_MQTT_FROM_AUTHER;
}
if (isFresh_GPRS) free(msgBuf_GPRS);
msgBuf_GPRS = (char*)malloc((dataGet.length()+1)*sizeof(char));
strcpy(msgBuf_GPRS, dataGet.c_str());
isFresh_GPRS = true;
this->latestTime = millis();
dataFrom_GPRS = BLINKER_MSG_FROM_MQTT;
}
}
void BlinkerSerialGPRS::flush()
{
if (isFresh_GPRS)
{
free(msgBuf_GPRS); isFresh_GPRS = false; isAvail_GPRS = false;
// isAliAvail = false; //isBavail = false;
}
}
int BlinkerSerialGPRS::timedRead()
{
int c;
uint32_t _startMillis = millis();
do {
c = stream->read();
if (c >= 0) return c;
} while(millis() - _startMillis < 1000);
return -1;
}
// void BlinkerSerialGPRS::flush()
// {
// if (isFresh)
// {
// free(streamData); isFresh_GPRS = false;
// }
// }
int BlinkerSerialGPRS::print(char * data, bool needCheck)
{
BLINKER_LOG_ALL(BLINKER_F("data: "), data);
uint16_t num = strlen(data);
data[num+8] = '\0';
for(uint16_t c_num = num; c_num > 0; c_num--)
{
data[c_num+7] = data[c_num-1];
}
// String data_add = BLINKER_F("{\"data\":");
char data_add[20] = "{\"data\":";
for(uint16_t c_num = 0; c_num < 8; c_num++)
{
data[c_num] = data_add[c_num];
}
// data_add = BLINKER_F(",\"fromDevice\":\"");
// strcat(data, data_add.c_str());
strcat(data, ",\"fromDevice\":\"");
strcat(data, MQTT_ID_GPRS);
// strcat(data, MQTT_DEVICEID_GPRS); //PRO
// data_add = BLINKER_F("\",\"toDevice\":\"");
// strcat(data, data_add.c_str());
strcat(data, "\",\"toDevice\":\"");
// if (_sharerFrom < BLINKER_MQTT_MAX_SHARERS_NUM)
// {
// strcat(data, _sharers[_sharerFrom]->uuid());
// }
// else
// {
strcat(data, UUID_GPRS);
// }
// data_add = BLINKER_F("\",\"deviceType\":\"OwnApp\"}");
// _sharerFrom = BLINKER_MQTT_FROM_AUTHER;
// strcat(data, data_add.c_str());
strcat(data, "\",\"deviceType\":\"OwnApp\"}");
// data_add = STRING_format(data);
if (!isJson(STRING_format(data))) return false;
// data_add.replace("\"", "\\22");
// strcpy(data, data_add.c_str());
uint16_t d_data_len;
for (uint16_t d_num = 0; d_num < 1024; d_num++)
{
if (data[d_num] == '\"')
{
data[d_num] = '\\';
d_data_len = strlen(data);
// BLINKER_LOG_ALL(BLINKER_F("d_num: "), d_num,
// BLINKER_F(", d_data_len: "), d_data_len);
for(uint16_t c_num = d_data_len; c_num > d_num; c_num--)
{
data[c_num + 2] = data[c_num];
}
data[d_num + 1] = '2';
data[d_num + 2] = '2';
}
}
// #if defined(ESP8266)
// data_add = "";
// #endif
// if (!isJson(STRING_format(data)) return false;
// strcpy(data, STRING_format(data).replace("\"", "\\22").c_str());
// String msg_data = STRING_format(data);
// msg_data.replace("\"", "\\22");
BLINKER_LOG_ALL(BLINKER_F("MQTT Publish..."));
BLINKER_LOG_FreeHeap_ALL();
bool _alive = isAlive;
if (needCheck)
{
if (!checkPrintSpan())
{
return false;
}
respTime = millis();
}
if (mqtt_GPRS->connected())
{
if (needCheck)
{
if (!checkCanPrint())
{
if (!_alive)
{
isAlive = false;
}
return false;
}
}
// if (! mqtt_GPRS->publish(BLINKER_PUB_TOPIC_GPRS, msg_data.c_str()))
if (! mqtt_GPRS->publish(BLINKER_PUB_TOPIC_GPRS, data))
{
BLINKER_LOG_ALL(data);
BLINKER_LOG_ALL(BLINKER_F("...Failed"));
BLINKER_LOG_FreeHeap_ALL();
if (!_alive)
{
isAlive = false;
}
return false;
}
else
{
BLINKER_LOG_ALL(data);
BLINKER_LOG_ALL(BLINKER_F("...OK!"));
BLINKER_LOG_FreeHeap_ALL();
if (needCheck) printTime = millis();
if (!_alive)
{
isAlive = false;
}
this->latestTime = millis();
return true;
}
}
else
{
BLINKER_ERR_LOG(BLINKER_F("MQTT Disconnected"));
isAlive = false;
return false;
}
}
int BlinkerSerialGPRS::toServer(char * data)
{
// if (!checkInit()) return false;
if (!isJson(STRING_format(data))) return false;
BLINKER_LOG_ALL(BLINKER_F("MQTT Publish to server..."));
BLINKER_LOG_FreeHeap_ALL();
bool _alive = isAlive;
if (mqtt_GPRS->connected())
{
if (! mqtt_GPRS->publish(BLINKER_PUB_TOPIC_GPRS, data))
{
BLINKER_LOG_ALL(data);
BLINKER_LOG_ALL(BLINKER_F("...Failed"));
BLINKER_LOG_FreeHeap_ALL();
return false;
}
else
{
BLINKER_LOG_ALL(data);
BLINKER_LOG_ALL(BLINKER_F("...OK!"));
BLINKER_LOG_FreeHeap_ALL();
return true;
}
}
else
{
BLINKER_ERR_LOG(BLINKER_F("MQTT Disconnected"));
isAlive = false;
return false;
}
}
// int BlinkerSerialGPRS::aliPrint(const String & s)
// {
// if (!checkPrintSpan()) {
// respTime = millis();
// return false;
// }
// String _s = s.substring(0, s.length() - 1);
// _s += BLINKER_F(",\"toDeviceAT\":\"AliGenie\"}");
// respTime = millis();
// BLINKER_LOG_ALL(BLINKER_F("AliGenie Response: "), _s);
// if(connected()) {
// BLINKER_LOG_ALL(BLINKER_F("Success..."));
// stream->println(_s);
// return true;
// }
// else {
// BLINKER_LOG_ALL(BLINKER_F("Faile... Disconnected"));
// return false;
// }
// }
// int BlinkerSerialGPRS::duerPrint(const String & s)
// {
// if (!checkPrintSpan()) {
// respTime = millis();
// return false;
// }
// String _s = s.substring(0, s.length() - 1);
// _s += BLINKER_F(",\"toDeviceAT\":\"DuerOS\"}");
// respTime = millis();
// BLINKER_LOG_ALL(BLINKER_F("DuerOS Response: "), _s);
// if(connected()) {
// BLINKER_LOG_ALL(BLINKER_F("Success..."));
// stream->println(_s);
// return true;
// }
// else {
// BLINKER_LOG_ALL(BLINKER_F("Faile... Disconnected"));
// return false;
// }
// }
// int BlinkerSerialGPRS::print(const String & s, bool needCheck)
void BlinkerSerialGPRS::begin(const char* _type, String _imei)
{
_deviceType = _type;
BLINKER_LOG_ALL(BLINKER_F("PRO deviceType: "), _type);
// stream = &s;
// stream->setTimeout(BLINKER_STREAM_TIMEOUT);
// isHWS = state;
imei = (char*)malloc((_imei.length() + 1)*sizeof(char));
strcpy(imei, _imei.c_str());
}
void BlinkerSerialGPRS::initStream(Stream& s, bool state, blinker_callback_t func)
{
// _deviceType = _type;
// BLINKER_LOG_ALL(BLINKER_F("PRO deviceType: "), _type);
stream = &s;
stream->setTimeout(BLINKER_STREAM_TIMEOUT);
isHWS = state;
listenFunc = func;
// streamPrint(BLINKER_CMD_CRESET_RESQ);
// _imei = (char*)malloc(imei.length()*sizeof(char));
// strcpy(_imei, imei.c_str());
}
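// A minimal wiring sketch (serial pins, device type and IMEI are
// illustrative placeholders; SoftwareSerial availability is board-dependent):
//
//   SoftwareSerial gprsSerial(2, 3);
//   BlinkerSerialGPRS transport;
//   transport.begin("DiyArduino", "862991234567890");
//   transport.initStream(gprsSerial, false, NULL);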
char * BlinkerSerialGPRS::deviceName() { return MQTT_DEVICEID_GPRS;/*MQTT_ID_PRO;*/ }
void BlinkerSerialGPRS::checkKA()
{
if (millis() - kaTime >= BLINKER_MQTT_KEEPALIVE)
isAlive = false;
}
int BlinkerSerialGPRS::checkCanPrint() {
if ((millis() - printTime >= BLINKER_PRO_MSG_LIMIT && isAlive) || printTime == 0) {
return true;
}
else {
BLINKER_ERR_LOG(BLINKER_F("MQTT NOT ALIVE OR MSG LIMIT"));
checkKA();
return false;
}
}
int BlinkerSerialGPRS::checkPrintSpan()
{
if (millis() - respTime < BLINKER_PRINT_MSG_LIMIT)
{
if (respTimes > BLINKER_PRINT_MSG_LIMIT)
{
BLINKER_ERR_LOG(BLINKER_F("DEVICE NOT CONNECT OR MSG LIMIT"));
return false;
}
else
{
respTimes++;
return true;
}
}
else
{
respTimes = 0;
return true;
}
}
int BlinkerSerialGPRS::connectServer()
{
String host = BLINKER_F(BLINKER_SERVER_HTTPS);
String uri = "";
// uri += BLINKER_F("/api/v1/user/device/register?deviceType=");
// uri += _deviceType;
// uri += BLINKER_F("&deviceName=");
// uri += imei;
uri += BLINKER_F("/api/v1/user/device/diy/auth?authKey=");
uri += _deviceType;
BLINKER_LOG_ALL(BLINKER_F("HTTPS begin: "), host + uri);
BlinkerHTTPAIR202 http(*stream, isHWS, listenFunc);
http.begin(host, uri);
String payload;
if (http.GET())
{
BLINKER_LOG(BLINKER_F("[HTTP] GET... success"));
payload = http.getString();
// return true;
}
else
{
BLINKER_LOG(BLINKER_F("[HTTP] GET... failed"));
return false;
}
BLINKER_LOG_ALL(BLINKER_F("reply was:"));
BLINKER_LOG_ALL(BLINKER_F("=============================="));
BLINKER_LOG_ALL(payload);
BLINKER_LOG_ALL(BLINKER_F("=============================="));
// DynamicJsonBuffer jsonBuffer;
// JsonObject& root = jsonBuffer.parseObject(payload);
DynamicJsonDocument jsonBuffer(1024);
DeserializationError error = deserializeJson(jsonBuffer, payload);
JsonObject root = jsonBuffer.as<JsonObject>();
if (STRING_contains_string(payload, BLINKER_CMD_NOTFOUND) || error ||
!STRING_contains_string(payload, BLINKER_CMD_IOTID)) {
// while(1) {
BLINKER_ERR_LOG(("Please make sure you have register this device!"));
// ::delay(60000);
return false;
// }
}
String _userID = root[BLINKER_CMD_DETAIL][BLINKER_CMD_DEVICENAME];
String _userName = root[BLINKER_CMD_DETAIL][BLINKER_CMD_IOTID];
String _key = root[BLINKER_CMD_DETAIL][BLINKER_CMD_IOTTOKEN];
String _productInfo = root[BLINKER_CMD_DETAIL][BLINKER_CMD_PRODUCTKEY];
String _broker = root[BLINKER_CMD_DETAIL][BLINKER_CMD_BROKER];
String _uuid = root[BLINKER_CMD_DETAIL][BLINKER_CMD_UUID];
if (isMQTTinit)
{
free(MQTT_HOST_GPRS);
free(MQTT_ID_GPRS);
free(MQTT_NAME_GPRS);
free(MQTT_KEY_GPRS);
free(MQTT_PRODUCTINFO_GPRS);
free(UUID_GPRS);
free(DEVICE_NAME_GPRS);
free(BLINKER_PUB_TOPIC_GPRS);
free(BLINKER_SUB_TOPIC_GPRS);
free(mqtt_GPRS);
// free(iotSub_GPRS);
isMQTTinit = false;
}
if (_broker == BLINKER_MQTT_BORKER_ALIYUN) {
// memcpy(DEVICE_NAME_MQTT, _userID.c_str(), 12);
DEVICE_NAME_GPRS = (char*)malloc((_userID.length()+1)*sizeof(char));
strcpy(DEVICE_NAME_GPRS, _userID.c_str());
MQTT_ID_GPRS = (char*)malloc((_userID.length()+1)*sizeof(char));
strcpy(MQTT_ID_GPRS, _userID.c_str());
MQTT_NAME_GPRS = (char*)malloc((_userName.length()+1)*sizeof(char));
strcpy(MQTT_NAME_GPRS, _userName.c_str());
MQTT_KEY_GPRS = (char*)malloc((_key.length()+1)*sizeof(char));
strcpy(MQTT_KEY_GPRS, _key.c_str());
MQTT_PRODUCTINFO_GPRS = (char*)malloc((_productInfo.length()+1)*sizeof(char));
strcpy(MQTT_PRODUCTINFO_GPRS, _productInfo.c_str());
MQTT_HOST_GPRS = (char*)malloc((strlen(BLINKER_MQTT_ALIYUN_HOST)+1)*sizeof(char));
strcpy(MQTT_HOST_GPRS, BLINKER_MQTT_ALIYUN_HOST);
MQTT_PORT_GPRS = BLINKER_MQTT_ALIYUN_PORT;
}
UUID_GPRS = (char*)malloc((_uuid.length()+1)*sizeof(char));
strcpy(UUID_GPRS, _uuid.c_str());
BLINKER_LOG_ALL(BLINKER_F("===================="));
BLINKER_LOG_ALL(BLINKER_F("DEVICE_NAME_GPRS: "), DEVICE_NAME_GPRS);
BLINKER_LOG_ALL(BLINKER_F("MQTT_PRODUCTINFO_GPRS: "), MQTT_PRODUCTINFO_GPRS);
BLINKER_LOG_ALL(BLINKER_F("MQTT_ID_GPRS: "), MQTT_ID_GPRS);
BLINKER_LOG_ALL(BLINKER_F("MQTT_NAME_GPRS: "), MQTT_NAME_GPRS);
BLINKER_LOG_ALL(BLINKER_F("MQTT_KEY_GPRS: "), MQTT_KEY_GPRS);
BLINKER_LOG_ALL(BLINKER_F("MQTT_BROKER: "), _broker);
BLINKER_LOG_ALL(BLINKER_F("HOST: "), MQTT_HOST_GPRS);
BLINKER_LOG_ALL(BLINKER_F("PORT: "), MQTT_PORT_GPRS);
BLINKER_LOG_ALL(BLINKER_F("UUID_GPRS: "), UUID_GPRS);
BLINKER_LOG_ALL(BLINKER_F("===================="));
if (_broker == BLINKER_MQTT_BORKER_ALIYUN) {
String PUB_TOPIC_STR = BLINKER_F("/");
PUB_TOPIC_STR += MQTT_PRODUCTINFO_GPRS;
PUB_TOPIC_STR += BLINKER_F("/");
PUB_TOPIC_STR += MQTT_ID_GPRS;
PUB_TOPIC_STR += BLINKER_F("/s");
BLINKER_PUB_TOPIC_GPRS = (char*)malloc((PUB_TOPIC_STR.length() + 1)*sizeof(char));
// memcpy(BLINKER_PUB_TOPIC_GPRS, PUB_TOPIC_STR.c_str(), str_len);
strcpy(BLINKER_PUB_TOPIC_GPRS, PUB_TOPIC_STR.c_str());
BLINKER_LOG_ALL(BLINKER_F("BLINKER_PUB_TOPIC_GPRS: "), BLINKER_PUB_TOPIC_GPRS);
String SUB_TOPIC_STR = BLINKER_F("/");
SUB_TOPIC_STR += MQTT_PRODUCTINFO_GPRS;
SUB_TOPIC_STR += BLINKER_F("/");
SUB_TOPIC_STR += MQTT_ID_GPRS;
SUB_TOPIC_STR += BLINKER_F("/r");
BLINKER_SUB_TOPIC_GPRS = (char*)malloc((SUB_TOPIC_STR.length() + 1)*sizeof(char));
// memcpy(BLINKER_SUB_TOPIC_GPRS, SUB_TOPIC_STR.c_str(), str_len);
strcpy(BLINKER_SUB_TOPIC_GPRS, SUB_TOPIC_STR.c_str());
BLINKER_LOG_ALL(BLINKER_F("BLINKER_SUB_TOPIC_GPRS: "), BLINKER_SUB_TOPIC_GPRS);
}
// String _userID = root[BLINKER_CMD_DETAIL][BLINKER_CMD_DEVICENAME];
// String _userName = root[BLINKER_CMD_DETAIL][BLINKER_CMD_IOTID];
// String _key = root[BLINKER_CMD_DETAIL][BLINKER_CMD_IOTTOKEN];
// String _productInfo = root[BLINKER_CMD_DETAIL][BLINKER_CMD_PRODUCTKEY];
// String _broker = root[BLINKER_CMD_DETAIL][BLINKER_CMD_BROKER];
// String _uuid = root[BLINKER_CMD_DETAIL][BLINKER_CMD_UUID];
// String _authKey = root[BLINKER_CMD_DETAIL][BLINKER_CMD_KEY];
// if (isMQTTinit)
// {
// free(MQTT_HOST_GPRS);
// free(MQTT_ID_GPRS);
// free(MQTT_NAME_GPRS);
// free(MQTT_KEY_GPRS);
// free(MQTT_PRODUCTINFO_GPRS);
// free(UUID_GPRS);
// free(AUTHKEY_GPRS);
// free(MQTT_DEVICEID_GPRS);
// free(BLINKER_PUB_TOPIC_GPRS);
// free(BLINKER_SUB_TOPIC_GPRS);
// free(mqtt_GPRS);
// isMQTTinit = false;
// }
// BLINKER_LOG_ALL(("===================="));
// // if (_broker == "BLINKER_MQTT_BORKER_ALIYUN") {
// // memcpy(DEVICE_NAME, _userID.c_str(), 12);
// String _deviceName = _userID.substring(12, 36);
// MQTT_DEVICEID_GPRS = (char*)malloc((_deviceName.length()+1)*sizeof(char));
// strcpy(MQTT_DEVICEID_GPRS, _deviceName.c_str());
// MQTT_ID_GPRS = (char*)malloc((_userID.length()+1)*sizeof(char));
// strcpy(MQTT_ID_GPRS, _userID.c_str());
// MQTT_NAME_GPRS = (char*)malloc((_userName.length()+1)*sizeof(char));
// strcpy(MQTT_NAME_GPRS, _userName.c_str());
// MQTT_KEY_GPRS = (char*)malloc((_key.length()+1)*sizeof(char));
// strcpy(MQTT_KEY_GPRS, _key.c_str());
// MQTT_PRODUCTINFO_GPRS = (char*)malloc((_productInfo.length()+1)*sizeof(char));
// strcpy(MQTT_PRODUCTINFO_GPRS, _productInfo.c_str());
// MQTT_HOST_GPRS = (char*)malloc((strlen(BLINKER_MQTT_ALIYUN_HOST)+1)*sizeof(char));
// strcpy(MQTT_HOST_GPRS, BLINKER_MQTT_ALIYUN_HOST);
// AUTHKEY_GPRS = (char*)malloc((_authKey.length()+1)*sizeof(char));
// strcpy(AUTHKEY_GPRS, _authKey.c_str());
// MQTT_PORT_GPRS = BLINKER_MQTT_ALIYUN_PORT;
// BLINKER_LOG_ALL(("===================="));
// // }
// UUID_GPRS = (char*)malloc((_uuid.length()+1)*sizeof(char));
// strcpy(UUID_GPRS, _uuid.c_str());
// char uuid_eeprom[BLINKER_AUUID_SIZE];
// BLINKER_LOG_ALL(("==========AUTH CHECK=========="));
// // if (!isFirst)
// // {
// // char _authCheck;
// // EEPROM.begin(BLINKER_EEP_SIZE);
// // EEPROM.get(BLINKER_EEP_ADDR_AUUID, uuid_eeprom);
// // if (strcmp(uuid_eeprom, _uuid.c_str()) != 0) {
// // // strcpy(UUID_PRO, _uuid.c_str());
// // strcpy(uuid_eeprom, _uuid.c_str());
// // EEPROM.put(BLINKER_EEP_ADDR_AUUID, uuid_eeprom);
// // EEPROM.get(BLINKER_EEP_ADDR_AUUID, uuid_eeprom);
// // BLINKER_LOG_ALL(BLINKER_F("===================="));
// // BLINKER_LOG_ALL(BLINKER_F("uuid_eeprom: "), uuid_eeprom);
// // BLINKER_LOG_ALL(BLINKER_F("_uuid: "), _uuid);
// // isNew = true;
// // }
// // EEPROM.get(BLINKER_EEP_ADDR_AUTH_CHECK, _authCheck);
// // if (_authCheck != BLINKER_AUTH_CHECK_DATA) {
// // EEPROM.put(BLINKER_EEP_ADDR_AUTH_CHECK, BLINKER_AUTH_CHECK_DATA);
// // isAuth = true;
// // }
// // EEPROM.commit();
// // EEPROM.end();
// // isFirst = true;
// // }
// BLINKER_LOG_ALL(BLINKER_F("===================="));
// BLINKER_LOG_ALL(BLINKER_F("DEVICE_NAME: "), imei);
// BLINKER_LOG_ALL(BLINKER_F("MQTT_PRODUCTINFO_GPRS: "), MQTT_PRODUCTINFO_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("MQTT_DEVICEID_GPRS: "), MQTT_DEVICEID_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("MQTT_ID_GPRS: "), MQTT_ID_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("MQTT_NAME_GPRS: "), MQTT_NAME_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("MQTT_KEY_GPRS: "), MQTT_KEY_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("MQTT_BROKER: "), _broker);
// BLINKER_LOG_ALL(BLINKER_F("HOST: "), MQTT_HOST_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("PORT: "), MQTT_PORT_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("UUID_GPRS: "), UUID_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("AUTHKEY_GPRS: "), AUTHKEY_GPRS);
// BLINKER_LOG_ALL(BLINKER_F("===================="));
// // if (_broker == BLINKER_MQTT_BORKER_ALIYUN) {
// String PUB_TOPIC_STR = BLINKER_F("/");
// PUB_TOPIC_STR += MQTT_PRODUCTINFO_GPRS;
// PUB_TOPIC_STR += BLINKER_F("/");
// PUB_TOPIC_STR += MQTT_DEVICEID_GPRS;
// PUB_TOPIC_STR += BLINKER_F("/s");
// BLINKER_PUB_TOPIC_GPRS = (char*)malloc((PUB_TOPIC_STR.length() + 1)*sizeof(char));
// strcpy(BLINKER_PUB_TOPIC_GPRS, PUB_TOPIC_STR.c_str());
// BLINKER_LOG_ALL(BLINKER_F("BLINKER_PUB_TOPIC_GPRS: "), BLINKER_PUB_TOPIC_GPRS);
// String SUB_TOPIC_STR = BLINKER_F("/");
// SUB_TOPIC_STR += MQTT_PRODUCTINFO_GPRS;
// SUB_TOPIC_STR += BLINKER_F("/");
// SUB_TOPIC_STR += MQTT_DEVICEID_GPRS;
// SUB_TOPIC_STR += BLINKER_F("/r");
// BLINKER_SUB_TOPIC_GPRS = (char*)malloc((SUB_TOPIC_STR.length() + 1)*sizeof(char));
// strcpy(BLINKER_SUB_TOPIC_GPRS, SUB_TOPIC_STR.c_str());
// BLINKER_LOG_ALL(BLINKER_F("BLINKER_SUB_TOPIC_GPRS: "), BLINKER_SUB_TOPIC_GPRS);
// // }
// if (_broker == BLINKER_MQTT_BORKER_ALIYUN) {
mqtt_GPRS = new BlinkerMQTTAIR202(*stream, isHWS, MQTT_HOST_GPRS, MQTT_PORT_GPRS,
MQTT_ID_GPRS, MQTT_NAME_GPRS, MQTT_KEY_GPRS, listenFunc);
// }
this->latestTime = millis();
isMQTTinit = true;
mqtt_GPRS->subscribe(BLINKER_SUB_TOPIC_GPRS);
return true;
}
// int BlinkerSerialGPRS::connect()
// {
// stream->println(STRING_format(BLINKER_CMD_MCONFIG_RESQ) +
// "=\"" + MQTT_ID_GPRS +
// "\",\"" + MQTT_NAME_GPRS +
// "\",\"" + MQTT_KEY_GPRS + "\"");
// }
int BlinkerSerialGPRS::isJson(const String & data)
{
BLINKER_LOG_ALL(BLINKER_F("isJson: "), data);
// DynamicJsonBuffer jsonBuffer;
// JsonObject& root = jsonBuffer.parseObject(data);
DynamicJsonDocument jsonBuffer(1024);
DeserializationError error = deserializeJson(jsonBuffer, data);
JsonObject root = jsonBuffer.as<JsonObject>();
// if (!root.success())
if (error)
{
BLINKER_ERR_LOG("Print data is not Json! ", data);
return false;
}
return true;
}
#endif
| 14,047 |
365 | <reponame>larskuhtz/massiv
#ifndef MASSIV_INCLUDE
#define MASSIV_INCLUDE
#if MASSIV_UNSAFE_CHECKS
#define INDEX_CHECK(name, s, f) (indexWith __FILE__ __LINE__ (name) (s) (f))
#else
#define INDEX_CHECK(name, s, f) ((f))
#endif
#endif
| 112 |
2,542 | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
namespace KtlInteropTest
{
using ::_delete;
class ComComponent : public KShared<ComComponent>,
public KObject<ComComponent>,
public IKtlInteropTestComponent
{
K_FORCE_SHARED(ComComponent)
K_BEGIN_COM_INTERFACE_LIST(ComComponent)
COM_INTERFACE_ITEM(IID_IUnknown, IKtlInteropTestComponent)
COM_INTERFACE_ITEM(IID_IKtlInteropTestComponent, IKtlInteropTestComponent)
K_END_COM_INTERFACE_LIST()
public:
static HRESULT Create(
__in KAllocator & allocator,
__out ComPointer<IKtlInteropTestComponent> & component);
STDMETHODIMP BeginOperation(
__in HRESULT beginResult,
__in HRESULT endResult,
__in IFabricAsyncOperationCallback *callback,
__out IFabricAsyncOperationContext **context);
STDMETHODIMP EndOperation(
__in IFabricAsyncOperationContext *context);
private:
class AsyncOperationContext : public Ktl::Com::FabricAsyncContextBase
{
friend ComComponent;
K_FORCE_SHARED(AsyncOperationContext)
public:
ErrorCode StartOperation(
__in HRESULT beginResult,
__in HRESULT endResult,
__in_opt KAsyncContextBase* const parent,
__in_opt CompletionCallback callback);
protected:
void OnStart();
void OnReuse();
private:
ComComponent::SPtr _owner;
HRESULT _endResult;
};
};
};
| 825 |
335 | <gh_stars>100-1000
{
"word": "Electrodeposit",
"definitions": [
"To cause (a metal) to be deposited from solution by electrolytic action; to apply (a coating of a metal) in this way, by making the article to be coated act as the cathode in an electrolytic cell where the metal forms the anode."
],
"parts-of-speech": "Verb"
} | 117 |
488 | <reponame>maurizioabba/rose<gh_stars>100-1000
// t0433.cc
// matching out-of-line defn when arg type is dependent qualified
template <class T>
struct A {
typedef T some_type;
int foo1(some_type s);
int foo2(some_type s);
};
template <class T>
int A<T>::foo1(typename A<T>::some_type s)
{
return 2;
}
template <class T2>
int A<T2>::foo2(typename A<T2>::some_type s)
{
return 3;
}
void f()
{
A<int> a;
a.foo1(1);
a.foo2(2);
}
template <class T>
struct A<T*> {
typedef T some_type;
int foo(some_type** s);
};
template <class T>
int A<T*>::foo(typename A<T*>::some_type** s)
{
return 2;
}
void g()
{
A<int*> a;
int **q = 0;
a.foo(q);
}
| 319 |
414 | <reponame>huggingface/optimum<filename>optimum/runs_base.py<gh_stars>100-1000
import os
import subprocess
from contextlib import contextmanager
from time import perf_counter_ns
import numpy as np
import torch
import transformers
from datasets import Dataset
from tqdm import trange
import optuna
from optimum import version as optimum_version
from .utils.preprocessing import (
QuestionAnsweringProcessing,
TextClassificationProcessing,
TokenClassificationProcessing,
)
from .utils.runs import RunConfig
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def get_autoclass_name(task):
if task in ["text-classification", "audio-classification"]:
autoclass_name = "sequence-classification"
else:
autoclass_name = task
return autoclass_name
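# For example, get_autoclass_name("text-classification") returns
# "sequence-classification", while get_autoclass_name("question-answering")
# comes back unchanged.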
class Calibrator:
def __init__(self, calibration_dataset: Dataset, quantizer, model_path, qconfig, calibration_params):
self.calibration_dataset = calibration_dataset
self.quantizer = quantizer
self.model_path = model_path
self.qconfig = qconfig
self.calibration_params = calibration_params
def calibrate(self):
raise NotImplementedError()
class Run:
def __init__(self, run_config: dict):
"""Initialize the Run class holding methods to perform inference and evaluation given a config.
A run compares a transformers model and an optimized model on latency/throughput, model size, and provided metrics.
Args:
run_config (dict): Parameters to use for the run. See [`~utils.runs.RunConfig`] for the expected keys.
"""
RunConfig(**run_config) # validate the data (useful if used as standalone)
self.task = run_config["task"]
if run_config["quantization_approach"] == "static":
self.static_quantization = True
else:
self.static_quantization = False
search_space = {"batch_size": run_config["batch_sizes"], "input_length": run_config["input_lengths"]}
self.study = optuna.create_study(
directions=["maximize", "minimize"],
sampler=optuna.samplers.GridSampler(search_space),
)
cpu_info = subprocess.check_output(["lscpu"]).decode("utf-8")
optimum_hash = None
if "dev" in optimum_version.__version__:
optimum_hash = subprocess.check_output(
"git ls-remote https://github.com/huggingface/optimum.git HEAD | awk '{ print $1}'", shell=True
)
optimum_hash = optimum_hash.decode("utf-8").strip("\n")
self.return_body = {
"model_name_or_path": run_config["model_name_or_path"],
"task": self.task,
"dataset": run_config["dataset"],
"quantization_approach": run_config["quantization_approach"],
"operators_to_quantize": run_config["operators_to_quantize"],
"node_exclusion": run_config["node_exclusion"],
"aware_training": run_config["aware_training"],
"per_channel": run_config["per_channel"],
"calibration": run_config["calibration"],
"framework": run_config["framework"],
"framework_args": run_config["framework_args"],
"hardware": cpu_info, # is this ok?
"versions": {
"transformers": transformers.__version__,
"optimum": optimum_version.__version__,
"optimum_hash": optimum_hash,
},
"evaluation": {
"time": [],
"others": {"baseline": {}, "optimized": {}},
},
}
def launch(self):
"""Launch inference to compare metrics between the original and optimized model.
These metrics are latency, throughput, model size, and user provided metrics.
Returns:
`dict`: Finalized run data with metrics stored in the "evaluation" key.
"""
try:
self.study.optimize(self._launch_time, n_trials=100, timeout=600)
self.launch_eval()
finally:
self.finalize()
return self.return_body
def _launch_time(self, trial):
"""Optuna objective function to measure latency/throughput.
Populate the `["evaluation"]["time"]` list of the run for various batch size and input length.
Returns:
Dummy data.
"""
raise NotImplementedError()
def launch_eval(self):
"""
Run evaluation on the original and optimized model.
Populate the `["evaluation"]["others"]` subdictionary of the run.
"""
raise NotImplementedError()
def load_datasets(self):
"""Load evaluation dataset, and if needed, calibration dataset for static quantization."""
datasets_dict = self.processor.load_datasets()
self._eval_dataset = datasets_dict["eval"]
if self.static_quantization:
self._calibration_dataset = datasets_dict["calibration"]
def get_calibration_dataset(self):
"""Get calibration dataset. The dataset needs to be loaded first with [`~optimum.runs_base.Run.load_datasets`].
Returns:
`datasets.Dataset`: Calibration dataset.
"""
if not hasattr(self, "_calibration_dataset"):
raise KeyError("No calibration dataset defined for this run.")
return self._calibration_dataset
def get_eval_dataset(self):
"""
Get evaluation dataset. The dataset needs to be loaded first with [`~optimum.runs_base.Run.load_datasets`].
Returns:
`datasets.Dataset`: Evaluation dataset.
"""
if not hasattr(self, "_eval_dataset"):
raise KeyError("No evaluation dataset defined for this run.")
return self._eval_dataset
def finalize(self):
"""Cleanup intermediary files."""
raise NotImplementedError()
SEC_TO_NS_SCALE = 1000000000
NS_TO_MS_SCALE = 1e6
def ns_to_ms(ns_time):
return ns_time / NS_TO_MS_SCALE
class TimeBenchmark:
def __init__(self, model, batch_size: int, input_length: int, has_token_type_ids: bool):
self.batch_size = batch_size
self.input_length = input_length
self.has_token_type_ids = has_token_type_ids
self.model = model
# TODO fix
self.warmup_runs = 2
self.benchmark_duration = 2
self.latencies = []
self.throughput = float("-inf")
@property
def num_runs(self) -> int:
return len(self.latencies)
@contextmanager
def track(self):
start = perf_counter_ns()
yield
end = perf_counter_ns()
# Append the time to the buffer
self.latencies.append(end - start)
print(f"Tracked function took: {(end - start)}ns ({(end - start) / 1e6:.3f}ms)")
def finalize(self, duration_ns: int):
self.throughput = round((len(self.latencies) / duration_ns) * SEC_TO_NS_SCALE, 2)
def to_dict(self):
# Compute stats, beware latencies are stored as ms
benchmarks_stats = {
"nb_forwards": len(self.latencies),
"throughput": self.throughput,
"latency_mean": ns_to_ms(np.mean(self.latencies)),
"latency_std": ns_to_ms(np.std(self.latencies)),
"latency_50": ns_to_ms(np.quantile(self.latencies, 0.5)),
"latency_90": ns_to_ms(np.quantile(self.latencies, 0.9)),
"latency_95": ns_to_ms(np.quantile(self.latencies, 0.95)),
"latency_99": ns_to_ms(np.quantile(self.latencies, 0.99)),
"latency_999": ns_to_ms(np.quantile(self.latencies, 0.999)),
}
return benchmarks_stats
def execute(self):
inputs = {
"input_ids": torch.randint(high=1000, size=(self.batch_size, self.input_length)),
"attention_mask": torch.ones(self.batch_size, self.input_length, dtype=torch.int64),
}
if self.has_token_type_ids:
inputs["token_type_ids"] = torch.ones(self.batch_size, self.input_length, dtype=torch.int64)
# Warmup
outputs = []
for _ in trange(self.warmup_runs, desc="Warming up"):
output = self.model.forward(**inputs)
outputs.append(output[0])
benchmark_duration_ns = self.benchmark_duration * SEC_TO_NS_SCALE
while sum(self.latencies) < benchmark_duration_ns:
            # TODO: do not track GPU/CPU <--> numpy/torch transfers; need to change the implementation of forward
with self.track():
self.model.forward(**inputs)
self.finalize(benchmark_duration_ns)
return self.to_dict()
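# Hypothetical usage sketch (any object exposing a transformers-style
# `forward(**inputs)` works as the model; the names below are illustrative):
#
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained("bert-base-uncased")
#   stats = TimeBenchmark(model, batch_size=8, input_length=128,
#                         has_token_type_ids=True).execute()
#   print(stats["throughput"], stats["latency_mean"])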
task_processing_map = {
"text-classification": TextClassificationProcessing,
"token-classification": TokenClassificationProcessing,
"question-answering": QuestionAnsweringProcessing,
}
| 3,770 |
872 | #!/usr/bin/python3
"""
Given n pairs of parentheses, write a function to generate all combinations of
well-formed parentheses.
For example, given n = 3, a solution set is:
[
"((()))",
"(()())",
"(())()",
"()(())",
"()()()"
]
"""
from typing import List
class Solution:
def generateParenthesis(self, n: int) -> List[str]:
"""
Method 1 - backtracking
Method 2 - dp
Let F[n] be the list of parentheses at length 2n
"""
F: List[List[str]] = [[] for _ in range(n + 1)]
F[0].append("")
for i in range(1, n+1):
for j in range(i):
for s1 in F[j]:
for s2 in F[i-j-1]:
F[i].append(f"({s1}){s2}")
return F[n]
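
# A quick sanity check (added for illustration; not part of the original
# solution): n = 3 should reproduce the five strings from the docstring.
if __name__ == "__main__":
    expected = ["((()))", "(()())", "(())()", "()(())", "()()()"]
    assert sorted(Solution().generateParenthesis(3)) == sorted(expected)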
| 374 |
718 | <reponame>plutoyuxie/mmgeneration<gh_stars>100-1000
import torch
from mmgen.models.diffusions import UniformTimeStepSampler
def test_uniform_sampler():
sampler = UniformTimeStepSampler(10)
timesteps = sampler(2)
assert timesteps.shape == torch.Size([
2,
])
assert timesteps.max() < 10 and timesteps.min() >= 0
timesteps = sampler.__call__(2)
assert timesteps.shape == torch.Size([
2,
])
assert timesteps.max() < 10 and timesteps.min() >= 0
| 201 |
2,577 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl.runtime;
import java.util.ArrayList;
import java.util.List;
import org.camunda.bpm.engine.impl.ProcessEngineLogger;
import org.camunda.bpm.engine.impl.bpmn.helper.BpmnProperties;
import org.camunda.bpm.engine.impl.bpmn.parser.ConditionalEventDefinition;
import org.camunda.bpm.engine.impl.bpmn.parser.EventSubscriptionDeclaration;
import org.camunda.bpm.engine.impl.cmd.CommandLogger;
import org.camunda.bpm.engine.impl.event.EventType;
import org.camunda.bpm.engine.impl.interceptor.CommandContext;
import org.camunda.bpm.engine.impl.persistence.deploy.cache.DeploymentCache;
import org.camunda.bpm.engine.impl.persistence.entity.EventSubscriptionEntity;
import org.camunda.bpm.engine.impl.persistence.entity.EventSubscriptionManager;
import org.camunda.bpm.engine.impl.persistence.entity.ExecutionEntity;
import org.camunda.bpm.engine.impl.persistence.entity.ProcessDefinitionEntity;
import org.camunda.bpm.engine.impl.pvm.process.ActivityImpl;
/**
* @author <NAME>
*
*/
public class DefaultConditionHandler implements ConditionHandler {
private final static CommandLogger LOG = ProcessEngineLogger.CMD_LOGGER;
@Override
public List<ConditionHandlerResult> evaluateStartCondition(CommandContext commandContext, ConditionSet conditionSet) {
if (conditionSet.getProcessDefinitionId() == null) {
return evaluateConditionStartByEventSubscription(commandContext, conditionSet);
} else {
return evaluateConditionStartByProcessDefinitionId(commandContext, conditionSet, conditionSet.getProcessDefinitionId());
}
}
protected List<ConditionHandlerResult> evaluateConditionStartByEventSubscription(CommandContext commandContext, ConditionSet conditionSet) {
List<EventSubscriptionEntity> subscriptions = findConditionalStartEventSubscriptions(commandContext, conditionSet);
if (subscriptions.isEmpty()) {
throw LOG.exceptionWhenEvaluatingConditionalStartEvent();
}
List<ConditionHandlerResult> results = new ArrayList<ConditionHandlerResult>();
for (EventSubscriptionEntity subscription : subscriptions) {
ProcessDefinitionEntity processDefinition = subscription.getProcessDefinition();
if (!processDefinition.isSuspended()) {
ActivityImpl activity = subscription.getActivity();
if (evaluateCondition(conditionSet, activity)) {
results.add(new ConditionHandlerResult(processDefinition, activity));
}
}
}
return results;
}
protected List<EventSubscriptionEntity> findConditionalStartEventSubscriptions(CommandContext commandContext, ConditionSet conditionSet) {
EventSubscriptionManager eventSubscriptionManager = commandContext.getEventSubscriptionManager();
if (conditionSet.isTenantIdSet) {
return eventSubscriptionManager.findConditionalStartEventSubscriptionByTenantId(conditionSet.getTenantId());
} else {
return eventSubscriptionManager.findConditionalStartEventSubscription();
}
}
protected List<ConditionHandlerResult> evaluateConditionStartByProcessDefinitionId(CommandContext commandContext, ConditionSet conditionSet,
String processDefinitionId) {
DeploymentCache deploymentCache = commandContext.getProcessEngineConfiguration().getDeploymentCache();
ProcessDefinitionEntity processDefinition = deploymentCache.findDeployedProcessDefinitionById(processDefinitionId);
List<ConditionHandlerResult> results = new ArrayList<ConditionHandlerResult>();
if (processDefinition != null && !processDefinition.isSuspended()) {
List<ActivityImpl> activities = findConditionalStartEventActivities(processDefinition);
if (activities.isEmpty()) {
throw LOG.exceptionWhenEvaluatingConditionalStartEventByProcessDefinition(processDefinitionId);
}
for (ActivityImpl activity : activities) {
if (evaluateCondition(conditionSet, activity)) {
results.add(new ConditionHandlerResult(processDefinition, activity));
}
}
}
return results;
}
protected List<ActivityImpl> findConditionalStartEventActivities(ProcessDefinitionEntity processDefinition) {
List<ActivityImpl> activities = new ArrayList<ActivityImpl>();
for (EventSubscriptionDeclaration declaration : ConditionalEventDefinition.getDeclarationsForScope(processDefinition).values()) {
if (isConditionStartEvent(declaration)) {
activities.add(((ConditionalEventDefinition) declaration).getConditionalActivity());
}
}
return activities;
}
protected boolean isConditionStartEvent(EventSubscriptionDeclaration declaration) {
return EventType.CONDITONAL.name().equals(declaration.getEventType()) && declaration.isStartEvent();
}
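  /**
   * Evaluates the conditional event definition of the given activity against a
   * temporary execution populated with the condition set's variables. When the
   * event declares a variable name, evaluation is skipped (returns false) unless
   * that variable is present in the condition set.
   */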
protected boolean evaluateCondition(ConditionSet conditionSet, ActivityImpl activity) {
ExecutionEntity temporaryExecution = new ExecutionEntity();
if (conditionSet.getVariables() != null) {
temporaryExecution.initializeVariableStore(conditionSet.getVariables());
}
temporaryExecution.setProcessDefinition(activity.getProcessDefinition());
ConditionalEventDefinition conditionalEventDefinition = activity.getProperties().get(BpmnProperties.CONDITIONAL_EVENT_DEFINITION);
if (conditionalEventDefinition.getVariableName() == null || conditionSet.getVariables().containsKey(conditionalEventDefinition.getVariableName())) {
return conditionalEventDefinition.tryEvaluate(temporaryExecution);
} else {
return false;
}
}
}
| 1,926 |
879 | <filename>core/src/main/java/org/zstack/core/errorcode/ElaborationCategory.java<gh_stars>100-1000
package org.zstack.core.errorcode;
import org.zstack.header.rest.SDK;
/**
* Created by mingjian.deng on 2018/12/1.
*/
@SDK
public class ElaborationCategory {
private String category;
private Integer num;
public ElaborationCategory() {
}
public ElaborationCategory(String category, Integer num) {
this.category = category;
this.num = num;
}
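    // Representative instance consumed by ZStack's SDK/doc tooling; the
    // __example__ convention appears on @SDK-annotated classes.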
public static ElaborationCategory __example__() {
return new ElaborationCategory("BS", 10);
}
public String getCategory() {
return category;
}
public void setCategory(String category) {
this.category = category;
}
public Integer getNum() {
return num;
}
public void setNum(Integer num) {
this.num = num;
}
}
| 336 |
1,738 | /*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#pragma once
#include <AzToolsFramework/Thumbnails/Thumbnail.h>
#include <AzToolsFramework/Thumbnails/SourceControlThumbnailBus.h>
namespace AzToolsFramework
{
struct SourceControlFileInfo;
namespace Thumbnailer
{
class SourceControlThumbnailKey
: public ThumbnailKey
{
Q_OBJECT
public:
AZ_RTTI(SourceControlFileInfo, "{F6772100-A178-45A7-8F75-41426B07D829}", ThumbnailKey);
explicit SourceControlThumbnailKey(const char* fileName);
const AZStd::string& GetFileName() const;
bool UpdateThumbnail() override;
protected:
//! absolute path
AZStd::string m_fileName;
//! how often should sc thumbnails auto update
const AZStd::chrono::minutes m_updateInterval;
//! time since this sc thumbnail updated
AZStd::chrono::system_clock::time_point m_nextUpdate;
};
//! SourceControlThumbnail currently replicates the source control functionality within Material Browser
//! Additionally source control status is refreshed whenever an operation is performed through context menu
class SourceControlThumbnail
: public Thumbnail
, public SourceControlThumbnailRequestBus::Handler
{
Q_OBJECT
public:
SourceControlThumbnail(SharedThumbnailKey key, int thumbnailSize);
~SourceControlThumbnail() override;
//////////////////////////////////////////////////////////////////////////
// SourceControlNotificationBus
//////////////////////////////////////////////////////////////////////////
void FileStatusChanged(const char* filename) override;
static bool ReadyForUpdate();
public Q_SLOTS:
void Update() override;
private:
//! If another sc thumbnail is currently requesting sc status this will return false
static bool m_readyForUpdate;
            // To avoid flooding the source control update stack with status requests,
            // the thumbnail does not load until this function is called.
void RequestSourceControlStatus();
void SourceControlFileInfoUpdated(bool succeeded, const SourceControlFileInfo& fileInfo);
};
namespace
{
class SourceControlKeyHash
{
public:
size_t operator() (const SharedThumbnailKey& /*val*/) const
{
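                    // Intentionally constant: every key hashes to the same bucket, so
                    // container lookups rely entirely on SourceControlKeyEqual comparing
                    // file names.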
return 0;
}
};
class SourceControlKeyEqual
{
public:
bool operator()(const SharedThumbnailKey& val1, const SharedThumbnailKey& val2) const
{
auto sourceThumbnailKey1 = azrtti_cast<const SourceControlThumbnailKey*>(val1.data());
auto sourceThumbnailKey2 = azrtti_cast<const SourceControlThumbnailKey*>(val2.data());
if (!sourceThumbnailKey1 || !sourceThumbnailKey2)
{
return false;
}
return sourceThumbnailKey1->GetFileName() == sourceThumbnailKey2->GetFileName();
}
};
}
//! Stores products' thumbnails
class SourceControlThumbnailCache
: public ThumbnailCache<SourceControlThumbnail, SourceControlKeyHash, SourceControlKeyEqual>
{
public:
SourceControlThumbnailCache();
~SourceControlThumbnailCache() override;
protected:
bool IsSupportedThumbnail(SharedThumbnailKey key) const override;
};
} // namespace Thumbnailer
} // namespace AzToolsFramework | 1,725 |
1,097 | <gh_stars>1000+
/*
MIT License
Copyright (c) 2018-2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <atomic>
#include <cstddef>
#include <cmath>
#include <thread>
#include <unordered_map>
#include <bx/filepath.h>
#include <bx/string.h>
#include <cgltf.h>
#include <GLFW/glfw3.h>
#include <imgui/imgui.h>
#include <nfd.h>
#include <ofbx.h>
#include <stb_image.h>
#include <stb_image_resize.h>
#define STL_READER_NO_EXCEPTIONS
#include <stl_reader.h>
#include "shaders/shared.h"
#include "viewer.h"
#define ONE_GLTF_OBJECT_PER_MESH 0
bgfx::VertexLayout ModelVertex::layout;
enum class ModelStatus
{
NotLoaded,
Loading,
Finalizing,
Loaded
};
enum class ModelFormat
{
Gltf,
Obj,
Stl
};
struct
{
std::atomic<ModelStatus> status;
std::thread *thread = nullptr;
std::atomic<int> loadProgress;
objzModel *data = nullptr;
void (*destroyModelData)(objzModel *) = nullptr;
std::vector<uint32_t> diffuseTextures;
std::vector<uint32_t> emissionTextures;
AABB aabb;
bx::Vec3 centroid = bx::Vec3(0.0f, 0.0f, 0.0f);
bgfx::VertexBufferHandle vb = BGFX_INVALID_HANDLE;
bgfx::IndexBufferHandle ib = BGFX_INVALID_HANDLE;
bgfx::VertexBufferHandle wireframeVb = BGFX_INVALID_HANDLE;
std::vector<WireframeVertex> wireframeVertices;
float scale = 1.0f;
bool rightHandedAxis = false; // Default is z/-x/y, right handed is -z/x/y.
bool clockwiseFaceWinding = true;
bgfx::ShaderHandle vs_model;
bgfx::ShaderHandle fs_material;
bgfx::ProgramHandle materialProgram;
bgfx::UniformHandle u_diffuse;
bgfx::UniformHandle u_emission;
bgfx::UniformHandle u_lightDir;
bgfx::UniformHandle u_shade_overlay_diffuse_emission;
bgfx::UniformHandle u_textureSize_cellSize;
bgfx::UniformHandle u_overlayOpacity_colorChartType;
bgfx::UniformHandle u_meshColor_primitiveIdStart;
bgfx::UniformHandle s_diffuse;
bgfx::UniformHandle s_emission;
bgfx::UniformHandle s_lightmap;
bgfx::UniformHandle s_faceData;
bgfx::UniformHandle u_color;
bgfx::TextureHandle u_dummyTexture;
}
s_model;
static const float s_rightHandedAxisMatrix[16] = {
0.0f, 0.0f, -1.0f, 0.0f,
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
static bool readFileData(const char *filename, std::vector<uint8_t> *fileData)
{
#if _MSC_VER
FILE *f;
if (fopen_s(&f, filename, "rb") != 0)
f = nullptr;
#else
FILE *f = fopen(filename, "rb");
#endif
if (!f) {
fprintf(stderr, "Error opening '%s'\n", filename);
return false;
}
fseek(f, 0, SEEK_END);
const long length = ftell(f);
fseek(f, 0, SEEK_SET);
fileData->resize(length);
if (fread(fileData->data(), 1, (size_t)length, f) < (size_t)length) {
fclose(f);
fprintf(stderr, "Error reading '%s'\n", filename);
return false;
}
fclose(f);
return true;
}
struct TextureData
{
uint16_t width;
uint16_t height;
const bgfx::Memory *mem;
int numComponents;
// Data used by baking to sample the texture.
uint8_t *sampleData;
uint32_t sampleWidth, sampleHeight;
};
static TextureData textureLoad(const char *basePath, const char *filename)
{
char fullFilename[256] = { 0 };
bx::strCopy(fullFilename, sizeof(fullFilename), basePath);
bx::strCat(fullFilename, sizeof(fullFilename), filename);
TextureData td;
td.mem = nullptr;
td.sampleData = nullptr;
std::vector<uint8_t> fileData;
if (!readFileData(fullFilename, &fileData))
return td;
int width, height, numComponents;
const uint8_t *imageData = stbi_load_from_memory(fileData.data(), (int)fileData.size(), &width, &height, &numComponents, 0);
if (!imageData) {
fprintf(stderr, "Error loading '%s': %s\n", fullFilename, stbi_failure_reason());
return td;
}
printf("Texture '%s': %dx%d %d bpp\n", fullFilename, width, height, numComponents * 8);
// Generate mipmaps.
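	// All mip levels are packed back-to-back into one bgfx memory block; the
	// first loop below only sums the total allocation size.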
const int nMips = 1 + (int)bx::floor(bx::log2((float)bx::max(width, height)));
int mipWidth = width, mipHeight = height;
uint32_t memSize = 0;
for (int i = 0; i < nMips; i++) {
memSize += uint32_t(mipWidth * mipHeight * numComponents);
mipWidth = bx::max(mipWidth >> 1, 1);
mipHeight = bx::max(mipHeight >> 1, 1);
}
const bgfx::Memory *mem = bgfx::alloc(memSize);
memcpy(mem->data, imageData, width * height * numComponents);
stbi_image_free((void *)imageData);
const uint8_t *src = mem->data;
int srcWidth = width, srcHeight = height;
uint8_t *dest = mem->data;
mipWidth = width;
mipHeight = height;
for (int i = 0; i < nMips - 1; i++) {
dest += mipWidth * mipHeight * numComponents;
mipWidth = bx::max(mipWidth >> 1, 1);
mipHeight = bx::max(mipHeight >> 1, 1);
stbir_resize_uint8_srgb(src, srcWidth, srcHeight, srcWidth * numComponents, dest, mipWidth, mipHeight, mipWidth * numComponents, numComponents, numComponents == 4 ? 3 : STBIR_ALPHA_CHANNEL_NONE, 0);
src = dest;
srcWidth = mipWidth;
srcHeight = mipHeight;
// Copy a small mip for baking to use for sampling textures.
if (!td.sampleData && (mipWidth <= 32 || mipHeight <= 32)) {
const size_t size = mipWidth * mipHeight * numComponents;
td.sampleData = new uint8_t[size];
memcpy(td.sampleData, dest, size);
td.sampleWidth = (uint32_t)mipWidth;
td.sampleHeight = (uint32_t)mipHeight;
}
}
td.mem = mem;
td.width = (uint16_t)width;
td.height = (uint16_t)height;
td.numComponents = numComponents;
return td;
}
struct CachedTexture
{
char filename[256];
TextureData data;
bgfx::TextureHandle handle;
};
static std::vector<CachedTexture> s_textureCache;
static uint32_t textureLoadCached(const char *basePath, const char *filename)
{
for (uint32_t i = 0; i < (uint32_t)s_textureCache.size(); i++) {
if (bx::strCmpI(s_textureCache[i].filename, filename) == 0)
return i;
}
CachedTexture texture;
bx::strCopy(texture.filename, sizeof(texture.filename), filename);
texture.data = textureLoad(basePath, filename);
texture.handle = BGFX_INVALID_HANDLE;
s_textureCache.push_back(texture);
return (uint32_t)s_textureCache.size() - 1;
}
static void textureCreateCachedTextures()
{
for (uint32_t i = 0; i < (uint32_t)s_textureCache.size(); i++) {
CachedTexture &texture = s_textureCache[i];
if (!texture.data.mem) {
texture.handle = BGFX_INVALID_HANDLE;
continue;
}
bgfx::TextureFormat::Enum format = bgfx::TextureFormat::RGBA8;
if (texture.data.numComponents == 1)
format = bgfx::TextureFormat::R8;
else if (texture.data.numComponents == 2)
format = bgfx::TextureFormat::RG8;
else if (texture.data.numComponents == 3)
format = bgfx::TextureFormat::RGB8;
else {
assert(false);
}
texture.handle = bgfx::createTexture2D(texture.data.width, texture.data.height, true, 1, format, BGFX_SAMPLER_MIN_ANISOTROPIC | BGFX_SAMPLER_MAG_ANISOTROPIC, texture.data.mem);
}
}
static bgfx::TextureHandle textureGetHandle(uint32_t index)
{
if (index == UINT32_MAX)
return BGFX_INVALID_HANDLE;
return s_textureCache[index].handle;
}
static void textureDestroyCache()
{
for (int i = 0; i < (int)s_textureCache.size(); i++) {
bgfx::destroy(s_textureCache[i].handle);
delete s_textureCache[i].data.sampleData;
}
s_textureCache.clear();
}
void modelInit()
{
s_model.status = ModelStatus::NotLoaded;
s_model.u_color = bgfx::createUniform("u_color", bgfx::UniformType::Vec4);
s_model.u_diffuse = bgfx::createUniform("u_diffuse", bgfx::UniformType::Vec4);
s_model.u_emission = bgfx::createUniform("u_emission", bgfx::UniformType::Vec4);
s_model.u_lightDir = bgfx::createUniform("u_lightDir", bgfx::UniformType::Vec4);
s_model.u_shade_overlay_diffuse_emission = bgfx::createUniform("u_shade_overlay_diffuse_emission", bgfx::UniformType::Vec4);
s_model.u_textureSize_cellSize = bgfx::createUniform("u_textureSize_cellSize2", bgfx::UniformType::Vec4);
s_model.u_overlayOpacity_colorChartType = bgfx::createUniform("u_overlayOpacity_colorChartType", bgfx::UniformType::Vec4);
s_model.u_meshColor_primitiveIdStart = bgfx::createUniform("u_meshColor_primitiveIdStart", bgfx::UniformType::Vec4);
s_model.s_diffuse = bgfx::createUniform("s_diffuse", bgfx::UniformType::Sampler);
s_model.s_emission = bgfx::createUniform("s_emission", bgfx::UniformType::Sampler);
s_model.s_lightmap = bgfx::createUniform("s_lightmap", bgfx::UniformType::Sampler);
s_model.s_faceData = bgfx::createUniform("s_faceData", bgfx::UniformType::Sampler);
s_model.vs_model = loadShader(ShaderId::vs_model);
s_model.fs_material = loadShader(ShaderId::fs_material);
s_model.materialProgram = bgfx::createProgram(s_model.vs_model, s_model.fs_material);
s_model.u_dummyTexture = bgfx::createTexture2D(16, 16, false, 1, bgfx::TextureFormat::BGRA8);
ModelVertex::init();
bgfx::setViewClear(kModelView, BGFX_CLEAR_COLOR | BGFX_CLEAR_DEPTH, 0x444444ff);
bgfx::setViewRect(kModelView, 0, 0, bgfx::BackbufferRatio::Equal);
bgfx::setViewRect(kModelTransparentView, 0, 0, bgfx::BackbufferRatio::Equal);
bgfx::setViewMode(kModelTransparentView, bgfx::ViewMode::DepthDescending);
}
void modelShutdown()
{
modelDestroy();
bgfx::destroy(s_model.u_color);
bgfx::destroy(s_model.u_diffuse);
bgfx::destroy(s_model.u_emission);
bgfx::destroy(s_model.u_lightDir);
bgfx::destroy(s_model.u_shade_overlay_diffuse_emission);
bgfx::destroy(s_model.u_textureSize_cellSize);
bgfx::destroy(s_model.u_overlayOpacity_colorChartType);
bgfx::destroy(s_model.u_meshColor_primitiveIdStart);
bgfx::destroy(s_model.s_diffuse);
bgfx::destroy(s_model.s_emission);
bgfx::destroy(s_model.s_lightmap);
bgfx::destroy(s_model.s_faceData);
bgfx::destroy(s_model.vs_model);
bgfx::destroy(s_model.fs_material);
bgfx::destroy(s_model.materialProgram);
bgfx::destroy(s_model.u_dummyTexture);
}
static void fbxDestroy(objzModel *model)
{
delete [] (uint32_t *)model->indices;
delete [] model->meshes;
delete [] model->objects;
delete [] (ModelVertex *)model->vertices;
if (model->materials)
delete [] model->materials;
delete model;
}
static objzModel *fbxLoad(const char *filename, const char * /*basePath*/)
{
std::vector<uint8_t> fileData;
if (!readFileData(filename, &fileData))
return nullptr;
ofbx::IScene *scene = ofbx::load(fileData.data(), (int)fileData.size(), (ofbx::u64)ofbx::LoadFlags::TRIANGULATE);
if (!scene) {
fprintf(stderr, "%s\n", ofbx::getError());
return nullptr;
}
objzModel *model = new objzModel();
model->flags = 0;
model->numIndices = 0;
model->numMaterials = 0;
model->numMeshes = (uint32_t)scene->getMeshCount();
model->numObjects = 1;
model->numVertices = 0;
// Count array lengths.
std::unordered_map<const ofbx::Material *, uint32_t> materialToIndex;
for (int i = 0; i < scene->getAllObjectCount(); i++) {
const ofbx::Object *object = scene->getAllObjects()[i];
if (object->getType() == ofbx::Object::Type::MATERIAL) {
materialToIndex[(const ofbx::Material *)object] = model->numMaterials;
model->numMaterials++;
}
}
for (int i = 0; i < scene->getMeshCount(); i++) {
const ofbx::Geometry *geo = scene->getMesh(i)->getGeometry();
model->numIndices += (uint32_t)geo->getIndexCount();
model->numVertices += (uint32_t)geo->getVertexCount();
}
// Alloc data.
auto indices = new uint32_t[model->numIndices];
auto vertices = new ModelVertex[model->numVertices];
model->indices = indices;
model->meshes = new objzMesh[model->numMeshes];
model->objects = new objzObject[model->numObjects];
model->vertices = vertices;
if (model->numMaterials > 0)
model->materials = new objzMaterial[model->numMaterials];
else
model->materials = nullptr;
// Populate data.
{
objzObject &object = model->objects[0];
object.name[0] = 0;
object.firstMesh = 0;
object.numMeshes = model->numMeshes;
object.firstIndex = 0;
object.numIndices = model->numIndices;
object.firstVertex = 0;
object.numVertices = model->numVertices;
}
uint32_t currentIndex = 0, currentVertex = 0;
bool hasTexcoords = true;
for (int i = 0; i < scene->getMeshCount(); i++) {
const ofbx::Mesh *sourceMesh = scene->getMesh(i);
ofbx::Matrix dtransform = sourceMesh->getGlobalTransform();
float transform[16];
for (uint32_t j = 0; j < 16; j++)
transform[j] = (float)dtransform.m[j];
const ofbx::Geometry *sourceGeo = scene->getMesh(i)->getGeometry();
objzMesh &mesh = model->meshes[i];
mesh.firstIndex = currentIndex;
mesh.numIndices = (uint32_t)sourceGeo->getIndexCount();
// ignoring all but the first material for now
if (sourceMesh->getMaterialCount() > 0)
mesh.materialIndex = materialToIndex[sourceMesh->getMaterial(0)];
else
mesh.materialIndex = -1;
for (uint32_t j = 0; j < mesh.numIndices; j++) {
int sourceIndex = sourceGeo->getFaceIndices()[j];
if (sourceIndex < 0)
sourceIndex = -sourceIndex - 1; // index is negative if last in face
if (sourceIndex >= sourceGeo->getVertexCount()) {
fprintf(stderr, "Index '%d' out of range of vertex count '%d'\n", sourceIndex, sourceGeo->getVertexCount());
scene->destroy();
fbxDestroy(model);
return nullptr;
}
const uint32_t index = currentVertex + (uint32_t)sourceIndex;
assert(index < model->numVertices);
indices[mesh.firstIndex + j] = index;
}
for (uint32_t j = 0; j < (uint32_t)sourceGeo->getVertexCount(); j++) {
ModelVertex &vertex = vertices[currentVertex + j];
const ofbx::Vec3 &pos = sourceGeo->getVertices()[j];
vertex.pos.x = (float)pos.x;
vertex.pos.y = (float)pos.y;
vertex.pos.z = (float)pos.z;
vertex.pos = bx::mul(vertex.pos, transform);
if (sourceGeo->getNormals()) {
const ofbx::Vec3 &normal = sourceGeo->getNormals()[j];
vertex.normal.x = (float)normal.x;
vertex.normal.y = (float)normal.y;
vertex.normal.z = (float)normal.z;
} else {
vertex.normal = bx::Vec3(0.0f);
}
if (sourceGeo->getUVs(0)) {
const ofbx::Vec2 &uv = sourceGeo->getUVs(0)[j];
vertex.texcoord[0] = (float)uv.x;
vertex.texcoord[1] = (float)uv.y;
} else {
vertex.texcoord[0] = vertex.texcoord[1] = 0.0f;
hasTexcoords = false;
}
vertex.texcoord[2] = vertex.texcoord[3] = 0.0f;
}
currentIndex += mesh.numIndices;
currentVertex += (uint32_t)sourceGeo->getVertexCount();
}
if (hasTexcoords)
model->flags |= OBJZ_FLAG_TEXCOORDS;
uint32_t currentMaterial = 0;
for (int i = 0; i < scene->getAllObjectCount(); i++) {
const ofbx::Object *object = scene->getAllObjects()[i];
if (object->getType() != ofbx::Object::Type::MATERIAL)
continue;
auto sourceMat = (const ofbx::Material *)object;
objzMaterial &destMat = model->materials[currentMaterial];
memset(&destMat, 0, sizeof(destMat));
destMat.opacity = 1.0f;
const ofbx::Color &diffuse = sourceMat->getDiffuseColor();
destMat.diffuse[0] = diffuse.r;
destMat.diffuse[1] = diffuse.g;
destMat.diffuse[2] = diffuse.b;
currentMaterial++;
}
scene->destroy();
return model;
}
BX_PRAGMA_DIAGNOSTIC_PUSH();
BX_PRAGMA_DIAGNOSTIC_IGNORED_MSVC(4702) // 'unreachable code'
bool gltfAnyNodeInHierarchyHasMesh(const cgltf_node *node)
{
if (node->mesh)
return true;
	for (cgltf_size ci = 0; ci < node->children_count; ci++) {
		if (gltfAnyNodeInHierarchyHasMesh(node->children[ci]))
			return true;
	}
	return false;
}
BX_PRAGMA_DIAGNOSTIC_POP();
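// First pass over a glTF node hierarchy: accumulate vertex/index/mesh (and,
// optionally, object) counts so the model arrays can be allocated in one shot.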
static void gltfCountMeshData(const cgltf_node *node, objzModel *model)
{
if (node->mesh) {
for (cgltf_size pi = 0; pi < node->mesh->primitives_count; pi++) {
const cgltf_primitive &primitive = node->mesh->primitives[pi];
const cgltf_accessor *apositions = nullptr;
for (cgltf_size ai = 0; ai < primitive.attributes_count; ai++) {
const cgltf_attribute &attrib = primitive.attributes[ai];
if (attrib.type == cgltf_attribute_type_position) {
apositions = attrib.data;
break;
}
}
const cgltf_accessor *aindices = primitive.indices;
if (apositions && aindices) {
model->numVertices += (uint32_t)apositions->count;
model->numIndices += (uint32_t)aindices->count;
}
model->numMeshes++;
#if ONE_GLTF_OBJECT_PER_MESH
model->numObjects++;
#endif
}
}
for (cgltf_size ci = 0; ci < node->children_count; ci++)
gltfCountMeshData(node->children[ci], model);
}
template<typename T>
static const T *gltfGetBufferData(const cgltf_accessor *accessor)
{
auto buffer = (const uint8_t *)accessor->buffer_view->buffer->data;
const cgltf_size offset = accessor->offset + accessor->buffer_view->offset;
return (const T *)&buffer[offset];
}
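// Expands to a loop that widens indices of the given source type to 32 bits and
// offsets them by the mesh's first vertex, so they address the combined vertex array.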
#define GLTF_COPY_INDICES(type) \
const type *meshIndices = gltfGetBufferData<type>(aindices); \
for (uint32_t ii = 0; ii < (uint32_t)aindices->count; ii++) { \
assert(ii + firstMeshIndex < model->numIndices); \
indices[ii + firstMeshIndex] = firstMeshVertex + (uint32_t)meshIndices[ii]; \
assert(indices[ii + firstMeshIndex] < model->numVertices); \
}
static void gltfPopulateMeshData(const cgltf_node *node, const cgltf_material *firstMaterial, objzModel *model, uint32_t ¤tObject, uint32_t ¤tMesh, uint32_t &firstMeshIndex, uint32_t &firstMeshVertex, bool &hasTexcoords)
{
const cgltf_mesh *sourceMesh = node->mesh;
if (sourceMesh) {
float transform[16];
cgltf_node_transform_world(node, transform);
float rotation[16];
if (node->has_rotation)
bx::mtxQuat(rotation, *(bx::Quaternion *)node->rotation);
else
bx::mtxIdentity(rotation);
for (cgltf_size pi = 0; pi < sourceMesh->primitives_count; pi++) {
const cgltf_primitive &primitive = sourceMesh->primitives[pi];
const cgltf_accessor *apositions = nullptr, *anormals = nullptr, *atexcoords = nullptr;
for (cgltf_size ai = 0; ai < primitive.attributes_count; ai++) {
const cgltf_attribute &attrib = primitive.attributes[ai];
if (attrib.type == cgltf_attribute_type_position)
apositions = attrib.data;
else if (attrib.type == cgltf_attribute_type_normal)
anormals = attrib.data;
else if (attrib.type == cgltf_attribute_type_texcoord)
atexcoords = attrib.data;
}
const cgltf_accessor *aindices = primitive.indices;
if (!apositions || !aindices)
continue;
// Copy vertex data.
const float *meshPosition = gltfGetBufferData<float>(apositions);
const float *meshNormal = anormals && anormals->count == apositions->count ? gltfGetBufferData<float>(anormals) : nullptr;
const float *meshTexcoord = atexcoords && atexcoords->count == apositions->count ? gltfGetBufferData<float>(atexcoords) : nullptr;
for (cgltf_size vi = 0; vi < apositions->count; vi++) {
assert(vi + firstMeshVertex < model->numVertices);
ModelVertex &vertex = ((ModelVertex *)model->vertices)[vi + firstMeshVertex];
vertex.pos = bx::mul(bx::Vec3(meshPosition[0], meshPosition[1], meshPosition[2]), transform);
meshPosition += apositions->stride / sizeof(float);
if (meshNormal) {
vertex.normal = bx::Vec3(meshNormal[0], meshNormal[1], meshNormal[2]);
if (node->has_rotation)
vertex.normal = bx::mul(vertex.normal, rotation);
meshNormal += anormals->stride / sizeof(float);
}
if (meshTexcoord) {
vertex.texcoord[0] = meshTexcoord[0];
vertex.texcoord[1] = meshTexcoord[1];
meshTexcoord += atexcoords->stride / sizeof(float);
} else {
hasTexcoords = false;
}
}
// Copy indices.
auto indices = (uint32_t *)model->indices;
if (aindices->component_type == cgltf_component_type_r_8u) {
GLTF_COPY_INDICES(uint8_t)
} else if (aindices->component_type == cgltf_component_type_r_16u) {
GLTF_COPY_INDICES(uint16_t)
} else if (aindices->component_type == cgltf_component_type_r_32u) {
GLTF_COPY_INDICES(uint32_t)
} else {
assert (false);
}
#if ONE_GLTF_OBJECT_PER_MESH
// Create object.
assert(currentObject < model->numObjects);
objzObject &object = model->objects[currentObject];
object.name[0] = 0;
object.firstMesh = currentMesh;
object.numMeshes = 1;
object.firstIndex = firstMeshIndex;
object.numIndices = (uint32_t)aindices->count;
object.firstVertex = firstMeshVertex;
object.numVertices = (uint32_t)apositions->count;
currentObject++;
#endif
// Create mesh.
assert(currentMesh < model->numMeshes);
objzMesh &mesh = model->meshes[currentMesh];
mesh.materialIndex = primitive.material ? int32_t(primitive.material - firstMaterial) : -1;
mesh.firstIndex = firstMeshIndex;
mesh.numIndices = (uint32_t)aindices->count;
currentMesh++;
#if !ONE_GLTF_OBJECT_PER_MESH
// Update object.
objzObject &object = model->objects[currentObject];
object.numMeshes++;
object.numIndices += (uint32_t)aindices->count;
object.numVertices += (uint32_t)apositions->count;
#endif
firstMeshVertex += (uint32_t)apositions->count;
firstMeshIndex += (uint32_t)aindices->count;
}
}
for (cgltf_size ci = 0; ci < node->children_count; ci++)
gltfPopulateMeshData(node->children[ci], firstMaterial, model, currentObject, currentMesh, firstMeshIndex, firstMeshVertex, hasTexcoords);
}
static objzModel *gltfLoad(const char *filename, const char *basePath)
{
cgltf_data *gltfData = nullptr;
cgltf_options options;
bx::memSet(&options, 0, sizeof(options));
cgltf_result result = cgltf_parse_file(&options, filename, &gltfData);
if (result == cgltf_result_success) {
result = cgltf_load_buffers(&options, gltfData, basePath);
if (result == cgltf_result_success)
result = cgltf_validate(gltfData);
}
if (result != cgltf_result_success) {
if (gltfData)
cgltf_free(gltfData);
return nullptr;
}
objzModel *model = new objzModel();
model->flags = 0;
model->numIndices = 0;
model->numMaterials = (uint32_t)gltfData->materials_count;
model->numMeshes = 0;
model->numObjects = 0;
model->numVertices = 0;
// Count array lengths.
for (cgltf_size ni = 0; ni < gltfData->nodes_count; ni++) {
// Objects are root nodes with a mesh, or any ancestor with a mesh.
const cgltf_node &node = gltfData->nodes[ni];
if (node.parent)
continue;
if (!gltfAnyNodeInHierarchyHasMesh(&node))
continue;
gltfCountMeshData(&node, model);
#if !ONE_GLTF_OBJECT_PER_MESH
model->numObjects++;
#endif
}
// Alloc data.
model->indices = new uint32_t[model->numIndices];
model->meshes = new objzMesh[model->numMeshes];
model->objects = new objzObject[model->numObjects];
model->vertices = new ModelVertex[model->numVertices];
// Populate data.
uint32_t currentObject = 0, currentMesh = 0, firstMeshIndex = 0, firstMeshVertex = 0;
bool hasTexcoords = true;
for (cgltf_size ni = 0; ni < gltfData->nodes_count; ni++) {
const cgltf_node &node = gltfData->nodes[ni];
if (node.parent)
continue;
if (!gltfAnyNodeInHierarchyHasMesh(&node))
continue;
#if !ONE_GLTF_OBJECT_PER_MESH
// Create object.
assert(currentObject < model->numObjects);
objzObject &object = model->objects[currentObject];
bx::strCopy(object.name, sizeof(object.name), node.name);
object.firstMesh = currentMesh;
object.numMeshes = 0;
object.firstIndex = firstMeshIndex;
object.numIndices = 0;
object.firstVertex = firstMeshVertex;
object.numVertices = 0;
#endif
// Create mesh data.
gltfPopulateMeshData(&node, gltfData->materials, model, currentObject, currentMesh, firstMeshIndex, firstMeshVertex, hasTexcoords);
#if !ONE_GLTF_OBJECT_PER_MESH
currentObject++;
#endif
}
if (hasTexcoords)
model->flags |= OBJZ_FLAG_TEXCOORDS;
// Materials.
model->materials = new objzMaterial[model->numMaterials];
for (uint32_t i = 0; i < model->numMaterials; i++) {
const cgltf_material &sourceMat = gltfData->materials[i];
const cgltf_texture *diffuse = sourceMat.pbr_metallic_roughness.base_color_texture.texture;
const cgltf_texture *emission = sourceMat.emissive_texture.texture;
objzMaterial &destMat = model->materials[i];
memset(&destMat, 0, sizeof(destMat));
destMat.opacity = 1.0f;
memcpy(destMat.diffuse, sourceMat.pbr_metallic_roughness.base_color_factor, sizeof(float) * 3);
if (diffuse)
bx::strCopy(destMat.diffuseTexture, sizeof(destMat.diffuseTexture), diffuse->image->uri);
if (emission)
bx::strCopy(destMat.emissionTexture, sizeof(destMat.emissionTexture), emission->image->uri);
}
cgltf_free(gltfData);
return model;
}
static void gltfDestroy(objzModel *model)
{
delete [] (uint32_t *)model->indices;
delete [] model->meshes;
delete [] model->objects;
delete [] (ModelVertex *)model->vertices;
delete [] model->materials;
delete model;
}
static objzModel *stlLoad(const char *filename, const char * /*basePath*/)
{
std::vector<float> coords, normals;
std::vector<unsigned int> tris, solids;
if (!stl_reader::ReadStlFile(filename, coords, normals, tris, solids))
return nullptr;
objzModel *model = new objzModel();
model->flags = 0;
model->numIndices = (uint32_t)tris.size();
model->numMaterials = 0;
model->numMeshes = (uint32_t)solids.size() - 1;
model->numObjects = (uint32_t)solids.size() - 1;
model->numVertices = (uint32_t)coords.size() / 3;
model->indices = new uint32_t[model->numIndices];
model->materials = nullptr;
model->meshes = new objzMesh[model->numMeshes];
model->objects = new objzObject[model->numObjects];
model->vertices = new ModelVertex[model->numVertices];
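	// stl_reader's 'solids' array holds numSolids + 1 triangle offsets: solid i
	// owns triangles [solids[i], solids[i + 1]).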
for (uint32_t i = 0; i < model->numObjects; i++) {
objzObject &object = model->objects[i];
object.name[0] = 0;
object.firstMesh = i;
object.numMeshes = 1;
object.firstIndex = solids[i] * 3;
		object.numIndices = (solids[i + 1] - solids[i]) * 3;
object.firstVertex = 0;
object.numVertices = model->numVertices;
objzMesh &mesh = model->meshes[i];
mesh.materialIndex = -1;
mesh.firstIndex = object.firstIndex;
mesh.numIndices = object.numIndices;
}
auto vertices = (ModelVertex *)model->vertices;
for (uint32_t i = 0; i < model->numVertices; i++) {
ModelVertex &v = vertices[i];
v.pos.x = coords[i * 3 + 0];
v.pos.y = coords[i * 3 + 1];
v.pos.z = coords[i * 3 + 2];
v.normal.x = normals[i * 3 + 0];
v.normal.y = normals[i * 3 + 1];
v.normal.z = normals[i * 3 + 2];
v.texcoord[0] = v.texcoord[1] = v.texcoord[2] = v.texcoord[3] = 0.0f;
}
memcpy(model->indices, tris.data(), sizeof(uint32_t) * model->numIndices);
return model;
}
static void stlDestroy(objzModel *model)
{
delete [] (uint32_t *)model->indices;
delete [] model->meshes;
delete [] model->objects;
delete [] (ModelVertex *)model->vertices;
delete model;
}
static void objzLoadProgress(const char *, int progress)
{
s_model.loadProgress = progress;
}
struct ModelLoadThreadArgs
{
char filename[256];
};
static void modelLoadThread(ModelLoadThreadArgs args)
{
s_model.data = nullptr;
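	// Derive the base path (directory portion, trailing slash included) used to
	// resolve texture and glTF buffer URIs relative to the model file.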
char basePath[256] = { 0 };
const char *lastSlash = strrchr(args.filename, '/');
if (!lastSlash)
lastSlash = strrchr(args.filename, '\\');
if (lastSlash) {
for (int i = 0;; i++) {
basePath[i] = args.filename[i];
if (&args.filename[i] == lastSlash)
break;
}
}
bx::FilePath filePath(args.filename);
const bx::StringView ext = filePath.getExt();
if (bx::strCmpI(ext, ".fbx") == 0) {
objzModel *model = fbxLoad(args.filename, basePath);
if (!model) {
fprintf(stderr, "Error loading '%s'\n", args.filename);
setErrorMessage("Error loading '%s'\n", args.filename);
s_model.status = ModelStatus::NotLoaded;
return;
}
s_model.data = model;
s_model.destroyModelData = fbxDestroy;
} else if (bx::strCmpI(ext, ".glb") == 0 || bx::strCmpI(ext, ".gltf") == 0) {
objzModel *model = gltfLoad(args.filename, basePath);
if (!model) {
fprintf(stderr, "Error loading '%s'\n", args.filename);
setErrorMessage("Error loading '%s'\n", args.filename);
s_model.status = ModelStatus::NotLoaded;
return;
}
s_model.data = model;
s_model.destroyModelData = gltfDestroy;
} else if (bx::strCmpI(ext, ".obj") == 0) {
objz_setProgress(objzLoadProgress);
objz_setIndexFormat(OBJZ_INDEX_FORMAT_U32);
objz_setVertexFormat(sizeof(ModelVertex), offsetof(ModelVertex, pos), offsetof(ModelVertex, texcoord), offsetof(ModelVertex, normal));
objzModel *model = objz_load(args.filename);
if (!model) {
fprintf(stderr, "%s\n", objz_getError());
setErrorMessage("Error loading' %s'\n%s\n", args.filename, objz_getError());
s_model.status = ModelStatus::NotLoaded;
return;
}
if (objz_getError()) // Print warnings.
printf("%s\n", objz_getError());
s_model.data = model;
s_model.destroyModelData = objz_destroy;
for (uint32_t i = 0; i < model->numVertices; i++) {
auto v = &((ModelVertex *)model->vertices)[i];
v->texcoord[1] = 1.0f - v->texcoord[1];
}
} else if (bx::strCmpI(ext, ".stl") == 0) {
objzModel *model = stlLoad(args.filename, basePath);
if (!model) {
fprintf(stderr, "Error loading '%s'\n", args.filename);
setErrorMessage("Error loading '%s'\n", args.filename);
s_model.status = ModelStatus::NotLoaded;
return;
}
s_model.data = model;
s_model.destroyModelData = stlDestroy;
} else {
abort();
}
uint32_t numWireframeVertices = 0;
for (uint32_t i = 0; i < s_model.data->numMeshes; i++) {
const objzMesh &mesh = s_model.data->meshes[i];
const objzMaterial *mat = mesh.materialIndex == -1 ? nullptr : &s_model.data->materials[mesh.materialIndex];
if (mat && mat->opacity < 1.0f)
continue;
numWireframeVertices += mesh.numIndices;
}
s_model.wireframeVertices.resize(numWireframeVertices);
uint32_t currentWireframeVertex = 0;
for (uint32_t i = 0; i < s_model.data->numMeshes; i++) {
const objzMesh &mesh = s_model.data->meshes[i];
const objzMaterial *mat = mesh.materialIndex == -1 ? nullptr : &s_model.data->materials[mesh.materialIndex];
if (mat && mat->opacity < 1.0f)
continue;
for (uint32_t j = 0; j < mesh.numIndices / 3; j++) {
WireframeVertex *dest = &s_model.wireframeVertices[currentWireframeVertex];
for (uint32_t k = 0; k < 3; k++)
dest[k].pos = ((const ModelVertex *)s_model.data->vertices)[((const uint32_t *)s_model.data->indices)[mesh.firstIndex + j * 3 + k]].pos;
dest[0].barycentric = bx::Vec3(1.0f, 0.0f, 0.0f);
dest[1].barycentric = bx::Vec3(0.0f, 1.0f, 0.0f);
dest[2].barycentric = bx::Vec3(0.0f, 0.0f, 1.0f);
currentWireframeVertex += 3;
}
}
s_model.diffuseTextures.resize(s_model.data->numMaterials);
s_model.emissionTextures.resize(s_model.data->numMaterials);
for (uint32_t i = 0; i < s_model.data->numMaterials; i++) {
const objzMaterial &mat = s_model.data->materials[i];
s_model.diffuseTextures[i] = mat.diffuseTexture[0] ? textureLoadCached(basePath, mat.diffuseTexture) : UINT32_MAX;
s_model.emissionTextures[i] = mat.emissionTexture[0] ? textureLoadCached(basePath, mat.emissionTexture) : UINT32_MAX;
}
s_model.status = ModelStatus::Finalizing;
}
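// Runs on the main thread once the loader thread has reached
// ModelStatus::Finalizing: joins the thread, uploads GPU resources and
// computes the AABB, centroid and display scale.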
void modelFinalize()
{
if (s_model.status != ModelStatus::Finalizing)
return;
if (s_model.thread) {
if (s_model.thread->joinable())
s_model.thread->join();
delete s_model.thread;
s_model.thread = nullptr;
}
printf(" %u object%s\n", s_model.data->numObjects, s_model.data->numObjects > 1 ? "s" : "");
printf(" %u mesh%s\n", s_model.data->numMeshes, s_model.data->numMeshes > 1 ? "es" : "");
printf(" %u triangles\n", s_model.data->numIndices / 3);
printf(" %u vertices\n", s_model.data->numVertices);
textureCreateCachedTextures();
s_model.aabb = AABB();
s_model.centroid = bx::Vec3(0.0f, 0.0f, 0.0f);
uint32_t centroidCount = 0;
for (uint32_t i = 0; i < s_model.data->numVertices; i++) {
const bx::Vec3 &pos = ((const ModelVertex *)s_model.data->vertices)[i].pos;
s_model.aabb.addPoint(pos);
if (!std::isnan(pos.x) && !std::isnan(pos.y) && !std::isnan(pos.z)) {
s_model.centroid = bx::add(s_model.centroid, pos);
centroidCount++;
}
}
s_model.centroid = bx::mul(s_model.centroid, 1.0f / centroidCount);
float radius = 0.0f;
bx::Vec3 aabbCorners[8];
s_model.aabb.getCorners(aabbCorners);
for (uint32_t i = 0; i < 8; i++)
radius = bx::max(radius, bx::distance(s_model.centroid, aabbCorners[i]));
if (radius > 0.0f)
s_model.scale = 16.0f / radius;
s_model.vb = bgfx::createVertexBuffer(bgfx::makeRef(s_model.data->vertices, s_model.data->numVertices * sizeof(ModelVertex)), ModelVertex::layout);
s_model.ib = bgfx::createIndexBuffer(bgfx::makeRef(s_model.data->indices, s_model.data->numIndices * sizeof(uint32_t)), BGFX_BUFFER_INDEX32);
if (!s_model.wireframeVertices.empty())
s_model.wireframeVb = bgfx::createVertexBuffer(bgfx::makeRef(s_model.wireframeVertices.data(), uint32_t(s_model.wireframeVertices.size() * sizeof(WireframeVertex))), WireframeVertex::layout);
resetCamera();
g_options.shadeMode = ShadeMode::FlatMaterial;
g_options.overlayMode = OverlayMode::None;
g_options.wireframeMode = WireframeMode::Triangles;
s_model.status = ModelStatus::Loaded;
}
static bool modelCanOpen()
{
if (s_model.status == ModelStatus::Loading || s_model.status == ModelStatus::Finalizing)
return false;
if (!(atlasIsNotGenerated() || atlasIsReady()))
return false;
return true;
}
void modelOpen(const char *filename)
{
if (!modelCanOpen())
return;
modelDestroy();
s_model.loadProgress = 0;
s_model.status = ModelStatus::Loading;
char windowTitle[256];
	snprintf(windowTitle, sizeof(windowTitle), "%s - %s", WINDOW_TITLE, filename);
glfwSetWindowTitle(g_window, windowTitle);
printf("Loading '%s'\n", filename);
ModelLoadThreadArgs args;
bx::strCopy(args.filename, sizeof(args.filename), filename);
s_model.thread = new std::thread(modelLoadThread, args);
}
void modelOpenDialog()
{
if (!modelCanOpen())
return;
nfdchar_t *filename = nullptr;
nfdresult_t result = NFD_OpenDialog("fbx,glb,gltf,obj,stl", nullptr, &filename);
if (result != NFD_OKAY)
{
free(filename);
return;
}
modelOpen(filename);
free(filename);
}
void modelDestroy()
{
textureDestroyCache();
atlasDestroy();
if (s_model.thread) {
if (s_model.thread->joinable())
s_model.thread->join();
delete s_model.thread;
s_model.thread = nullptr;
}
if (s_model.data) {
s_model.destroyModelData(s_model.data);
s_model.data = nullptr;
}
if (bgfx::isValid(s_model.vb)) {
bgfx::destroy(s_model.vb);
bgfx::destroy(s_model.ib);
if (bgfx::isValid(s_model.wireframeVb)) {
bgfx::destroy(s_model.wireframeVb);
s_model.wireframeVb = BGFX_INVALID_HANDLE;
}
s_model.vb = BGFX_INVALID_HANDLE;
s_model.ib = BGFX_INVALID_HANDLE;
}
glfwSetWindowTitle(g_window, WINDOW_TITLE);
s_model.status = ModelStatus::NotLoaded;
}
void modelRender(const float *view, const float *projection)
{
if (s_model.status != ModelStatus::Loaded)
return;
float transform[16];
if (s_model.rightHandedAxis)
memcpy(transform, s_rightHandedAxisMatrix, sizeof(float) * 16);
else
bx::mtxIdentity(transform);
float scaleMatrix[16];
bx::mtxScale(scaleMatrix, s_model.scale);
float modelMatrix[16];
bx::mtxMul(modelMatrix, transform, scaleMatrix);
bgfx::setViewTransform(kModelView, view, projection);
bgfx::setViewTransform(kModelTransparentView, view, projection);
const float lightDir[] = { view[2], view[6], view[10], 0.0f };
uint32_t primitiveIdStart = 0;
for (uint32_t i = 0; i < s_model.data->numMeshes; i++) {
const objzMesh &mesh = s_model.data->meshes[i];
const objzMaterial *mat = mesh.materialIndex == -1 ? nullptr : &s_model.data->materials[mesh.materialIndex];
const bool transparent = mat ? mat->opacity < 1.0f : false;
if (atlasIsReady()) {
bgfx::setIndexBuffer(atlasGetIb(), mesh.firstIndex, mesh.numIndices);
bgfx::setVertexBuffer(0, atlasGetVb());
} else {
bgfx::setIndexBuffer(s_model.ib, mesh.firstIndex, mesh.numIndices);
bgfx::setVertexBuffer(0, s_model.vb);
}
uint64_t state = BGFX_STATE_DEFAULT;
if (!s_model.clockwiseFaceWinding)
state = (state & ~BGFX_STATE_CULL_CW) | BGFX_STATE_CULL_CCW;
if (transparent)
state |= BGFX_STATE_BLEND_ALPHA;
bgfx::setState(state);
bgfx::setTransform(modelMatrix);
bgfx::setUniform(s_model.u_lightDir, lightDir);
float diffuse[4], emission[4];
if (!mat) {
diffuse[0] = diffuse[1] = diffuse[2] = 0.5f;
diffuse[3] = 1.0f;
emission[0] = emission[1] = emission[2] = emission[3] = 0.0f;
} else {
diffuse[0] = mat->diffuse[0];
diffuse[1] = mat->diffuse[1];
diffuse[2] = mat->diffuse[2];
diffuse[3] = mat->opacity;
emission[0] = mat->emission[0];
emission[1] = mat->emission[1];
emission[2] = mat->emission[2];
emission[3] = mat->opacity;
}
bgfx::setUniform(s_model.u_diffuse, diffuse);
bgfx::setUniform(s_model.u_emission, emission);
float shade_overlay_diffuse_emission[4];
shade_overlay_diffuse_emission[2] = DIFFUSE_COLOR;
shade_overlay_diffuse_emission[3] = EMISSION_COLOR;
if (g_options.shadeMode == ShadeMode::LightmapMaterial)
shade_overlay_diffuse_emission[0] = (float)SHADE_LIGHTMAP;
else if (g_options.shadeMode == ShadeMode::LightmapOnly)
shade_overlay_diffuse_emission[0] = (float)SHADE_LIGHTMAP_ONLY;
else
shade_overlay_diffuse_emission[0] = (float)SHADE_FLAT;
shade_overlay_diffuse_emission[1] = (float)OVERLAY_NONE;
if (g_options.overlayMode == OverlayMode::Chart && atlasIsReady())
shade_overlay_diffuse_emission[1] = (float)OVERLAY_CHART;
else if (g_options.overlayMode == OverlayMode::Mesh)
shade_overlay_diffuse_emission[1] = (float)OVERLAY_MESH;
else if (g_options.overlayMode == OverlayMode::Stretch)
shade_overlay_diffuse_emission[1] = (float)OVERLAY_STRETCH;
bgfx::TextureHandle diffuseTexture = BGFX_INVALID_HANDLE;
bgfx::TextureHandle emissionTexture = BGFX_INVALID_HANDLE;
if (mat) {
diffuseTexture = textureGetHandle(s_model.diffuseTextures[mesh.materialIndex]);
emissionTexture = textureGetHandle(s_model.emissionTextures[mesh.materialIndex]);
}
if (bgfx::isValid(diffuseTexture))
shade_overlay_diffuse_emission[2] = DIFFUSE_TEXTURE;
if (bgfx::isValid(emissionTexture))
shade_overlay_diffuse_emission[3] = EMISSION_TEXTURE;
bgfx::setUniform(s_model.u_shade_overlay_diffuse_emission, shade_overlay_diffuse_emission);
float textureSize_cellSize[4];
if (atlasIsReady()) {
textureSize_cellSize[0] = (float)atlasGetWidth();
textureSize_cellSize[1] = (float)atlasGetHeight();
} else {
textureSize_cellSize[0] = textureSize_cellSize[1] = 0.0f;
}
textureSize_cellSize[2] = (float)g_options.chartCellSize;
textureSize_cellSize[3] = (float)g_options.chartCellSize;
bgfx::setUniform(s_model.u_textureSize_cellSize, textureSize_cellSize);
float overlayOpacity_colorChartType[4];
overlayOpacity_colorChartType[0] = g_options.overlayOpacity;
overlayOpacity_colorChartType[1] = (float)g_options.chartColorMode;
bgfx::setUniform(s_model.u_overlayOpacity_colorChartType, overlayOpacity_colorChartType);
float meshColor_primitiveIdStart[4];
if (g_options.overlayMode == OverlayMode::Mesh) {
srand(i);
uint8_t color[4];
randomRGB(color);
meshColor_primitiveIdStart[0] = color[0] / 255.0f;
meshColor_primitiveIdStart[1] = color[1] / 255.0f;
meshColor_primitiveIdStart[2] = color[2] / 255.0f;
}
meshColor_primitiveIdStart[3] = (float)primitiveIdStart;
bgfx::setUniform(s_model.u_meshColor_primitiveIdStart, meshColor_primitiveIdStart);
bgfx::setTexture(0, s_model.s_diffuse, bgfx::isValid(diffuseTexture) ? diffuseTexture : s_model.u_dummyTexture);
bgfx::setTexture(1, s_model.s_emission, bgfx::isValid(emissionTexture) ? emissionTexture : s_model.u_dummyTexture);
if (g_options.shadeMode == ShadeMode::LightmapMaterial || g_options.shadeMode == ShadeMode::LightmapOnly)
bgfx::setTexture(2, s_model.s_lightmap, bakeGetLightmap(), bakeGetLightmapSamplerFlags());
else
bgfx::setTexture(2, s_model.s_lightmap, s_model.u_dummyTexture);
if (g_options.overlayMode == OverlayMode::Chart || g_options.overlayMode == OverlayMode::Stretch)
bgfx::setTexture(3, s_model.s_faceData, atlasGetFaceDataTexture());
else
bgfx::setTexture(3, s_model.s_faceData, s_model.u_dummyTexture);
bgfx::submit(transparent ? kModelTransparentView : kModelView, s_model.materialProgram);
primitiveIdStart += mesh.numIndices / 3;
}
if (g_options.wireframe) {
if (g_options.wireframeMode == WireframeMode::Triangles) {
if (bgfx::isValid(s_model.wireframeVb)) {
const float color[] = { 0.0f, 0.0f, 0.0f, 0.75f };
bgfx::setUniform(s_model.u_color, color);
setWireframeThicknessUniform(1.5f);
bgfx::setState(BGFX_STATE_WRITE_RGB | BGFX_STATE_WRITE_A | BGFX_STATE_WRITE_Z | BGFX_STATE_DEPTH_TEST_LEQUAL | BGFX_STATE_CULL_CW | BGFX_STATE_BLEND_ALPHA | BGFX_STATE_MSAA);
bgfx::setTransform(modelMatrix);
bgfx::setVertexBuffer(0, s_model.wireframeVb);
bgfx::submit(kModelView, getWireframeProgram(), 1);
}
} else {
atlasRenderChartsWireframe(modelMatrix);
}
}
}
void modelShowGuiMenu()
{
ImGui::Checkbox("Right-handed axis", &s_model.rightHandedAxis);
ImGui::Checkbox("Clockwise face winding", &s_model.clockwiseFaceWinding);
ImGui::PushItemWidth(100.0f);
ImGui::InputFloat("Scale", &s_model.scale, 0.01f, 0.1f);
ImGui::PopItemWidth();
s_model.scale = bx::max(0.001f, s_model.scale);
}
void modelShowGuiWindow()
{
const ImGuiWindowFlags progressWindowFlags = ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_AlwaysAutoResize | ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoSavedSettings;
if (s_model.status == ModelStatus::Loading) {
ImGui::SetNextWindowPos(ImVec2(g_windowSize[0] * 0.5f, g_windowSize[1] * 0.5f), ImGuiCond_Always, ImVec2(0.5f, 0.5f));
if (ImGui::Begin("##modelProgress", nullptr, progressWindowFlags)) {
ImGui::AlignTextToFramePadding();
ImGui::Text("Loading model");
ImGui::SameLine();
ImGui::Spinner("##modelSpinner");
if (s_model.loadProgress > 0)
ImGui::ProgressBar(s_model.loadProgress.load() / 100.0f);
ImGui::End();
}
}
}
AABB modelGetAABB()
{
return s_model.aabb;
}
const objzModel *modelGetData()
{
return s_model.data;
}
bx::Vec3 modelGetCentroid()
{
bx::Vec3 centroid(s_model.centroid);
if (s_model.rightHandedAxis)
centroid = bx::mul(centroid, s_rightHandedAxisMatrix);
return bx::mul(centroid, s_model.scale);
}
float modelGetScale()
{
return s_model.scale;
}
bgfx::ShaderHandle modelGet_vs_model()
{
return s_model.vs_model;
}
bool modelIsLoaded()
{
return s_model.status == ModelStatus::Loaded;
}
static bool modelSampleTexture(uint32_t textureIndex, const float *uv, bx::Vec3 *color)
{
if (textureIndex == UINT32_MAX)
return false;
const CachedTexture &texture = s_textureCache[textureIndex];
if (!texture.data.mem)
return false;
const uint32_t x = uint32_t(uv[0] * texture.data.sampleWidth) % texture.data.sampleWidth;
const uint32_t y = uint32_t(uv[1] * texture.data.sampleHeight) % texture.data.sampleHeight;
const uint8_t *rgb = &texture.data.sampleData[(x + y * texture.data.sampleWidth) * texture.data.numComponents];
if (texture.data.numComponents == 1)
*color = bx::Vec3(rgb[0] / 255.0f);
else
*color = bx::Vec3(rgb[0] / 255.0f, rgb[1] / 255.0f, rgb[2] / 255.0f);
return true;
}
bool modelSampleMaterialDiffuse(const objzMaterial *mat, const float *uv, bx::Vec3 *color)
{
return modelSampleTexture(s_model.diffuseTextures[mat - s_model.data->materials], uv, color);
}
bool modelSampleMaterialEmission(const objzMaterial *mat, const float *uv, bx::Vec3 *color)
{
return modelSampleTexture(s_model.emissionTextures[mat - s_model.data->materials], uv, color);
}
| 18,011 |
2,003 | // Copyright (c) 2015, Baidu.com, Inc. All Rights Reserved
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "master/delete_table_procedure.h"
#include "master/master_env.h"
DECLARE_int32(tera_master_meta_retry_times);
namespace tera {
namespace master {
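// Static dispatch table mapping each DeleteTablePhase to its member-function
// handler; RunNextStage() looks up the current phase here.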
std::map<DeleteTablePhase,
DeleteTableProcedure::DeleteTablePhaseHandler> DeleteTableProcedure::phase_handlers_{
{DeleteTablePhase::kPrepare, std::bind(&DeleteTableProcedure::PrepareHandler, _1, _2)},
{DeleteTablePhase::kDeleteTable, std::bind(&DeleteTableProcedure::DeleteTableHandler, _1, _2)},
{DeleteTablePhase::kUpdateMeta, std::bind(&DeleteTableProcedure::UpdateMetaHandler, _1, _2)},
{DeleteTablePhase::kEofPhase, std::bind(&DeleteTableProcedure::EofPhaseHandler, _1, _2)}};
DeleteTableProcedure::DeleteTableProcedure(TablePtr table, const DeleteTableRequest* request,
DeleteTableResponse* response,
google::protobuf::Closure* closure,
ThreadPool* thread_pool)
: table_(table),
request_(request),
response_(response),
rpc_closure_(closure),
update_meta_(false),
done_(false),
thread_pool_(thread_pool) {
PROC_LOG(INFO) << "begin delete table: " << table_->GetTableName();
SetNextPhase(DeleteTablePhase::kPrepare);
}
std::string DeleteTableProcedure::ProcId() const {
static std::string prefix("DeleteTable:");
return prefix + table_->GetTableName();
}
void DeleteTableProcedure::RunNextStage() {
DeleteTablePhase phase = GetCurrentPhase();
auto it = phase_handlers_.find(phase);
PROC_CHECK(it != phase_handlers_.end()) << "illegal phase:" << phase
<< ", table: " << table_->GetTableName();
DeleteTablePhaseHandler handler = it->second;
handler(this, phase);
}
void DeleteTableProcedure::PrepareHandler(const DeleteTablePhase&) {
if (!MasterEnv().GetMaster()->HasPermission(request_, table_, "delete table")) {
EnterPhaseWithResponseStatus(kNotPermission, DeleteTablePhase::kEofPhase);
return;
}
SetNextPhase(DeleteTablePhase::kDeleteTable);
}
void DeleteTableProcedure::DeleteTableHandler(const DeleteTablePhase&) {
std::vector<TabletPtr> tablets;
table_->GetTablet(&tablets);
for (size_t i = 0; i < tablets.size(); ++i) {
TabletPtr tablet = tablets[i];
if (tablet->GetStatus() != TabletMeta::kTabletDisable) {
PROC_LOG(WARNING) << "tablet: " << tablet << " not in disabled status, "
<< StatusCodeToString(tablet->GetStatus());
EnterPhaseWithResponseStatus(StatusCode(tablet->GetStatus()), DeleteTablePhase::kEofPhase);
return;
}
PackMetaWriteRecords(tablet, true, meta_records_);
}
if (!table_->DoStateTransition(TableEvent::kDeleteTable)) {
PROC_LOG(WARNING) << "table: " << table_->GetTableName()
<< ", current status: " << StatusCodeToString(table_->GetStatus());
EnterPhaseWithResponseStatus(kTableNotSupport, DeleteTablePhase::kEofPhase);
return;
}
PackMetaWriteRecords(table_, true, meta_records_);
// delete quota setting store in meta table
quota::MasterQuotaHelper::PackDeleteQuotaRecords(table_->GetTableName(), meta_records_);
SetNextPhase(DeleteTablePhase::kUpdateMeta);
}
void DeleteTableProcedure::UpdateMetaHandler(const DeleteTablePhase&) {
if (update_meta_) {
return;
}
update_meta_.store(true);
PROC_LOG(INFO) << "table: " << table_->GetTableName() << "begin to update meta";
UpdateMetaClosure closure = std::bind(&DeleteTableProcedure::UpdateMetaDone, this, _1);
MasterEnv().BatchWriteMetaTableAsync(meta_records_, closure, FLAGS_tera_master_meta_retry_times);
}
void DeleteTableProcedure::EofPhaseHandler(const DeleteTablePhase&) {
done_.store(true);
if (table_->InTransition()) {
table_->UnlockTransition();
}
PROC_LOG(INFO) << "delete table: " << table_->GetTableName() << " finish";
rpc_closure_->Run();
}
void DeleteTableProcedure::UpdateMetaDone(bool succ) {
if (!succ) {
PROC_LOG(WARNING) << "table: " << table_->GetTableName() << " update meta fail";
EnterPhaseWithResponseStatus(kMetaTabletError, DeleteTablePhase::kEofPhase);
return;
}
PROC_LOG(INFO) << "table: " << table_->GetTableName() << " update meta succ";
if (!MasterEnv().GetQuotaEntry()->DelRecord(table_->GetTableName())) {
PROC_LOG(WARNING) << "table: " << table_->GetTableName()
<< " delete master memory quota cache failed";
}
StatusCode code;
MasterEnv().GetTabletManager()->DeleteTable(table_->GetTableName(), &code);
EnterPhaseWithResponseStatus(kMasterOk, DeleteTablePhase::kEofPhase);
}
std::ostream& operator<<(std::ostream& o, const DeleteTablePhase& phase) {
static const char* msg[] = {"DeleteTablePhase::kPrepare", "DeleteTablePhase::kDeleteTable",
"DeleteTablePhase::kUpdateMeta", "DeleteTablePhase::kEofPhase",
"DeleteTablePhase::kUnknown"};
static uint32_t msg_size = sizeof(msg) / sizeof(const char*);
typedef std::underlying_type<DeleteTablePhase>::type UnderType;
uint32_t index =
static_cast<UnderType>(phase) - static_cast<UnderType>(DeleteTablePhase::kPrepare);
index = index < msg_size ? index : msg_size - 1;
o << msg[index];
return o;
}
}
}
| 2,026 |
666 | <filename>modules/Gyoi_CveExplorerNVD.py
#!/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import codecs
import json
import glob
import zipfile
import shutil
import ssl
import urllib3
import configparser
import pandas as pd
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(InsecureRequestWarning)
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
class CveExplorerNVD:
def __init__(self, utility, is_no_update):
# Read config.ini.
self.utility = utility
config = configparser.ConfigParser()
self.file_name = os.path.basename(__file__)
self.full_path = os.path.dirname(os.path.abspath(__file__))
self.root_path = os.path.join(self.full_path, '../')
config.read(os.path.join(self.root_path, 'config.ini'))
try:
self.ua = config['Common']['user-agent']
self.con_timeout = float(config['CveExplorerNVD']['con_timeout'])
self.max_cve_count = int(config['CveExplorerNVD']['max_cve_count'])
self.vuln_db_dir = config['CveExplorerNVD']['vuln_db_dir']
self.nvd_name = config['CveExplorerNVD']['nvd_name']
self.nvd_db_header = str(config['CveExplorerNVD']['nvd_db_header']).split('@')
self.nvd_year_name = config['CveExplorerNVD']['nvd_year_name']
self.nvd_db_dir = os.path.join(self.full_path, self.vuln_db_dir)
self.nvd_path = os.path.join(self.full_path, os.path.join(self.vuln_db_dir, self.nvd_name))
self.nvd_year_path = os.path.join(self.full_path, os.path.join(self.vuln_db_dir, self.nvd_year_name))
self.cve_year_list = config['CveExplorerNVD']['cve_years'].split('@')
self.nvd_meta_url = config['CveExplorerNVD']['nvd_meta_url']
self.nvd_zip_url = config['CveExplorerNVD']['nvd_zip_url']
self.nvd_chk_date_regex = config['CveExplorerNVD']['nvd_chk_date_regex']
self.nvd_chk_hash_regex = config['CveExplorerNVD']['nvd_chk_hash_regex']
self.nvd_date_format = config['CveExplorerNVD']['nvd_date_format']
self.headers = urllib3.make_headers(proxy_basic_auth=self.utility.proxy_user + ':' + self.utility.proxy_pass)
            self.db_columns = {}
self.action_name = 'CVE Explorer'
except Exception as e:
            self.utility.print_message(FAIL, 'Failed to read config.ini : {}'.format(e))
            self.utility.write_log(40, 'Failed to read config.ini : {}'.format(e))
sys.exit(1)
# Set HTTP request header.
self.http_req_header = {'User-Agent': self.ua,
'Connection': 'keep-alive',
'Accept-Language': 'ja,en-US;q=0.7,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Upgrade-Insecure-Requests': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'Cache-Control': 'no-cache'}
        # Create/get the vulnerability database.
for idx, col_name in enumerate(self.nvd_db_header):
            self.db_columns[idx] = col_name
if is_no_update is True and os.path.exists(self.nvd_path):
self.utility.print_message(WARNING, 'Skip updating vulnerability DB.')
self.utility.print_message(WARNING, 'Load existing "{}".'.format(self.nvd_path))
self.df_vuln_db = pd.read_csv(self.nvd_path, sep=',', encoding='utf-8')
else:
self.df_vuln_db = self.initialize_vuln_db()
# Extract vulnerability information from NVD.
def extract_vuln_info(self, cve_items, cve_year, last_modified_date):
self.utility.write_log(20, '[In] Extract vulnerability information [{}]'.format(self.file_name))
all_cve_list = []
# Get last modified date.
last_modified_date_value = last_modified_date
for cve_item in cve_items['CVE_Items']:
# Get problem type (ex. CWE-**).
per_cve = cve_item['cve']
problem_type_value = ''
problems = per_cve['problemtype']['problemtype_data']
for description in problems:
for problem in description['description']:
problem_type_value = problem['value']
# Get description of vulnerability.
description_value = ''
for description in per_cve['description']['description_data']:
description_value = description['value']
# Get CVSS score.
cvss_score_v2_value = ''
cvss_score_v3_value = ''
impact = cve_item['impact']
# CVSS v3 score.
if 'baseMetricV3' in impact:
cvss_score_v3_value = float(impact['baseMetricV3']['cvssV3']['baseScore'])
else:
cvss_score_v3_value = 0
# CVSS v2 score.
if 'baseMetricV2' in impact:
cvss_score_v2_value = format(impact['baseMetricV2']['cvssV2']['baseScore'])
else:
cvss_score_v2_value = 0
# Get data type and CVE id.
data_type_value = per_cve['data_type']
cve_id_value = per_cve['CVE_data_meta']['ID']
# Get configuration of CPE 2.3.
some_cpe = []
for nodes in cve_item['configurations']['nodes']:
if 'children' in nodes:
for child_node in nodes['children']:
if 'cpe_match' in child_node:
for cpe in child_node['cpe_match']:
some_cpe.append(cpe)
else:
if 'cpe_match' in nodes:
for cpe in nodes['cpe_match']:
some_cpe.append(cpe)
for per_cpe in some_cpe:
cpe23_list = per_cpe['cpe23Uri'].split(':')
category_value = cpe23_list[2]
vendor_name_value = cpe23_list[3]
product_name_value = cpe23_list[4]
version_value = cpe23_list[5]
update_value = cpe23_list[6]
edition_value = cpe23_list[7]
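                # Illustrative CPE 2.3 split (hypothetical URI): for
                # 'cpe:2.3:a:apache:http_server:2.4.46:*:*:*:*:*:*:*',
                # split(':') yields ['cpe', '2.3', 'a', 'apache', 'http_server',
                # '2.4.46', '*', ...], so index 2 is the part ('a' = application),
                # 3 the vendor, 4 the product, 5 the version, 6 the update and
                # 7 the edition.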
# Add each item to list.
self.utility.print_message(OK, 'Extract CVE information : '
'{}, Vendor={}, '
'Product={}, Version={}'.format(cve_id_value,
vendor_name_value,
product_name_value,
version_value))
per_cve_list = []
per_cve_list.append(last_modified_date_value)
per_cve_list.append(data_type_value)
per_cve_list.append(problem_type_value)
per_cve_list.append(cve_id_value)
per_cve_list.append(cvss_score_v2_value)
per_cve_list.append(cvss_score_v3_value)
per_cve_list.append(str(category_value).lower())
per_cve_list.append(str(vendor_name_value).lower())
per_cve_list.append(str(product_name_value).lower())
per_cve_list.append(str(version_value).lower())
per_cve_list.append(str(update_value).lower())
per_cve_list.append(str(edition_value).lower())
per_cve_list.append(description_value.replace('\r', ' ').replace('\n', ' '))
all_cve_list.append(per_cve_list)
# Create csv file.
db_path = self.nvd_year_path.replace('*', cve_year)
self.utility.write_log(20, 'Create yearly vulnerability database : {}.'.format(db_path))
pd.DataFrame(all_cve_list).to_csv(db_path, header=False, index=False)
self.utility.write_log(20, '[Out] Extract vulnerability information [{}]'.format(self.file_name))
    # Create the yearly vulnerability database.
def create_vuln_yearly_db(self, cve_year, last_modified_date):
# Get cve list from NVD.
self.utility.write_log(20, '[In] Create yearly vulnerability database [{}]'.format(self.file_name))
target_url = self.nvd_zip_url.replace('*', cve_year)
tmp_file = os.path.join(self.nvd_db_dir, 'temp_' + cve_year + '.zip')
        # Download the zip file (which contains the CVE list) and uncompress it.
target_json_name = ''
self.utility.write_log(20, 'Accessing : {}'.format(target_url))
self.utility.print_message(OK, 'Get {} CVE list from {}'.format(cve_year, target_url))
http = None
ctx = ssl.create_default_context()
ctx.set_ciphers('DEFAULT')
# ctx.set_ciphers('DEFAULT@SECLEVEL=1')
if self.utility.proxy != '':
self.utility.print_message(WARNING, 'Set proxy server: {}'.format(self.utility.proxy))
if self.utility.proxy_user != '':
headers = urllib3.make_headers(proxy_basic_auth=self.utility.proxy_user + ':' + self.utility.proxy_pass)
http = urllib3.ProxyManager(timeout=self.con_timeout,
headers=self.http_req_header,
proxy_url=self.utility.proxy,
proxy_headers=headers)
else:
http = urllib3.ProxyManager(timeout=self.con_timeout,
headers=self.http_req_header,
proxy_url=self.utility.proxy)
else:
http = urllib3.PoolManager(timeout=self.con_timeout,
headers=self.http_req_header,
ssl_version=ssl.PROTOCOL_TLS,
ssl_context=ctx)
try:
with http.request('GET', target_url, preload_content=False) as res, open(tmp_file, 'wb') as fout:
shutil.copyfileobj(res, fout)
except Exception as e:
            self.utility.print_exception(e, 'Failed to access : {}'.format(target_url))
            self.utility.write_log(30, 'Failed to access : {}'.format(target_url))
with zipfile.ZipFile(tmp_file, 'r') as downloaded_zip:
target_json_name = downloaded_zip.namelist()[0]
downloaded_zip.extractall(self.nvd_db_dir)
os.remove(tmp_file)
# Create cve list of cve file.
yearly_cve_list = []
with codecs.open(os.path.join(self.nvd_db_dir, target_json_name), 'r', encoding='utf-8') as fin:
self.extract_vuln_info(json.loads(fin.read().replace('\0', '')), cve_year, last_modified_date)
self.utility.write_log(20, '[Out] Create yearly vulnerability database [{}]'.format(self.file_name))
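        # Note: this list stays empty; the yearly CSV written by
        # extract_vuln_info() above is the real output of this method.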
return yearly_cve_list
    # Initialize the vulnerability database.
def initialize_vuln_db(self):
# Get vulnerabilities information.
self.utility.write_log(20, '[In] Initialize vulnerability database [{}].'.format(self.file_name))
update_flag = False
for cve_year in self.cve_year_list:
# Get last modified date and file hash.
try:
# Get meta information.
target_url = self.nvd_meta_url.replace('*', cve_year)
self.utility.print_message(OK, 'Get {} meta information from {}'.format(cve_year, target_url))
self.utility.write_log(20, 'Accessing : {}'.format(target_url))
res_meta, _, _, _, encoding = self.utility.send_request('GET', target_url)
obj_match = re.match(self.nvd_chk_date_regex, res_meta.data.decode(encoding))
last_modified_date = obj_match.group(obj_match.lastindex)
year_db = self.nvd_year_path.replace('*', cve_year)
if os.path.exists(year_db) is True:
                    # Get the existing database.
df_year_db = pd.read_csv(year_db,
sep=',',
names=self.nvd_db_header,
header=None,
encoding='utf-8').fillna('')
# Check last modified date.
db_cve_date = self.utility.transform_date_object(df_year_db['last_modified_date'][0],
self.nvd_date_format)
currently_cve_date = self.utility.transform_date_object(last_modified_date, self.nvd_date_format)
if db_cve_date < currently_cve_date:
                        # Create the vulnerability database.
self.utility.print_message(OK, 'Update {} : latest date={}, last modified date={}'.
format(year_db,
currently_cve_date.strftime(self.nvd_date_format),
db_cve_date.strftime(self.nvd_date_format)))
self.create_vuln_yearly_db(cve_year, last_modified_date)
update_flag = True
else:
self.utility.print_message(FAIL, 'Skip updating {} : no update from {}'.
format(year_db, db_cve_date.strftime(self.nvd_date_format)))
else:
                    # Create the vulnerability database.
self.create_vuln_yearly_db(cve_year, last_modified_date)
update_flag = True
except Exception as e:
                self.utility.print_exception(e, 'Failed to get the last modified date.')
                self.utility.write_log(30, 'Failed to get the last modified date.')
df_vuln_db = None
if update_flag is True:
try:
                # Load the updated yearly vulnerability databases.
self.utility.print_message(OK, 'Create vulnerability database : {}'.format(self.nvd_path))
year_csv_list = glob.glob(os.path.join(self.nvd_db_dir, self.nvd_year_name))
# Create DataFrame.
cve_list = []
for file in year_csv_list:
cve_list.append(pd.read_csv(file, sep=',', header=None, encoding='utf-8').fillna(''))
if len(cve_list) != 0:
                    # Create the new vulnerability database.
                    df_vuln_db = pd.concat(cve_list).rename(columns=self.db_columns).sort_values(
                        by=['cvss_v3_score', 'cvss_v2_score'], ascending=False)
df_vuln_db.to_csv(self.nvd_path, mode='w', index=False)
except Exception as e:
                self.utility.print_exception(e, 'Failed to create the vulnerability database : {}.'.format(e))
                self.utility.write_log(30, 'Failed to create the vulnerability database : {}.'.format(e))
else:
            # Load the existing vulnerability database.
self.utility.print_message(OK, 'Load vulnerability database : {}'.format(self.nvd_path))
df_vuln_db = pd.read_csv(self.nvd_path, sep=',', encoding='utf-8')
self.utility.write_log(20, '[Out] Initialize vulnerability database [{}].'.format(self.file_name))
return df_vuln_db
# Explore CVE information.
def cve_explorer(self, product_list):
msg = self.utility.make_log_msg(self.utility.log_in,
self.utility.log_dis,
self.file_name,
action=self.action_name,
note='Explore CVE information',
dest=self.utility.target_host)
self.utility.write_log(20, msg)
for prod_idx, product in enumerate(product_list):
self.utility.print_message(NOTE, 'Explore CVE of {}/{} from NVD.'.format(product[1], product[2]))
df_selected_cve = None
cve_info = ''
if product[1] != '*' and product[3] != '*':
df_selected_cve = self.df_vuln_db[(self.df_vuln_db['vendor_name'] == product[1]) &
(self.df_vuln_db['product_name'] == product[2]) &
(self.df_vuln_db['version_value'] == product[3])]
elif product[1] != '*' and product[3] == '*':
df_selected_cve = self.df_vuln_db[(self.df_vuln_db['vendor_name'] == product[1]) &
(self.df_vuln_db['product_name'] == product[2])]
elif product[1] == '*' and product[3] != '*':
df_selected_cve = self.df_vuln_db[(self.df_vuln_db['product_name'] == product[2]) &
(self.df_vuln_db['version_value'] == product[3])]
else:
df_selected_cve = self.df_vuln_db[(self.df_vuln_db['product_name'] == product[2])]
for cve_idx, cve_id in enumerate(df_selected_cve['id'].drop_duplicates()):
msg = 'Find {} for {}/{} {}.'.format(cve_id, product[1], product[2], product[3])
self.utility.print_message(WARNING, msg)
msg = self.utility.make_log_msg(self.utility.log_mid,
self.utility.log_dis,
self.file_name,
action=self.action_name,
note=msg,
dest=self.utility.target_host)
self.utility.write_log(30, msg)
cve_info += cve_id + '\n'
if cve_idx == (self.max_cve_count - 1):
break
# Insert CVE to product list.
if cve_info == '':
cve_info = 'Cannot search.'
product_list[prod_idx].insert(len(product), cve_info)
msg = self.utility.make_log_msg(self.utility.log_out,
self.utility.log_dis,
self.file_name,
action=self.action_name,
note='Explore CVE information',
dest=self.utility.target_host)
self.utility.write_log(20, msg)
return product_list
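# A minimal usage sketch (the utility object and product rows are assumptions;
# cve_explorer() reads indices 1-3 of each row as vendor, product and version,
# with '*' acting as a wildcard):
#
#   explorer = CveExplorerNVD(utility, is_no_update=False)
#   products = [['httpd', 'apache', 'http_server', '2.4.46']]
#   products = explorer.cve_explorer(products)
#   # Each row gains a trailing string of newline-separated CVE IDs.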
| 10,955 |
15,947 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example dag for using the KubernetesPodOperator.
"""
from datetime import datetime
from kubernetes.client import models as k8s
from airflow import DAG
from airflow.kubernetes.secret import Secret
from airflow.operators.bash import BashOperator
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
# [START howto_operator_k8s_cluster_resources]
secret_file = Secret('volume', '/etc/sql_conn', 'airflow-secrets', 'sql_alchemy_conn')
secret_env = Secret('env', 'SQL_CONN', 'airflow-secrets', 'sql_alchemy_conn')
secret_all_keys = Secret('env', None, 'airflow-secrets-2')
volume_mount = k8s.V1VolumeMount(
name='test-volume', mount_path='/root/mount_file', sub_path=None, read_only=True
)
configmaps = [
k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='test-configmap-1')),
k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='test-configmap-2')),
]
volume = k8s.V1Volume(
name='test-volume',
persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name='test-volume'),
)
port = k8s.V1ContainerPort(name='http', container_port=80)
init_container_volume_mounts = [
k8s.V1VolumeMount(mount_path='/etc/foo', name='test-volume', sub_path=None, read_only=True)
]
init_environments = [k8s.V1EnvVar(name='key1', value='value1'), k8s.V1EnvVar(name='key2', value='value2')]
init_container = k8s.V1Container(
name="init-container",
image="ubuntu:16.04",
env=init_environments,
volume_mounts=init_container_volume_mounts,
command=["bash", "-cx"],
args=["echo 10"],
)
affinity = k8s.V1Affinity(
node_affinity=k8s.V1NodeAffinity(
preferred_during_scheduling_ignored_during_execution=[
k8s.V1PreferredSchedulingTerm(
weight=1,
preference=k8s.V1NodeSelectorTerm(
match_expressions=[
                        k8s.V1NodeSelectorRequirement(key="disktype", operator="In", values=["ssd"])
]
),
)
]
),
pod_affinity=k8s.V1PodAffinity(
required_during_scheduling_ignored_during_execution=[
k8s.V1WeightedPodAffinityTerm(
weight=1,
pod_affinity_term=k8s.V1PodAffinityTerm(
label_selector=k8s.V1LabelSelector(
match_expressions=[
                            k8s.V1LabelSelectorRequirement(key="security", operator="In", values=["S1"])
]
),
topology_key="failure-domain.beta.kubernetes.io/zone",
),
)
]
),
)
tolerations = [k8s.V1Toleration(key="key", operator="Equal", value="value")]
# [END howto_operator_k8s_cluster_resources]
with DAG(
dag_id='example_kubernetes_operator',
schedule_interval=None,
start_date=datetime(2021, 1, 1),
tags=['example'],
) as dag:
k = KubernetesPodOperator(
namespace='default',
image="ubuntu:16.04",
cmds=["bash", "-cx"],
arguments=["echo", "10"],
labels={"foo": "bar"},
secrets=[secret_file, secret_env, secret_all_keys],
ports=[port],
volumes=[volume],
volume_mounts=[volume_mount],
env_from=configmaps,
name="airflow-test-pod",
task_id="task",
affinity=affinity,
is_delete_operator_pod=True,
hostnetwork=False,
tolerations=tolerations,
init_containers=[init_container],
priority_class_name="medium",
)
# [START howto_operator_k8s_private_image]
quay_k8s = KubernetesPodOperator(
namespace='default',
image='quay.io/apache/bash',
image_pull_secrets=[k8s.V1LocalObjectReference('testquay')],
cmds=["bash", "-cx"],
arguments=["echo", "10", "echo pwd"],
labels={"foo": "bar"},
name="airflow-private-image-pod",
is_delete_operator_pod=True,
in_cluster=True,
task_id="task-two",
get_logs=True,
)
# [END howto_operator_k8s_private_image]
# [START howto_operator_k8s_write_xcom]
write_xcom = KubernetesPodOperator(
namespace='default',
image='alpine',
cmds=["sh", "-c", "mkdir -p /airflow/xcom/;echo '[1,2,3,4]' > /airflow/xcom/return.json"],
name="write-xcom",
do_xcom_push=True,
is_delete_operator_pod=True,
in_cluster=True,
task_id="write-xcom",
get_logs=True,
)
pod_task_xcom_result = BashOperator(
bash_command="echo \"{{ task_instance.xcom_pull('write-xcom')[0] }}\"",
task_id="pod_task_xcom_result",
)
# [END howto_operator_k8s_write_xcom]
write_xcom >> pod_task_xcom_result
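    # The bitshift chaining above orders the tasks: pod_task_xcom_result runs
    # only after write-xcom has pushed its '[1,2,3,4]' payload to XCom.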
| 2,478 |
318 | <gh_stars>100-1000
/*
* Copyright Beijing 58 Information Technology Co.,Ltd.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Log.h
*
* Created on: 2011-7-6
* Author: Service Platform Architecture Team (<EMAIL>)
*/
#ifndef __LOG_H_
#define __LOG_H_
#include <time.h>
#include <stdarg.h>
#include <stdio.h>
#include <unistd.h>
#define GAEA_DEBUG 0
#define GAEA_VERBOSE 1
#define GAEA_NOTICE 2
#define GAEA_WARNING 3
#ifdef __cplusplus
extern "C" {
#endif
void gaeaLog(int level, const char *fmt, ...);
void setLogFilePath(char* pa);
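/* Hedged usage sketch (path and message are illustrative):
 * setLogFilePath("/var/log/gaea.log");
 * gaeaLog(GAEA_WARNING, "connect to %s failed: errno=%d\n", "127.0.0.1:8080", 111);
 */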
#ifdef __cplusplus
}
#endif
#endif /* __LOG_H_ */
| 482 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.core.http.netty;
import com.azure.core.http.HttpClient;
import com.azure.core.http.HttpMethod;
import com.azure.core.http.HttpRequest;
import com.azure.core.http.ProxyOptions;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.handler.logging.LogLevel;
import reactor.netty.resources.ConnectionProvider;
import reactor.netty.tcp.TcpClient;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.time.Duration;
/**
* Code snippets for {@link NettyAsyncHttpClientBuilder}
*/
public class NettyAsyncHttpClientBuilderJavaDocCodeSnippets {
/**
* Code snippet for simple http client instantiation.
*/
public void simpleInstantiation() {
// BEGIN: com.azure.core.http.netty.instantiation-simple
HttpClient client = new NettyAsyncHttpClientBuilder()
.port(8080)
.wiretap(true)
.build();
// END: com.azure.core.http.netty.instantiation-simple
}
/**
* Code snippet for creating http client with fixed thread pool.
*/
public void fixedThreadPoolSample() {
// BEGIN: com.azure.core.http.netty.NettyAsyncHttpClientBuilder#eventLoopGroup
int threadCount = 5;
HttpClient client = new NettyAsyncHttpClientBuilder()
.eventLoopGroup(new NioEventLoopGroup(threadCount))
.build();
// END: com.azure.core.http.netty.NettyAsyncHttpClientBuilder#eventLoopGroup
}
/**
* Code snippet for creating an HttpClient with a specified {@link ConnectionProvider}.
*/
public void connectionProviderSample() {
// BEGIN: com.azure.core.http.netty.NettyAsyncHttpClientBuilder.connectionProvider#ConnectionProvider
// The following creates a connection provider which will have each connection use the base name
// 'myHttpConnection', has a limit of 500 concurrent connections in the connection pool, has no limit on the
// number of connection requests that can be pending when all connections are in use, and removes a connection
// from the pool if the connection isn't used for 60 seconds.
ConnectionProvider connectionProvider = ConnectionProvider.builder("myHttpConnection")
.maxConnections(500)
.pendingAcquireMaxCount(-1)
.maxIdleTime(Duration.ofSeconds(60))
.build();
HttpClient client = new NettyAsyncHttpClientBuilder()
.connectionProvider(connectionProvider)
.build();
// END: com.azure.core.http.netty.NettyAsyncHttpClientBuilder.connectionProvider#ConnectionProvider
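        // Note: pendingAcquireMaxCount(-1) removes the cap on queued acquire
        // requests, so callers beyond the 500-connection limit wait in the
        // queue instead of failing fast.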
}
/**
* Code snippet for creating http client with proxy.
*/
public void proxySample() {
// BEGIN: com.azure.core.http.netty.NettyAsyncHttpClientBuilder#proxy
HttpClient client = new NettyAsyncHttpClientBuilder()
.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("<proxy-host>", 8888)))
.build();
// END: com.azure.core.http.netty.NettyAsyncHttpClientBuilder#proxy
}
/**
* Code snippet for creating a new http client based on an existing reactor netty HttpClient.
* The existing client is configured for netty level logging.
*/
public void fromExistingReactorNettyClient() {
// BEGIN: com.azure.core.http.netty.from-existing-http-client
// Creates a reactor-netty client with netty logging enabled.
reactor.netty.http.client.HttpClient baseHttpClient = reactor.netty.http.client.HttpClient.create()
.wiretap(TcpClient.class.getName(), LogLevel.INFO);
// Create an HttpClient based on above reactor-netty client and configure EventLoop count.
HttpClient client = new NettyAsyncHttpClientBuilder(baseHttpClient)
.eventLoopGroup(new NioEventLoopGroup(5))
.build();
// END: com.azure.core.http.netty.from-existing-http-client
}
/**
* Code snippet to demonstrate the use of a Netty based http client that disables buffer copy.
*/
public void disabledBufferCopyClientSample() {
HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http://localhost");
// BEGIN: com.azure.core.http.netty.disabled-buffer-copy
HttpClient client = new NettyAsyncHttpClientBuilder()
.port(8080)
.disableBufferCopy(true)
.build();
client.send(httpRequest)
.flatMapMany(response -> response.getBody())
.map(byteBuffer -> completeProcessingByteBuffer(byteBuffer))
.subscribe();
// END: com.azure.core.http.netty.disabled-buffer-copy
}
private int completeProcessingByteBuffer(ByteBuffer byteBuffer) {
return byteBuffer.remaining();
}
}
| 1,798 |
778 | <reponame>rahulyesantharao/tuplex
//--------------------------------------------------------------------------------------------------------------------//
// //
// Tuplex: Blazing Fast Python Data Science //
// //
// //
// (c) 2017 - 2021, Tuplex team //
// Created by <NAME> first on 1/1/2021 //
// License: Apache 2.0 //
//--------------------------------------------------------------------------------------------------------------------//
#ifndef TUPLEX_VIRTUALFILE_H
#define TUPLEX_VIRTUALFILE_H
#include "IFileSystemImpl.h"
#include "VirtualFileSystemBase.h"
namespace tuplex {
class VirtualFile;
class VirtualFileSystem;
/*!
* abstract base class for a file system specific Implementation
*/
class VirtualFile {
public:
VirtualFile() : _uri(""), _mode(VFS_READ) {}
VirtualFile(const URI& uri, VirtualFileMode mode) : _uri(uri), _mode(mode) {}
virtual ~VirtualFile() = default;
/*!
         * (thread-safe!) writes buffer to URI location with immediate persistence
* @param buffer data buffer, must be non-null
* @param bufferSize number of bytes to write
* @return status of write operation
*/
virtual VirtualFileSystemStatus write(const void* buffer, uint64_t bufferSize) = 0;
/*!
* reads up to nbytes bytes towards buffer.
* @param buffer memory location where to store bytes
* @param nbytes maximum number of bytes to read
* @param bytesRead actual bytes read. Always set to at least 0, even in error case
* @return status of read operation
*/
virtual VirtualFileSystemStatus read(void* buffer, uint64_t nbytes, size_t *bytesRead=nullptr) const = 0;
/*!
         * this function reads only the required bytes, without buffering; it might be faster, e.g. for S3 requests
* @param buffer
* @param nbytes
* @param bytesRead
* @return
*/
virtual VirtualFileSystemStatus readOnly(void* buffer, uint64_t nbytes, size_t *bytesRead=nullptr) const {
return read(buffer, nbytes, bytesRead);
}
/*!
* closes file
* @return status of close operation
*/
virtual VirtualFileSystemStatus close() = 0;
/*!
* move file cursor by delta from current position
         * @param delta how many bytes to move forward (positive) or backward (negative); the value is auto-clamped
* @return if seek was successful
*/
virtual VirtualFileSystemStatus seek(int64_t delta) = 0;
/*!
* returns size in bytes of file
* @return
*/
virtual size_t size() const = 0;
URI getURI() const { return _uri; }
virtual bool is_open() const = 0;
/*!
* indicates whether end-of-file was reached or not.
* @return
*/
virtual bool eof() const = 0;
protected:
URI _uri;
VirtualFileMode _mode;
};
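    /*!
     * Hedged usage sketch (assumes a concrete VirtualFile obtained from a
     * VirtualFileSystem factory; the VFS_OK status name is an assumption):
     *
     *   char buf[4096];
     *   size_t bytesRead = 0;
     *   while (!file->eof() &&
     *          file->read(buf, sizeof(buf), &bytesRead) == VirtualFileSystemStatus::VFS_OK) {
     *       consume(buf, bytesRead); // hypothetical consumer
     *   }
     *   file->close();
     */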
}
#endif //TUPLEX_VIRTUALFILE_H | 1,849 |
2,690 | # -*- test-case-name: flocker.node.agents.test.test_blockdevice -*-
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
This module implements the parts of a block-device based dataset
convergence agent that can be re-used against many different kinds of block
devices.
"""
import itertools
from uuid import UUID
from stat import S_IRWXU, S_IRWXG, S_IRWXO
from errno import EEXIST
from datetime import timedelta
from eliot import MessageType, ActionType, Field, Logger
from eliot.serializers import identity
from zope.interface import implementer, Interface, provider
from pyrsistent import PClass, field, pmap_field, pset_field, thaw, CheckedPMap
from characteristic import with_cmp
from twisted.python.reflect import safe_repr
from twisted.internet.defer import succeed, fail
from twisted.python.filepath import FilePath
from twisted.python.components import proxyForInterface
from twisted.python.constants import (
Values, ValueConstant,
Names, NamedConstant,
)
from .blockdevice_manager import BlockDeviceManager
from ._logging import DATASET_ID, COUNT
from .. import (
IDeployer, ILocalState, IStateChange, in_parallel, NoOp,
)
from .._deploy import NotInUseDatasets
from ...control import NodeState, Manifestation, Dataset, NonManifestDatasets
from ...control._model import pvector_field
from ...common import RACKSPACE_MINIMUM_VOLUME_SIZE, auto_threaded, provides
from ...common.algebraic import TaggedUnionInvariant
# Eliot is transitioning away from the "Logger instances all over the place"
# approach. And it's hard to put Logger instances on PClass subclasses which
# we have a lot of. So just use this global logger for now.
_logger = Logger()
# The size which will be assigned to datasets with an unspecified
# maximum_size.
# XXX: Make this configurable. FLOC-2679
DEFAULT_DATASET_SIZE = RACKSPACE_MINIMUM_VOLUME_SIZE
# The metadata key for flocker profiles.
PROFILE_METADATA_KEY = u"clusterhq:flocker:profile"
class DatasetStates(Names):
"""
States that a ``Dataset`` can be in.
"""
# Doesn't exist yet.
NON_EXISTENT = NamedConstant()
# Existing volume not recorded as owning a dataset
UNREGISTERED = NamedConstant()
# Ownership recorded but not reported as a volume
# This can happen
# - after a volume has been deleted (since the dataset registrations are
# never cleaned up.
# - Some backends (AWS in particular) are only eventually consistent, so
# list_volumes might not report the volume even after it has already
# been created.
REGISTERED = NamedConstant()
# Exists, but attached elsewhere to a live host:
ATTACHED_ELSEWHERE = NamedConstant()
# Exists, but attached elsewhere to a dead host:
ATTACHED_TO_DEAD_NODE = NamedConstant()
# Exists, but not attached
NON_MANIFEST = NamedConstant()
# Attached to this node but no filesystem
ATTACHED_NO_FILESYSTEM = NamedConstant()
# Attached to this node, has filesystem
ATTACHED = NamedConstant()
# Mounted on this node
MOUNTED = NamedConstant()
# Deleted from the driver
DELETED = NamedConstant()
class DiscoveredDataset(PClass):
"""
Dataset as discovered by deployer.
:ivar DatasetStates state: The state this dataset was determined to be in.
:ivar int maximum_size: The maximum size of the dataset.
:param unicode blockdevice_id: The unique identifier of the
``IBlockDeviceAPI``-managed volume.
:ivar FilePath device_path: The absolute path to the block device file on
the node where the dataset is attached.
:ivar FilePath mount_point: The absolute path to the location on the node
where the dataset will be mounted.
"""
state = field(
invariant=lambda state: (state in DatasetStates.iterconstants(),
"Not a valid state"),
mandatory=True,
)
dataset_id = field(type=UUID, mandatory=True)
maximum_size = field(type=(int, long))
blockdevice_id = field(type=unicode, mandatory=True)
device_path = field(FilePath)
mount_point = field(FilePath)
__invariant__ = TaggedUnionInvariant(
tag_attribute='state',
attributes_for_tag={
DatasetStates.ATTACHED_ELSEWHERE: {'maximum_size'},
DatasetStates.ATTACHED_TO_DEAD_NODE: {'maximum_size'},
DatasetStates.NON_MANIFEST: {'maximum_size'},
DatasetStates.UNREGISTERED: {'maximum_size'},
DatasetStates.REGISTERED: set(),
DatasetStates.ATTACHED_NO_FILESYSTEM: {
'device_path', 'maximum_size'},
DatasetStates.ATTACHED: {
'device_path', 'maximum_size'},
DatasetStates.MOUNTED: {
'device_path', 'mount_point', 'maximum_size'},
},
)
class DesiredDataset(PClass):
"""
Dataset as requested by configuration and applications.
"""
state = field(
invariant=lambda state: (state in DatasetStates.iterconstants(),
"Not a valid state"),
mandatory=True,
)
dataset_id = field(type=UUID, mandatory=True)
maximum_size = field(type=(int, long))
metadata = pmap_field(
key_type=unicode,
value_type=unicode,
)
mount_point = field(FilePath)
filesystem = field(unicode, initial=u"ext4", mandatory=True,
invariant=lambda v: (v == "ext4", "Must be 'ext4'."))
__invariant__ = TaggedUnionInvariant(
tag_attribute='state',
attributes_for_tag={
DatasetStates.NON_MANIFEST: {"maximum_size"},
DatasetStates.MOUNTED: {"maximum_size", "mount_point"},
DatasetStates.DELETED: set(),
},
)
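# Illustrative reading of the invariant above: a ``DesiredDataset`` tagged
# ``MOUNTED`` must carry ``maximum_size`` and ``mount_point``, one tagged
# ``NON_MANIFEST`` only ``maximum_size``, and a ``DELETED`` one neither.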
class IDatasetStateChangeFactory(Interface):
def from_state_and_config(discovered_dataset, desired_dataset):
"""
Create a state change that will bring the discovered dataset into the
state described by the desired dataset.
:param discovered_dataset: The discovered state of the dataset or
``None`` if nothing is known about the dataset.
:type discovered_dataset: ``DiscoveredDataset`` or ``NoneType``
:param DesiredDataset desired_dataset: The desired state of the
dataset or ``None`` if nothing is known about the desired state of
the dataset.
:type desired_dataset: ``DesiredDataset`` or ``NoneType``
:return: The desired state change.
:rtype: ``IStateChange``.
"""
class ICalculator(Interface):
"""
An object that can calculate the changes required to bring dataset state
and desired dataset configuration into alignment.
"""
def calculate_changes_for_datasets(
discovered_datasets, desired_datasets,
):
"""
Calculate the state changes necessary to make the local state match the
desired cluster configuration.
:param discovered_datasets: The datasets that have been discovered.
:type discovered_datasets: mapping of `dataset_id`` to
``DiscoveredDataset``.
:param desired_datasets: The datasets that are desired on this node.
:type desired_datasets: mapping of `dataset_id`` to ``DesiredDataset``.
:return: An ``IStateChange`` provider.
"""
class VolumeException(Exception):
"""
A base class for exceptions raised by ``IBlockDeviceAPI`` operations.
:param unicode blockdevice_id: The unique identifier of the
``IBlockDeviceAPI``-managed volume.
"""
def __init__(self, blockdevice_id):
if not isinstance(blockdevice_id, unicode):
raise TypeError(
'Unexpected blockdevice_id type. '
'Expected unicode. '
'Got {!r}.'.format(blockdevice_id)
)
Exception.__init__(self, blockdevice_id)
self.blockdevice_id = blockdevice_id
class UnknownVolume(VolumeException):
"""
The block device could not be found.
"""
class AlreadyAttachedVolume(VolumeException):
"""
A failed attempt to attach a block device that is already attached.
"""
class UnattachedVolume(VolumeException):
"""
An attempt was made to operate on an unattached volume but the operation
requires the volume to be attached.
"""
class DatasetExists(Exception):
"""
A ``BlockDeviceVolume`` with the requested dataset_id already exists.
"""
def __init__(self, blockdevice):
Exception.__init__(self, blockdevice)
self.blockdevice = blockdevice
class FilesystemExists(Exception):
"""
A failed attempt to create a filesystem on a block device that already has
one.
"""
def __init__(self, device):
Exception.__init__(self, device)
self.device = device
class UnknownInstanceID(Exception):
"""
Could not compute instance ID for block device.
"""
def __init__(self, blockdevice):
Exception.__init__(
self,
'Could not find valid instance ID for {}'.format(blockdevice))
self.blockdevice = blockdevice
DATASET = Field(
u"dataset",
lambda dataset: dataset.dataset_id,
u"The unique identifier of a dataset."
)
VOLUME = Field(
u"volume",
lambda volume: volume.blockdevice_id,
u"The unique identifier of a volume."
)
FILESYSTEM_TYPE = Field.forTypes(
u"filesystem_type",
[unicode],
u"The name of a filesystem."
)
MOUNTPOINT = Field(
u"mountpoint",
lambda path: path.path,
u"The absolute path to the location on the node where the dataset will be "
u"mounted.",
)
BLOCK_DEVICE_ID = Field(
u"block_device_id",
lambda id: unicode(id),
u"The unique identifier if the underlying block device."
)
BLOCK_DEVICE_SIZE = Field(
u"block_device_size",
identity,
u"The size of the underlying block device."
)
BLOCK_DEVICE_COMPUTE_INSTANCE_ID = Field(
u"block_device_compute_instance_id",
identity,
u"An identifier for the host to which the underlying block device is "
u"attached.",
)
BLOCK_DEVICE_PATH = Field(
u"block_device_path",
lambda path: path.path,
u"The system device file for an attached block device."
)
PROFILE_NAME = Field.forTypes(
u"profile_name",
[unicode],
u"The name of a profile for a volume."
)
MAXIMUM_SIZE = Field.forTypes(
u"maximum_size",
[int],
u"The maximum size of a volume.",
)
METADATA = Field(
u"metadata",
thaw,
u"The metadata of a dataset.",
)
CREATE_BLOCK_DEVICE_DATASET = ActionType(
u"agent:blockdevice:create",
[DATASET_ID, MAXIMUM_SIZE, METADATA],
[],
u"A block-device-backed dataset is being created.",
)
UNMOUNT_BLOCK_DEVICE = ActionType(
u"agent:blockdevice:unmount",
[DATASET_ID],
[],
u"A block-device-backed dataset is being unmounted.",
)
UNMOUNT_BLOCK_DEVICE_DETAILS = MessageType(
u"agent:blockdevice:unmount:details",
[BLOCK_DEVICE_ID, BLOCK_DEVICE_PATH],
u"The device file for a block-device-backed dataset has been discovered."
)
MOUNT_BLOCK_DEVICE = ActionType(
u"agent:blockdevice:mount",
[DATASET_ID, BLOCK_DEVICE_PATH],
[],
u"A block-device-backed dataset is being mounted.",
)
MOUNT_BLOCK_DEVICE_DETAILS = MessageType(
u"agent:blockdevice:mount:details",
[BLOCK_DEVICE_PATH],
u"The device file for a block-device-backed dataset has been discovered."
)
ATTACH_VOLUME = ActionType(
u"agent:blockdevice:attach_volume",
[DATASET_ID, BLOCK_DEVICE_ID],
[],
u"The volume for a block-device-backed dataset is being attached."
)
DETACH_VOLUME = ActionType(
u"agent:blockdevice:detach_volume",
[DATASET_ID, BLOCK_DEVICE_ID],
[],
u"The volume for a block-device-backed dataset is being detached."
)
DESTROY_VOLUME = ActionType(
u"agent:blockdevice:destroy_volume",
[BLOCK_DEVICE_ID],
[],
u"The volume for a block-device-backed dataset is being destroyed."
)
CREATE_FILESYSTEM = ActionType(
u"agent:blockdevice:create_filesystem",
[BLOCK_DEVICE_PATH, FILESYSTEM_TYPE],
[],
u"A block device is being initialized with a filesystem.",
)
INVALID_DEVICE_PATH_VALUE = Field(
u"invalid_value",
lambda value: safe_repr(value),
u"A value returned from IBlockDeviceAPI.get_device_path which could not "
u"possibly be correct. This likely indicates a bug in the "
"IBlockDeviceAPI implementation.",
)
INVALID_DEVICE_PATH = MessageType(
u"agent:blockdevice:discover_state:invalid_device_path",
[DATASET_ID, INVALID_DEVICE_PATH_VALUE],
u"The device path given by the IBlockDeviceAPI implementation was "
u"invalid.",
)
CREATE_VOLUME_PROFILE_DROPPED = MessageType(
u"agent:blockdevice:profiles:create_volume_with_profiles:profile_dropped",
[DATASET_ID, PROFILE_NAME],
u"The profile of a volume was dropped during creation because the backend "
u"does not support profiles. Use a backend that provides "
u"IProfiledBlockDeviceAPI to get profile support."
)
DISCOVERED_RAW_STATE = MessageType(
u"agent:blockdevice:raw_state",
[Field(u"raw_state", safe_repr)],
u"The discovered raw state of the node's block device volumes.")
UNREGISTERED_VOLUME_ATTACHED = MessageType(
u"agent:blockdevice:unregistered_volume_attached",
[DATASET_ID, BLOCK_DEVICE_ID],
u"A blockdevice that isn't registered as belonging to a dataset is "
u"attached to an instance."
)
FUNCTION_NAME = Field.for_types(
"function", [bytes, unicode],
u"The name of the function.")
CALL_LIST_VOLUMES = MessageType(
u"flocker:node:agents:blockdevice:list_volumes",
[FUNCTION_NAME, COUNT],
u"list_volumes called.",)
REGISTER_BLOCKDEVICE = ActionType(
u"agent:blockdevice:register",
[DATASET_ID, BLOCK_DEVICE_ID],
[],
u"A block-device is being registered as belonging to a dataset.",
)
def _volume_field():
"""
Create and return a ``PClass`` ``field`` to hold a ``BlockDeviceVolume``.
"""
return field(
type=BlockDeviceVolume, mandatory=True,
# Disable the automatic PClass.create factory. Callers can just
# supply the right type, we don't need the magic coercion behavior
# supplied by default.
factory=lambda x: x
)
@with_cmp(["blockdevice_id", "dataset_id", "size", "attached_to"])
class BlockDeviceVolume(PClass):
"""
A block device that may be attached to a host.
:ivar unicode blockdevice_id: An identifier for the block device which is
unique across the entire cluster. For example, an EBS volume
identifier (``vol-4282672b``). This is used to address the block
device for operations like attach and detach.
:ivar int size: The size, in bytes, of the block device.
:ivar unicode attached_to: An opaque identifier for the node to which the
volume is attached or ``None`` if it is currently unattached. The
identifier is supplied by the ``IBlockDeviceAPI.compute_instance_id``
method based on the underlying infrastructure services (for example, if
the cluster runs on AWS, this is very likely an EC2 instance id).
:ivar UUID dataset_id: The Flocker dataset ID associated with this volume.
"""
blockdevice_id = field(type=unicode, mandatory=True)
size = field(type=(int, long), mandatory=True)
attached_to = field(
type=(unicode, type(None)), initial=None, mandatory=True
)
dataset_id = field(type=UUID, mandatory=True)
def _blockdevice_volume_from_datasetid(volumes, dataset_id):
"""
A helper to get the volume for a given dataset_id.
:param list volumes: The ``BlockDeviceVolume`` instances to inspect for a
match.
:param UUID dataset_id: The identifier of the dataset the volume of which
to find.
:return: Either a ``BlockDeviceVolume`` matching the given ``dataset_id``
or ``None`` if no such volume can be found.
"""
for volume in volumes:
if volume.dataset_id == dataset_id:
return volume
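# (When no volume matches, the loop above falls through and the function
# returns the implicit ``None`` described in its docstring.)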
@implementer(IStateChange)
@provider(IDatasetStateChangeFactory)
class CreateFilesystem(PClass):
"""
Create a filesystem on a block device.
:ivar FilePath device: The device on which to create the filesystem.
:ivar unicode filesystem: The name of the filesystem type to create. For
example, ``u"ext4"``.
"""
device = field(type=FilePath, mandatory=True)
filesystem = field(type=unicode, mandatory=True)
@classmethod
def from_state_and_config(cls, discovered_dataset, desired_dataset):
return cls(
device=discovered_dataset.device_path,
filesystem=desired_dataset.filesystem,
)
@property
def eliot_action(self):
return CREATE_FILESYSTEM(
_logger, block_device_path=self.device,
filesystem_type=self.filesystem
)
def run(self, deployer, state_persister):
try:
_ensure_no_filesystem(self.device, deployer.block_device_manager)
deployer.block_device_manager.make_filesystem(self.device,
self.filesystem)
except:
return fail()
return succeed(None)
def _ensure_no_filesystem(device, block_device_manager):
"""
Raises an error if there's already a filesystem on ``device``.
:param FilePath device: The path to the device to query.
:param block_device_manager: The ``IBlockDeviceManager`` implementer to use
for managing blockdevices on this machine.
:raises: ``FilesystemExists`` if there is already a filesystem on
``device``.
:return: ``None``
"""
if block_device_manager.has_filesystem(device):
raise FilesystemExists(device)
def _valid_size(size):
"""
Pyrsistent invariant for filesystem size, which must be a multiple of 1024
bytes.
"""
if size % 1024 == 0:
return (True, "")
return (
False, "Filesystem size must be multiple of 1024, not %d" % (size,)
)
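# For example (illustrative): _valid_size(2048) returns (True, "") while
# _valid_size(1000) returns (False, "Filesystem size must be multiple of 1024,
# not 1000").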
@implementer(IStateChange)
@provider(IDatasetStateChangeFactory)
class MountBlockDevice(PClass):
"""
    Mount the filesystem of the block device backed by a particular volume.
:ivar UUID dataset_id: The unique identifier of the dataset associated with
the filesystem to mount.
:ivar FilePath device_path: The path of the block device to be mounted.
:ivar FilePath mountpoint: The filesystem location at which to mount the
volume's filesystem. If this does not exist, it is created.
"""
device_path = field(type=FilePath, mandatory=True)
mountpoint = field(type=FilePath, mandatory=True)
# Only for logging
dataset_id = field(type=UUID, mandatory=True)
@classmethod
def from_state_and_config(cls, discovered_dataset, desired_dataset):
return cls(
dataset_id=desired_dataset.dataset_id,
device_path=discovered_dataset.device_path,
mountpoint=desired_dataset.mount_point,
)
@property
def eliot_action(self):
return MOUNT_BLOCK_DEVICE(_logger, dataset_id=self.dataset_id,
block_device_path=self.device_path)
def run(self, deployer, state_persister):
"""
Run the system ``mount`` tool to mount this change's volume's block
device. The volume must be attached to this node.
"""
# Create the directory where a device will be mounted.
# The directory's parent's permissions will be set to only allow access
# by owner, to limit access by other users on the node.
try:
self.mountpoint.makedirs()
except OSError as e:
if e.errno != EEXIST:
return fail()
self.mountpoint.parent().chmod(S_IRWXU)
# This should be asynchronous. FLOC-1797
deployer.block_device_manager.mount(self.device_path, self.mountpoint)
# Remove lost+found to ensure filesystems always start out empty.
# Mounted filesystem is also made world
# writeable/readable/executable since we can't predict what user a
# container will run as. We make sure we change mounted
# filesystem's root directory permissions, so we only do this
# after the filesystem is mounted. If other files exist we don't
# bother with either change, since at that point user has modified
# the volume and we don't want to undo their changes by mistake
# (e.g. postgres doesn't like world-writeable directories once
# it's initialized).
# A better way is described in
# https://clusterhq.atlassian.net/browse/FLOC-2074
lostfound = self.mountpoint.child(b"lost+found")
if self.mountpoint.children() == [lostfound]:
lostfound.remove()
self.mountpoint.chmod(S_IRWXU | S_IRWXG | S_IRWXO)
self.mountpoint.restat()
return succeed(None)
@implementer(IStateChange)
@provider(IDatasetStateChangeFactory)
class UnmountBlockDevice(PClass):
"""
Unmount the filesystem mounted from the block device backed by a particular
volume.
:ivar UUID dataset_id: The unique identifier of the dataset associated with
the filesystem to unmount.
:ivar unicode blockdevice_id: The unique identifier of the mounted
``IBlockDeviceAPI``-managed volume to be unmounted.
"""
dataset_id = field(type=UUID, mandatory=True)
blockdevice_id = field(type=unicode, mandatory=True)
@classmethod
def from_state_and_config(cls, discovered_dataset, desired_dataset):
return cls(
dataset_id=discovered_dataset.dataset_id,
blockdevice_id=discovered_dataset.blockdevice_id,
)
@property
def eliot_action(self):
return UNMOUNT_BLOCK_DEVICE(_logger, dataset_id=self.dataset_id)
def run(self, deployer, state_persister):
"""
        Run the system ``umount`` tool to unmount this change's volume's block
device. The volume must be attached to this node and the corresponding
block device mounted.
"""
api = deployer.async_block_device_api
deferred_device_path = api.get_device_path(self.blockdevice_id)
def got_device(device):
UNMOUNT_BLOCK_DEVICE_DETAILS(
block_device_id=self.blockdevice_id,
block_device_path=device
).write(_logger)
# This should be asynchronous. FLOC-1797
deployer.block_device_manager.unmount(device)
deferred_device_path.addCallback(got_device)
return deferred_device_path
@implementer(IStateChange)
@provider(IDatasetStateChangeFactory)
class AttachVolume(PClass):
"""
Attach an unattached volume to this node (the node of the deployer it is
run with).
:ivar UUID dataset_id: The unique identifier of the dataset associated with
the volume to attach.
:ivar unicode blockdevice_id: The unique identifier of the
``IBlockDeviceAPI``-managed volume to be attached.
"""
dataset_id = field(type=UUID, mandatory=True)
blockdevice_id = field(type=unicode, mandatory=True)
@classmethod
def from_state_and_config(cls, discovered_dataset, desired_dataset):
return cls(
dataset_id=discovered_dataset.dataset_id,
blockdevice_id=discovered_dataset.blockdevice_id,
)
@property
def eliot_action(self):
return ATTACH_VOLUME(_logger, dataset_id=self.dataset_id,
block_device_id=self.blockdevice_id)
def run(self, deployer, state_persister):
"""
Use the deployer's ``IBlockDeviceAPI`` to attach the volume.
"""
api = deployer.async_block_device_api
getting_id = api.compute_instance_id()
def got_compute_id(compute_instance_id):
return api.attach_volume(
self.blockdevice_id,
attach_to=compute_instance_id,
)
attaching = getting_id.addCallback(got_compute_id)
return attaching
@implementer(IStateChange)
@provider(IDatasetStateChangeFactory)
class DetachVolume(PClass):
"""
Detach a volume from the node it is currently attached to.
:ivar UUID dataset_id: The unique identifier of the dataset associated with
the volume to detach.
:ivar unicode blockdevice_id: The unique identifier of the
``IBlockDeviceAPI``-managed volume to be detached.
"""
dataset_id = field(type=UUID, mandatory=True)
blockdevice_id = field(type=unicode, mandatory=True)
@classmethod
def from_state_and_config(cls, discovered_dataset, desired_dataset):
return cls(
dataset_id=discovered_dataset.dataset_id,
blockdevice_id=discovered_dataset.blockdevice_id,
)
@property
def eliot_action(self):
return DETACH_VOLUME(_logger, dataset_id=self.dataset_id,
block_device_id=self.blockdevice_id)
def run(self, deployer, state_persister):
"""
Use the deployer's ``IBlockDeviceAPI`` to detach the volume.
"""
api = deployer.async_block_device_api
return api.detach_volume(self.blockdevice_id)
@implementer(IStateChange)
@provider(IDatasetStateChangeFactory)
class DestroyVolume(PClass):
"""
Destroy the storage (and therefore contents) of a volume.
:ivar unicode blockdevice_id: The unique identifier of the
``IBlockDeviceAPI``-managed volume to be destroyed.
"""
blockdevice_id = field(type=unicode, mandatory=True)
@classmethod
def from_state_and_config(cls, discovered_dataset, desired_dataset):
return cls(blockdevice_id=discovered_dataset.blockdevice_id)
@property
def eliot_action(self):
return DESTROY_VOLUME(_logger, block_device_id=self.blockdevice_id)
def run(self, deployer, state_persister):
"""
Use the deployer's ``IBlockDeviceAPI`` to destroy the volume.
"""
api = deployer.async_block_device_api
return api.destroy_volume(self.blockdevice_id)
def allocated_size(allocation_unit, requested_size):
"""
Round ``requested_size`` up to the nearest ``allocation_unit``.
:param int allocation_unit: The interval in ``bytes`` to which
``requested_size`` will be rounded up.
:param int requested_size: The size in ``bytes`` that is required.
:return: The ``allocated_size`` in ``bytes``.
"""
allocation_unit = int(allocation_unit)
requested_size = int(requested_size)
previous_interval_size = (
(requested_size // allocation_unit) * allocation_unit
)
if previous_interval_size < requested_size:
return previous_interval_size + allocation_unit
else:
return requested_size
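# For example (illustrative): with a 4096-byte allocation unit a request for
# 5000 bytes rounds up to 8192, while an exact multiple is returned unchanged:
#
#   allocated_size(allocation_unit=4096, requested_size=5000)   # -> 8192
#   allocated_size(allocation_unit=4096, requested_size=8192)   # -> 8192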
@implementer(IStateChange)
@provider(IDatasetStateChangeFactory)
class CreateBlockDeviceDataset(PClass):
"""
An operation to create a new dataset on a newly created volume with a newly
initialized filesystem.
:ivar Dataset dataset: The dataset for which to create a block device.
"""
dataset_id = field(UUID, mandatory=True)
maximum_size = field(type=(int, long), mandatory=True)
metadata = pmap_field(unicode, unicode)
@classmethod
def from_state_and_config(cls, discovered_dataset, desired_dataset):
return cls(
dataset_id=desired_dataset.dataset_id,
maximum_size=desired_dataset.maximum_size,
metadata=desired_dataset.metadata,
)
@property
def eliot_action(self):
return CREATE_BLOCK_DEVICE_DATASET(
_logger,
dataset_id=self.dataset_id,
maximum_size=self.maximum_size,
metadata=self.metadata,
)
def _create_volume(self, deployer):
"""
Create the volume using the backend API. This method will create the
volume with a profile if the metadata on the volume suggests that we
should.
We should consider splitting this into two separate IStateChanges,
one for creating a volume with a profile, and one for creating a
volume without a profile.
:param deployer: The deployer to use to create the volume.
:returns: The created ``BlockDeviceVolume``.
"""
api = deployer.block_device_api
profile_name = self.metadata.get(PROFILE_METADATA_KEY)
size = allocated_size(allocation_unit=api.allocation_unit(),
requested_size=self.maximum_size)
if profile_name:
return (
deployer.profiled_blockdevice_api.create_volume_with_profile(
dataset_id=self.dataset_id,
size=size,
profile_name=profile_name
)
)
else:
return api.create_volume(dataset_id=self.dataset_id, size=size)
def run(self, deployer, state_persister):
"""
Create a block device, attach it to the local host, create an ``ext4``
filesystem on the device and mount it.
Operations are performed synchronously.
See ``IStateChange.run`` for general argument and return type
documentation.
:returns: An already fired ``Deferred`` with result ``None`` or a
failed ``Deferred`` with a ``DatasetExists`` exception if a
blockdevice with the required dataset_id already exists.
"""
api = deployer.block_device_api
try:
check_for_existing_dataset(api, self.dataset_id)
except:
return fail()
return self._create_volume(deployer)
@implementer(IStateChange)
@provider(IDatasetStateChangeFactory)
class RegisterVolume(PClass):
"""
Register a blockdevice volume as belonging to a particular dataset.
If we have a volume in the output of list_volumes() that is associated to
a dataset that doesn't have a mapping in the blockdevice ownership
registry, send a command to the control service indicating this mapping.
Since control service registry won't allow more than one mapping, if there
are two block device volumes we end up picking one, ensuring no
ambiguity. This is both the crash recovery optimization (allowing us to
reuse already created block devices) and also the upgrade mechanism from
time when registry didn't exist.
:ivar UUID dataset_id: The unique identifier of the dataset to have a
blockdevice registered to it.
:ivar unicode blockdevice_id: The unique identifier of the mounted
``IBlockDeviceAPI``-managed volume to be registered as belonging to the
dataset.
"""
dataset_id = field(type=UUID, mandatory=True)
blockdevice_id = field(type=unicode, mandatory=True)
@classmethod
def from_state_and_config(cls, discovered_dataset, desired_dataset):
return cls(
dataset_id=discovered_dataset.dataset_id,
blockdevice_id=discovered_dataset.blockdevice_id,
)
@property
def eliot_action(self):
return REGISTER_BLOCKDEVICE(
dataset_id=self.dataset_id,
block_device_id=self.blockdevice_id,
)
def run(self, deployer, state_persister):
return state_persister.record_ownership(
dataset_id=self.dataset_id,
blockdevice_id=self.blockdevice_id,
)
class IBlockDeviceAsyncAPI(Interface):
"""
Common operations provided by all block device backends, exposed via
asynchronous methods.
"""
def allocation_unit():
"""
See ``IBlockDeviceAPI.allocation_unit``.
:returns: A ``Deferred`` that fires with ``int`` size of the
allocation_unit.
"""
def compute_instance_id():
"""
See ``IBlockDeviceAPI.compute_instance_id``.
:returns: A ``Deferred`` that fires with ``unicode`` of a
provider-specific node identifier which identifies the node where
the method is run, or fails with ``UnknownInstanceID`` if we cannot
determine the identifier.
"""
def create_volume(dataset_id, size):
"""
See ``IBlockDeviceAPI.create_volume``.
:returns: A ``Deferred`` that fires with a ``BlockDeviceVolume`` when
the volume has been created.
"""
def destroy_volume(blockdevice_id):
"""
See ``IBlockDeviceAPI.destroy_volume``.
:return: A ``Deferred`` that fires when the volume has been destroyed.
"""
def attach_volume(blockdevice_id, attach_to):
"""
See ``IBlockDeviceAPI.attach_volume``.
:returns: A ``Deferred`` that fires with a ``BlockDeviceVolume`` with a
``attached_to`` attribute set to ``attach_to``.
"""
def detach_volume(blockdevice_id):
"""
        See ``IBlockDeviceAPI.detach_volume``.
:returns: A ``Deferred`` that fires when the volume has been detached.
"""
def list_volumes():
"""
        See ``IBlockDeviceAPI.list_volumes``.
:returns: A ``Deferred`` that fires with a ``list`` of
``BlockDeviceVolume``\ s.
"""
def get_device_path(blockdevice_id):
"""
        See ``IBlockDeviceAPI.get_device_path``.
:returns: A ``Deferred`` that fires with a ``FilePath`` for the device.
"""
class IBlockDeviceAPI(Interface):
"""
Common operations provided by all block device backends, exposed via
synchronous methods.
This will be used by the dataset agent convergence loop, which works
more or less as follows:
1. Call ``list_volumes`` to discover the state of the volumes.
2. Compare the state to the required configuration, calculate
necessary actions (which will involve calls to ``create_volume``
and other methods that change the backend state).
3. Run the actions.
4. Go to step 1.
What this means is that if an action fails for some reason it will be
retried in the next iteration of the convergence loop. Automatic retry
on errors is therefore unnecessary insofar as operations will be
retried by the convergence loop. The methods do need to be able to
deal with intermediate states not exposed by the API, however, by
automatically recovering when they are encountered.
For example, let's imagine ``attach_volume`` internally takes two
steps and might fail between the first and the
second. ``list_volumes`` should list a volume in that intermediate
state as unattached, which will result in ``attach_volume`` being
called again. ``attach_volume`` should then be able to recognize the
intermediate state and skip the first step in attachment and only run
the second step.
Other implementation hints:
* The factory function that creates this instance will be called with
a unique cluster ID (see
https://flocker-docs.clusterhq.com/en/latest/gettinginvolved/plugins/building-driver.html).
If possible it's worth creating volumes with that cluster ID stored
as metadata, so you can filter results from the backend and only
include relevant volumes. This allows sharing the same storage
backend between multiple Flocker clusters.
* Avoid infinite loops. If an operation's time-to-finish is uncertain
then use a timeout.
* Logging the calls between your implementation and the backend with
the Eliot logging library will allow for easier debugging.
"""
def allocation_unit():
"""
The size in bytes up to which ``IDeployer`` will round volume
sizes before calling ``IBlockDeviceAPI.create_volume``.
:rtype: ``int``
"""
def compute_instance_id():
"""
Get the backend-specific identifier for this node.
This will be compared against ``BlockDeviceVolume.attached_to``
to determine which volumes are locally attached and it will be used
with ``attach_volume`` to locally attach volumes.
:raise UnknownInstanceID: If we cannot determine the identifier of the
node.
:returns: A ``unicode`` object giving a provider-specific node
identifier which identifies the node where the method is run.
"""
def create_volume(dataset_id, size):
"""
Create a new volume.
When called by ``IDeployer``, the supplied size will be
rounded up to the nearest
``IBlockDeviceAPI.allocation_unit()``
If the backend supports human-facing volume names (i.e. names
that show up in management UIs) then it is recommended that the
newly created volume should be given a name that contains the
dataset_id in order to ease debugging. Some implementations may
choose not to do so or may not be able to do so.
The dataset_id must be stored by the backend and reported by the API, but
it is not the canonical location of information (which is the control
service registry).
:param UUID dataset_id: The Flocker dataset ID of the dataset on this
volume.
:param int size: The size of the new volume in bytes.
:returns: A ``BlockDeviceVolume``.
"""
def destroy_volume(blockdevice_id):
"""
Destroy an existing volume.
:param unicode blockdevice_id: The unique identifier for the volume to
destroy.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:return: ``None``
"""
def attach_volume(blockdevice_id, attach_to):
"""
Attach ``blockdevice_id`` to the node indicated by ``attach_to``.
:param unicode blockdevice_id: The unique identifier for the block
device being attached.
:param unicode attach_to: An identifier like the one returned by the
``compute_instance_id`` method indicating the node to which to
attach the volume.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises AlreadyAttachedVolume: If the supplied ``blockdevice_id`` is
already attached.
:returns: A ``BlockDeviceVolume`` with a ``attached_to`` attribute set
to ``attach_to``.
"""
def detach_volume(blockdevice_id):
"""
Detach ``blockdevice_id`` from whatever host it is attached to.
:param unicode blockdevice_id: The unique identifier for the block
device being detached.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to anything.
:returns: ``None``
"""
def list_volumes():
"""
List all the block devices available via the back end API.
Only volumes for this particular Flocker cluster should be included.
Make sure you can list large numbers of volumes. E.g. some cloud
APIs have a hard limit on how many volumes they include in a
result, and therefore require the use of paging to get all volumes
listed.
:returns: A ``list`` of ``BlockDeviceVolume``s.
"""
def get_device_path(blockdevice_id):
"""
Return the device path that has been allocated to the block device on
the host to which it is currently attached.
Returning the wrong value here can lead to data loss or corruption
if a container is started with an unexpected volume. Make very
sure you are returning the correct result.
:param unicode blockdevice_id: The unique identifier for the block
device.
:raises UnknownVolume: If the supplied ``blockdevice_id`` does not
exist.
:raises UnattachedVolume: If the supplied ``blockdevice_id`` is
not attached to a host.
:returns: A ``FilePath`` for the device.
"""
class ICloudAPI(Interface):
"""
Additional functionality provided specifically by cloud-based block
device providers.
In particular the presumption is that nodes are also managed by a
centralized infrastructure.
This is specifically designed for cloud systems where shut-down nodes
continue to have volumes attached to them.
"""
def list_live_nodes():
"""
Return the compute IDs of all nodes that are currently up and running.
This is used to figure out which nodes are dead, so that other
nodes can do the detach.
:returns: A collection of ``unicode`` compute instance IDs, compatible
with those returned by ``IBlockDeviceAPI.compute_instance_id``.
"""
def start_node(node_id):
"""
Start up a node that has been shut down.
This is only used by acceptance tests, so has no direct functional
tests.
This is done on a best effort basis. Error conditions are not tested
for or even necessarily raised if they occur.
:param unicode node_id: The compute node ID to start up.
"""
@auto_threaded(ICloudAPI, "_reactor", "_sync", "_threadpool")
class _SyncToThreadedAsyncCloudAPIAdapter(PClass):
"""
Adapt any ``ICloudAPI`` to the same interface but with
``Deferred``-returning methods by running those methods in a thread
pool.
:ivar _reactor: The reactor, providing ``IReactorThreads``.
:ivar _sync: The ``ICloudAPI`` provider.
:ivar _threadpool: ``twisted.python.threadpool.ThreadPool`` instance.
"""
_reactor = field()
_sync = field()
_threadpool = field()
class MandatoryProfiles(Values):
"""
Mandatory Storage Profiles to be implemented by ``IProfiledBlockDeviceAPI``
implementers. These will have driver-specific meaning, with the following
desired meaning:
:ivar GOLD: The profile for fast storage.
:ivar SILVER: The profile for intermediate/default storage.
:ivar BRONZE: The profile for cheap storage.
:ivar DEFAULT: The default profile if none is specified.
"""
GOLD = ValueConstant(u'gold')
SILVER = ValueConstant(u'silver')
BRONZE = ValueConstant(u'bronze')
DEFAULT = ValueConstant(BRONZE.value)
class IProfiledBlockDeviceAPI(Interface):
"""
An interface for drivers that are capable of creating volumes with a
specific profile.
"""
def create_volume_with_profile(dataset_id, size, profile_name):
"""
Create a new volume with the specified profile.
When called by ``IDeployer``, the supplied size will be
rounded up to the nearest ``IBlockDeviceAPI.allocation_unit()``.
:param UUID dataset_id: The Flocker dataset ID of the dataset on this
volume.
:param int size: The size of the new volume in bytes.
:param unicode profile_name: The name of the storage profile for this
volume.
:returns: A ``BlockDeviceVolume`` of the newly created volume.
"""
@implementer(IProfiledBlockDeviceAPI)
class ProfiledBlockDeviceAPIAdapter(PClass):
"""
Adapter class to create ``IProfiledBlockDeviceAPI`` providers for
``IBlockDeviceAPI`` implementations that do not implement
``IProfiledBlockDeviceAPI``.
:ivar _blockdevice_api: The ``IBlockDeviceAPI`` provider to back the
volume creation.
"""
_blockdevice_api = field(
mandatory=True,
invariant=provides(IBlockDeviceAPI),
)
def create_volume_with_profile(self, dataset_id, size, profile_name):
"""
Reverts to constructing a volume with no profile. To be used with
backends that do not implement ``IProfiledBlockDeviceAPI``, but do
implement ``IBlockDeviceAPI``.
"""
CREATE_VOLUME_PROFILE_DROPPED(dataset_id=dataset_id,
profile_name=profile_name).write()
return self._blockdevice_api.create_volume(dataset_id=dataset_id,
size=size)
@implementer(IBlockDeviceAsyncAPI)
@auto_threaded(IBlockDeviceAPI, "_reactor", "_sync", "_threadpool")
class _SyncToThreadedAsyncAPIAdapter(PClass):
"""
Adapt any ``IBlockDeviceAPI`` to ``IBlockDeviceAsyncAPI`` by running its
methods in threads of a thread pool.
"""
_reactor = field()
_sync = field()
_threadpool = field()
@classmethod
def from_api(cls, block_device_api, reactor=None):
if reactor is None:
from twisted.internet import reactor
return cls(
_sync=block_device_api,
_reactor=reactor,
_threadpool=reactor.getThreadPool(),
)
def log_list_volumes(function):
"""
Decorator to count calls to list_volumes.
:param func function: The function to call.
:return: A function which will call the method and do
the extra logging.
"""
counter = itertools.count(1)
def _count_calls(*args, **kwargs):
"""
Run given function with count.
"""
CALL_LIST_VOLUMES(
function=function.__name__, count=next(counter)
).write()
return function(*args, **kwargs)
return _count_calls
@log_list_volumes
def check_for_existing_dataset(api, dataset_id):
"""
:param IBlockDeviceAPI api: The ``api`` for listing the existing volumes.
:param UUID dataset_id: The dataset_id to check for.
:raises: ``DatasetExists`` if there is already a ``BlockDeviceVolume`` with
the supplied ``dataset_id``.
"""
volumes = api.list_volumes()
for volume in volumes:
if volume.dataset_id == dataset_id:
raise DatasetExists(volume)
@log_list_volumes
def get_blockdevice_volume(api, blockdevice_id):
"""
Find a ``BlockDeviceVolume`` matching the given identifier.
:param unicode blockdevice_id: The backend identifier of the volume to
find.
:raise UnknownVolume: If no volume with a matching identifier can be
found.
:return: The ``BlockDeviceVolume`` that matches.
"""
for volume in api.list_volumes():
if volume.blockdevice_id == blockdevice_id:
return volume
raise UnknownVolume(blockdevice_id)
def _manifestation_from_volume(volume):
"""
:param BlockDeviceVolume volume: The block device which has the
manifestation of a dataset.
:returns: A primary ``Manifestation`` of a ``Dataset`` with the same id as
the supplied ``BlockDeviceVolume``.
"""
dataset = Dataset(
dataset_id=volume.dataset_id,
maximum_size=volume.size,
)
return Manifestation(dataset=dataset, primary=True)
class RawState(PClass):
"""
The raw state of a node.
:param unicode compute_instance_id: The identifier for this node.
:param _live_instances: ``pset`` of the identifiers of live nodes, or
``None`` if unknown.
:param volumes: List of all volumes in the cluster.
:type volumes: ``pvector`` of ``BlockDeviceVolume``
:param devices: Mapping from dataset UUID to block device path containing
filesystem of that dataset, on this particular node.
:type devices: ``pmap`` of ``UUID`` to ``FilePath``
:param system_mounts: Mapping of block device path to mount point of all
mounts on this particular node.
:type system_mounts: ``pmap`` of ``FilePath`` to ``FilePath``.
:param devices_with_filesystems: ``PSet`` of ``FilePath`` including
those devices that have filesystems on this node.
"""
compute_instance_id = field(unicode, mandatory=True)
_live_instances = pset_field(unicode, optional=True)
volumes = pvector_field(BlockDeviceVolume)
devices = pmap_field(UUID, FilePath)
system_mounts = pmap_field(FilePath, FilePath)
devices_with_filesystems = pset_field(FilePath)
def is_known_dead_instance(self, instance_id):
"""
If the node identified by given ID is dead, return ``True``. If it's
alive or unknown, return ``False``.
:param unicode instance_id: Node identifier.
:return: Whether the instance is known to be dead.
"""
if self._live_instances is None:
return False
return instance_id not in self._live_instances
@implementer(ILocalState)
class BlockDeviceDeployerLocalState(PClass):
"""
An ``ILocalState`` implementation for the ``BlockDeviceDeployer``.
:ivar unicode hostname: The IP address of the node that this is the
state for.
:ivar UUID node_uuid: The UUID of the node that this is the state for.
:ivar datasets: The datasets discovered from this node.
:ivar volumes: A ``PVector`` of ``BlockDeviceVolume`` instances for all
volumes in the cluster that this node is aware of.
"""
hostname = field(type=unicode, mandatory=True)
node_uuid = field(type=UUID, mandatory=True)
datasets = pmap_field(UUID, DiscoveredDataset)
def shared_state_changes(self):
"""
Returns the NodeState and the NonManifestDatasets of the local state.
These are the only parts of the state that need to be sent to the
control service.
"""
# XXX The structure of the shared state changes reflects the model
# currently used by the control service. However, that model doesn't
# seem to actually match what any consumer wants.
manifestations = {}
paths = {}
devices = {}
nonmanifest_datasets = {}
for dataset in self.datasets.values():
dataset_id = dataset.dataset_id
if dataset.state == DatasetStates.MOUNTED:
manifestations[unicode(dataset_id)] = Manifestation(
dataset=Dataset(
dataset_id=dataset_id,
maximum_size=dataset.maximum_size,
),
primary=True,
)
paths[unicode(dataset_id)] = dataset.mount_point
elif dataset.state in (
DatasetStates.NON_MANIFEST, DatasetStates.ATTACHED,
DatasetStates.ATTACHED_NO_FILESYSTEM,
DatasetStates.ATTACHED_TO_DEAD_NODE,
):
nonmanifest_datasets[unicode(dataset_id)] = Dataset(
dataset_id=dataset_id,
maximum_size=dataset.maximum_size,
)
if dataset.state in (
DatasetStates.MOUNTED, DatasetStates.ATTACHED,
DatasetStates.ATTACHED_NO_FILESYSTEM,
):
devices[dataset_id] = dataset.device_path
return (
NodeState(
uuid=self.node_uuid,
hostname=self.hostname,
manifestations=manifestations,
paths=paths,
devices=devices,
applications=None,
),
NonManifestDatasets(
datasets=nonmanifest_datasets
),
)
def _provides_IDatasetStateChangeFactory(k, v):
return provides(IDatasetStateChangeFactory)(v)
class TransitionTable(CheckedPMap):
"""
Mapping from desired and discovered dataset state to
``IDatasetStateChangeFactory``.
"""
__key_type__ = NamedConstant
class __value_type__(CheckedPMap):
__key_type__ = NamedConstant
__invariant__ = _provides_IDatasetStateChangeFactory
# If we've nothing to do we want to sleep for no more than a minute:
NOTHING_TO_DO = NoOp(sleep=timedelta(seconds=60))
@provider(IDatasetStateChangeFactory)
class DoNothing(PClass):
"""
Build a no-op ``IStateChange`` from dataset state.
"""
@staticmethod
def from_state_and_config(discovered_dataset, desired_dataset):
return NOTHING_TO_DO
@provider(IDatasetStateChangeFactory)
class Poll(PClass):
"""
Wake up more frequently to see if a remote node has detached a volume we
wish to attach.
This polling will not be necessary once FLOC-3834 is done, since we
will no longer conflate local and remote state. Remote updates will
therefore suffice to wake us up immediately once the remote node
detaches the volume.
"""
@staticmethod
def from_state_and_config(discovered_dataset, desired_dataset):
return NoOp(sleep=timedelta(seconds=3))
# Mapping from desired and discovered dataset state to
# IStateChange factory. (The factory is expected to take
# ``desired_dataset`` and ``discovered_dataset``.)
Desired = Discovered = DatasetStates
DATASET_TRANSITIONS = TransitionTable.create({
Desired.MOUNTED: {
Discovered.NON_EXISTENT: CreateBlockDeviceDataset,
# The other node will need to detach first, and until FLOC-3834 makes
# that info part of cluster state we have to wake up to notice that it
# has detached. So we need to poll... but not too often:
Discovered.ATTACHED_ELSEWHERE: Poll,
# Some backends (AWS in particular) are only eventually consistent, so
# list_volumes might not report the volume even after it has already
# been created. We poll waiting for the backend to start reporting the
# created volume.
Discovered.REGISTERED: Poll,
Discovered.UNREGISTERED: RegisterVolume,
Discovered.ATTACHED_NO_FILESYSTEM: CreateFilesystem,
Discovered.NON_MANIFEST: AttachVolume,
Discovered.ATTACHED: MountBlockDevice,
Discovered.ATTACHED_TO_DEAD_NODE: DetachVolume,
},
Desired.NON_MANIFEST: {
# XXX FLOC-2206
# Can't create non-manifest datasets yet.
Discovered.NON_EXISTENT: CreateBlockDeviceDataset,
# Other node will detach:
Discovered.ATTACHED_ELSEWHERE: DoNothing,
# We reach this state when a dataset has been deleted.
Discovered.REGISTERED: DoNothing,
Discovered.UNREGISTERED: RegisterVolume,
Discovered.ATTACHED_NO_FILESYSTEM: DetachVolume,
Discovered.ATTACHED: DetachVolume,
Discovered.MOUNTED: UnmountBlockDevice,
Discovered.ATTACHED_TO_DEAD_NODE: DoNothing,
},
Desired.DELETED: {
Discovered.NON_EXISTENT: DoNothing,
# Other node will destroy
Discovered.ATTACHED_ELSEWHERE: DoNothing,
# Can't pick node that will do destruction yet.
Discovered.NON_MANIFEST: DestroyVolume,
Discovered.REGISTERED: DoNothing,
Discovered.UNREGISTERED: RegisterVolume,
Discovered.ATTACHED_NO_FILESYSTEM: DetachVolume,
Discovered.ATTACHED: DetachVolume,
Discovered.MOUNTED: UnmountBlockDevice,
Discovered.ATTACHED_TO_DEAD_NODE: DetachVolume,
},
})
del Desired, Discovered
@implementer(ICalculator)
class BlockDeviceCalculator(PClass):
"""
An ``ICalculator`` that calculates actions that use a
``BlockDeviceDeployer``.
:ivar TransitionTable transitions: Table of convergence actions.
"""
transitions = field(TransitionTable, mandatory=True,
factory=TransitionTable.create,
initial=DATASET_TRANSITIONS)
def _calculate_dataset_change(self, discovered_dataset, desired_dataset):
"""
Calculate the state changes necessary to make ``discovered_dataset``
state match ``desired_dataset`` configuration.
:param discovered_dataset: The current state of the dataset.
:type discovered_dataset: ``DiscoveredDataset`` or ``None``
:param desired_dataset: The desired state of the dataset.
:type desired_dataset: ``DesiredDataset`` or ``None``
"""
# If the configuration doesn't know about a dataset,
# we detach it.
desired_state = (desired_dataset.state
if desired_dataset is not None
else DatasetStates.NON_MANIFEST)
# If we haven't discovered a dataset, then it doesn't
# exist.
discovered_state = (discovered_dataset.state
if discovered_dataset is not None
else DatasetStates.NON_EXISTENT)
if desired_state != discovered_state:
transition = self.transitions[desired_state][discovered_state]
return transition.from_state_and_config(
discovered_dataset=discovered_dataset,
desired_dataset=desired_dataset,
)
else:
return NOTHING_TO_DO
def calculate_changes_for_datasets(
self, discovered_datasets, desired_datasets
):
actions = []
# If a dataset isn't in the configuration, we don't act on it.
for dataset_id in set(discovered_datasets) | set(desired_datasets):
desired_dataset = desired_datasets.get(dataset_id)
discovered_dataset = discovered_datasets.get(dataset_id)
actions.append(self._calculate_dataset_change(
discovered_dataset=discovered_dataset,
desired_dataset=desired_dataset,
))
return in_parallel(changes=actions)
@implementer(IDeployer)
class BlockDeviceDeployer(PClass):
"""
An ``IDeployer`` that operates on ``IBlockDeviceAPI`` providers.
:ivar unicode hostname: The IP address of the node that has this deployer.
:ivar UUID node_uuid: The UUID of the node that has this deployer.
:ivar IBlockDeviceAPI block_device_api: The block device API that will
be called upon to perform block device operations. This will
typically be a ``ProcessLifetimeCache`` wrapping the underlying
provider.
:ivar _underlying_blockdevice_api: The underlying block device API,
unwrapped.
:ivar FilePath mountroot: The directory where block devices will be
mounted.
:ivar _async_block_device_api: An object to override the value of the
``async_block_device_api`` property. Used by tests. Should be
``None`` in real-world use.
:ivar block_device_manager: An ``IBlockDeviceManager`` implementation used
to interact with the system regarding block devices.
:ivar ICalculator calculator: The object to use to calculate dataset
changes.
"""
hostname = field(type=unicode, mandatory=True)
node_uuid = field(type=UUID, mandatory=True)
block_device_api = field(mandatory=True)
_underlying_blockdevice_api = field(mandatory=True, initial=None)
_async_block_device_api = field(mandatory=True, initial=None)
mountroot = field(type=FilePath, initial=FilePath(b"/flocker"))
block_device_manager = field(initial=BlockDeviceManager())
calculator = field(
invariant=provides(ICalculator),
mandatory=True,
initial=BlockDeviceCalculator(),
)
@property
def profiled_blockdevice_api(self):
"""
Get an ``IProfiledBlockDeviceAPI`` provider which can create volumes
configured based on pre-defined profiles. This will use the
_underlying_blockdevice_api attribute, falling back to the
block_device_api attribute, and finally an adapter implementation
around the block_device_api if neither of those provides the interface.
"""
if IProfiledBlockDeviceAPI.providedBy(
self._underlying_blockdevice_api):
return self._underlying_blockdevice_api
if IProfiledBlockDeviceAPI.providedBy(self.block_device_api):
return self.block_device_api
return ProfiledBlockDeviceAPIAdapter(
_blockdevice_api=self.block_device_api
)
@property
def async_block_device_api(self):
"""
Get an ``IBlockDeviceAsyncAPI`` provider which can manipulate volumes
for this deployer.
During real operation, this is a threadpool-based wrapper around the
``IBlockDeviceAPI`` provider. For testing purposes it can be
overridden with a different object entirely (the large amount of
support code for this is necessary because this class is a ``PClass``
subclass).
"""
if self._async_block_device_api is None:
return _SyncToThreadedAsyncAPIAdapter.from_api(
self.block_device_api,
)
return self._async_block_device_api
@log_list_volumes
def _discover_raw_state(self):
"""
Find the state of this node that is relevant to determining which
datasets are on this node, and return a ``RawState`` containing that
information.
"""
# FLOC-1819 Make this asynchronous
api = self.block_device_api
compute_instance_id = api.compute_instance_id()
volumes = api.list_volumes()
system_mounts = {
mount.blockdevice: mount.mountpoint
for mount in self.block_device_manager.get_mounts()
}
def is_existing_block_device(dataset_id, path):
if isinstance(path, FilePath) and path.isBlockDevice():
return True
INVALID_DEVICE_PATH(
dataset_id=dataset_id, invalid_value=path
).write(_logger)
return False
# XXX This should probably just be included in
# BlockDeviceVolume for attached volumes.
devices = {}
for volume in volumes:
dataset_id = volume.dataset_id
if volume.attached_to == compute_instance_id:
device_path = api.get_device_path(volume.blockdevice_id)
if is_existing_block_device(dataset_id, device_path):
devices[dataset_id] = device_path
else:
# XXX We will detect this as NON_MANIFEST, but this is
# probably an intermediate state where the device is
# externally attached but the device hasn't shown up
# in the filesystem yet.
pass
if ICloudAPI.providedBy(self._underlying_blockdevice_api):
live_instances = self._underlying_blockdevice_api.list_live_nodes()
else:
# Can't know accurately who is alive and who is dead:
live_instances = None
result = RawState(
compute_instance_id=compute_instance_id,
_live_instances=live_instances,
volumes=volumes,
devices=devices,
system_mounts=system_mounts,
devices_with_filesystems=[
device for device in devices.values()
if self.block_device_manager.has_filesystem(device)],
)
DISCOVERED_RAW_STATE(raw_state=result).write()
return result
def discover_state(self, cluster_state, persistent_state):
"""
Find all datasets that are currently associated with this host and
return a ``BlockDeviceDeployerLocalState`` containing all the datasets
that are not manifest or are located on this node.
"""
raw_state = self._discover_raw_state()
datasets = {}
for volume in raw_state.volumes:
dataset_id = volume.dataset_id
owning_blockdevice_id = persistent_state.blockdevice_ownership.get(
dataset_id)
if owning_blockdevice_id is None:
datasets[dataset_id] = DiscoveredDataset(
state=DatasetStates.UNREGISTERED,
dataset_id=dataset_id,
maximum_size=volume.size,
blockdevice_id=volume.blockdevice_id,
)
elif volume.blockdevice_id != owning_blockdevice_id:
# XXX Should we cleanup duplicate volumes?
if volume.attached_to is not None:
UNREGISTERED_VOLUME_ATTACHED(
dataset_id=dataset_id,
block_device_id=volume.blockdevice_id,
).write()
elif dataset_id in raw_state.devices:
device_path = raw_state.devices[dataset_id]
mount_point = self._mountpath_for_dataset_id(
unicode(dataset_id)
)
if (
device_path in raw_state.system_mounts and
raw_state.system_mounts[device_path] == mount_point
):
datasets[dataset_id] = DiscoveredDataset(
state=DatasetStates.MOUNTED,
dataset_id=dataset_id,
maximum_size=volume.size,
blockdevice_id=volume.blockdevice_id,
device_path=device_path,
mount_point=mount_point,
)
else:
if device_path in raw_state.devices_with_filesystems:
state = DatasetStates.ATTACHED
else:
state = DatasetStates.ATTACHED_NO_FILESYSTEM
datasets[dataset_id] = DiscoveredDataset(
state=state,
dataset_id=dataset_id,
maximum_size=volume.size,
blockdevice_id=volume.blockdevice_id,
device_path=device_path,
)
else:
if volume.attached_to in (None, raw_state.compute_instance_id):
# XXX We also treat locally-attached volumes as non-manifest
# here, for the case where the volume is attached but the
# blockdevice doesn't exist yet.
datasets[dataset_id] = DiscoveredDataset(
state=DatasetStates.NON_MANIFEST,
dataset_id=dataset_id,
maximum_size=volume.size,
blockdevice_id=volume.blockdevice_id,
)
else:
if raw_state.is_known_dead_instance(volume.attached_to):
state = DatasetStates.ATTACHED_TO_DEAD_NODE
else:
state = DatasetStates.ATTACHED_ELSEWHERE
datasets[dataset_id] = DiscoveredDataset(
state=state,
dataset_id=dataset_id,
maximum_size=volume.size,
blockdevice_id=volume.blockdevice_id,
)
for dataset_id, blockdevice_id in (
persistent_state.blockdevice_ownership.items()
):
if dataset_id not in datasets:
datasets[dataset_id] = DiscoveredDataset(
state=DatasetStates.REGISTERED,
dataset_id=dataset_id,
blockdevice_id=blockdevice_id,
)
local_state = BlockDeviceDeployerLocalState(
node_uuid=self.node_uuid,
hostname=self.hostname,
datasets=datasets,
)
return succeed(local_state)
def _mountpath_for_dataset_id(self, dataset_id):
"""
Calculate the mountpoint for a dataset.
:param unicode dataset_id: The unique identifier of the dataset for
which to calculate a mount point.
:returns: A ``FilePath`` of the mount point.
"""
return self.mountroot.child(dataset_id.encode("ascii"))
def _calculate_desired_for_manifestation(self, manifestation):
"""
Get the ``DesiredDataset`` corresponding to a given manifestation.
:param Manifestation manifestation: The manifestation to convert.
:return: The ``DesiredDataset`` corresponding to the given
manifestation.
"""
dataset_id = UUID(manifestation.dataset.dataset_id)
# XXX: Make this configurable. FLOC-2679
maximum_size = manifestation.dataset.maximum_size
if maximum_size is None:
maximum_size = int(DEFAULT_DATASET_SIZE.bytes)
common_args = {
'dataset_id': dataset_id,
'metadata': manifestation.dataset.metadata,
}
if manifestation.dataset.deleted:
return DesiredDataset(
state=DatasetStates.DELETED,
**common_args
)
else:
return DesiredDataset(
state=DatasetStates.MOUNTED,
maximum_size=maximum_size,
mount_point=self._mountpath_for_dataset_id(
unicode(dataset_id)
),
**common_args
)
def _calculate_desired_state(
self, configuration, local_applications, local_datasets
):
not_in_use = NotInUseDatasets(
node_uuid=self.node_uuid,
local_applications=local_applications,
leases=configuration.leases,
)
this_node_config = configuration.get_node(
self.node_uuid, hostname=self.hostname)
desired_datasets = {
UUID(manifestation.dataset.dataset_id):
self._calculate_desired_for_manifestation(
manifestation
)
for manifestation in this_node_config.manifestations.values()
}
# If we don't have a given dataset, we default it to `NON_MANIFEST` in
# BlockDeviceCalculator.calculate_changes_for_datasets, so we don't try
# to find them here. We don't have explicit configuration for
# non-manifest datasets anyway. Datasets that should be
# `ATTACHED_ELSEWHERE` need the same behavior as `NON_MANIFEST`, so we
# don't check them either.
# This is a performance optimization. We deliberately use a ``set``
# here to allow fast hash based lookups.
not_in_use_datasets = set(not_in_use(local_datasets.values()))
for dataset_id, dataset in local_datasets.items():
# Hash based lookup rather than iteration here.
if dataset in not_in_use_datasets:
continue
if dataset.state != DatasetStates.MOUNTED:
# A lease doesn't force a mount.
continue
# This may override something from above, if there is a
# lease or application using a dataset.
desired_datasets[dataset_id] = DesiredDataset(
dataset_id=dataset_id,
state=DatasetStates.MOUNTED,
maximum_size=dataset.maximum_size,
# XXX We don't populate metadata here, but it isn't necessary
# until we want to update it.
metadata={},
mount_point=self._mountpath_for_dataset_id(
unicode(dataset_id)
),
)
return desired_datasets
def calculate_changes(self, configuration, cluster_state, local_state):
local_node_state = cluster_state.get_node(self.node_uuid,
hostname=self.hostname)
local_applications = None
if local_node_state.applications is not None:
local_applications = local_node_state.applications.values()
desired_datasets = self._calculate_desired_state(
configuration=configuration,
local_applications=local_applications,
local_datasets=local_state.datasets,
)
return self.calculator.calculate_changes_for_datasets(
discovered_datasets=local_state.datasets,
desired_datasets=desired_datasets,
)
class ProcessLifetimeCache(proxyForInterface(IBlockDeviceAPI, "_api")):
"""
A transparent caching layer around an ``IBlockDeviceAPI`` instance,
intended to exist for the lifetime of the process.
:ivar _api: Wrapped ``IBlockDeviceAPI`` provider.
:ivar _instance_id: Cached result of ``compute_instance_id``.
:ivar _device_paths: Mapping from blockdevice ids to cached device path.
"""
def __init__(self, api):
self._api = api
self._instance_id = None
self._device_paths = {}
def compute_instance_id(self):
"""
Always return initial result since this shouldn't change until a
reboot.
"""
if self._instance_id is None:
self._instance_id = self._api.compute_instance_id()
return self._instance_id
def get_device_path(self, blockdevice_id):
"""
Load the device path from a cache if possible.
"""
if blockdevice_id not in self._device_paths:
self._device_paths[blockdevice_id] = self._api.get_device_path(
blockdevice_id)
return self._device_paths[blockdevice_id]
def detach_volume(self, blockdevice_id):
"""
Clear the cached device path, if it was cached.
"""
try:
del self._device_paths[blockdevice_id]
except KeyError:
pass
return self._api.detach_volume(blockdevice_id)
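# A minimal usage sketch for the cache above (``MyBackendAPI`` is a
# hypothetical ``IBlockDeviceAPI`` implementation):
#
#   api = ProcessLifetimeCache(MyBackendAPI())
#   api.compute_instance_id()      # hits the backend once, then cached
#   api.get_device_path(bd_id)     # cached per blockdevice_id
#   api.detach_volume(bd_id)       # invalidates the cached device path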
| 29,693 |
407 | <reponame>iuskye/SREWorks
package com.elasticsearch.cloud.monitor.metric.mapping;
import com.elasticsearch.cloud.monitor.metric.common.blink.utils.Filter;
import com.elasticsearch.cloud.monitor.metric.common.pojo.CommonPojo.EsClusterConf;
import com.elasticsearch.cloud.monitor.metric.mapping.pojo.MetricMappingInfo;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.*;
import static com.elasticsearch.cloud.monitor.metric.mapping.MappingUtil.createHighLevelClient;
/**
* @author xiaoping
* @date 2021/6/23
*/
public class MetricMappingManager {
private static final Log log = LogFactory.getLog(MetricMappingManager.class);
private EsClusterConf esClusterConf;
private String metricMappingIndexName;
private volatile RestHighLevelClient highLevelClient;
private volatile Map<String, MetricMappingInfo> metricToMappingInfo = Maps.newConcurrentMap();
private volatile Map<String, Set<Integer>> tenantToNumbers = Maps.newConcurrentMap();
private Map<String, Long> tenantAccessTimes = Maps.newConcurrentMap();
private ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
private ScheduledExecutorService unknownAssignService = Executors.newSingleThreadScheduledExecutor();
private ExecutorService assignExecutorService = new ThreadPoolExecutor(2, 150, 5, TimeUnit.MINUTES,
new LinkedBlockingQueue<Runnable>());
private boolean keepTenantToNumbers;
private List<String> notKeepTenantNumberPre;
private Set<MetricMappingInfo> unknownMetric = Sets.newConcurrentHashSet();
public MetricMappingManager(EsClusterConf esClusterConf, String metricMappingIndexName, boolean syncMappingInfo,
boolean keepTenantToNumbers, List<String> notKeepTenantNumberPre) {
this.esClusterConf = esClusterConf;
this.metricMappingIndexName = metricMappingIndexName;
this.highLevelClient = createHighLevelClient(esClusterConf);
this.keepTenantToNumbers = keepTenantToNumbers;
this.notKeepTenantNumberPre = notKeepTenantNumberPre;
init(syncMappingInfo);
}
private void init(boolean syncMappingInfo) {
try {
if (syncMappingInfo) {
syncMappingInfo();
}
} catch (Throwable t) {
log.error(String.format("MetricMappingManager init syncMappingInfo error %s", t.getMessage()), t);
}
executorService.scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
try {
log.info("begin to syncMappingInfo");
long start = System.currentTimeMillis();
syncMappingInfo();
log.info(String.format("end syncMappingInfo, took %s seconds",
TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis() - start)));
} catch (Throwable t) {
log.error(String
.format("MetricMappingManager scheduleWithFixedDelay syncMappingInfo error %s",
t.getMessage()),
t);
}
}
}, 0, 5, TimeUnit.MINUTES);
unknownAssignService.scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
try {
List<MetricMappingInfo> metricMappingInfos = Lists.newArrayList(unknownMetric);
unknownMetric.clear();
getMapping(metricMappingInfos, true, false);
} catch (Throwable t) {
log.error(String
.format("MetricMappingManager scheduleWithFixedDelay assign unknown metric, error %s",
t.getMessage()), t);
}
}
}, 0, 10, TimeUnit.SECONDS);
}
public void addUnknownMetric(List<MetricMappingInfo> metrics) {
unknownMetric.addAll(metrics);
}
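/**
 * Resolve mapping info for the given metrics, first from the in-memory
 * cache and then, for cache misses, by assigning new mappings in ES.
 *
 * @param metricList metrics to resolve
 * @param addIfNotExist when true, assign a mapping for unknown metrics
 * @param wildcard when true, match metric names as wildcard patterns
 *     against the cache instead of assigning new mappings
 */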
public Map<String, MetricMappingInfo> getMapping(List<MetricMappingInfo> metricList, boolean addIfNotExist,
boolean wildcard)
throws IOException, ExecutionException, InterruptedException {
Map<String, MetricMappingInfo> mappings = Maps.newConcurrentMap();
Map<String, List<MetricMappingInfo>> tenantToNotMappingMetric = Maps.newConcurrentMap();
for (MetricMappingInfo mappingInfo : metricList) {
MetricMappingInfo metricMappingInfo = metricToMappingInfo.get(mappingInfo.getMetric());
if (metricMappingInfo != null) {
mappings.put(mappingInfo.getMetric(), metricMappingInfo);
} else {
if (wildcard) {
for (Entry<String, MetricMappingInfo> entry : metricToMappingInfo.entrySet()) {
if (Filter.match(mappingInfo.getMetric(), entry.getKey())) {
mappings.put(entry.getValue().getMetric(), entry.getValue());
}
}
} else {
if (addIfNotExist) {
List<MetricMappingInfo> infos = tenantToNotMappingMetric.computeIfAbsent(
mappingInfo.getTenant(),
t -> new Vector<>());
infos.add(mappingInfo);
}
}
}
}
if (tenantToNotMappingMetric.size() > 0) {
Map<String, MetricMappingInfo> metricMappingInfoMap = assignMetric(tenantToNotMappingMetric);
mappings.putAll(metricMappingInfoMap);
}
for (MetricMappingInfo metricMappingInfo : mappings.values()) {
tenantAccessTimes.put(metricMappingInfo.getTenant(), System.currentTimeMillis());
}
return mappings;
}
private synchronized Map<String, MetricMappingInfo> assignMetric(
Map<String, List<MetricMappingInfo>> tenantToNotMappingMetric)
throws IOException, ExecutionException, InterruptedException {
Map<String, MetricMappingInfo> result = Maps.newConcurrentMap();
List<Future<Map<String, MetricMappingInfo>>> futures = Lists.newArrayList();
for (Entry<String, List<MetricMappingInfo>> entry : tenantToNotMappingMetric.entrySet()) {
Future<Map<String, MetricMappingInfo>> future = assignExecutorService.submit(
new AssignMetricCallable(entry.getKey(), entry.getValue()));
if (future != null) {
futures.add(future);
} else {
log.error("future submit is null");
}
}
for (Future<Map<String, MetricMappingInfo>> future : futures) {
try {
result.putAll(future.get());
} catch (Throwable t) {
if (t instanceof NullPointerException) {
log.error("future get error" + Throwables.getStackTraceAsString(t));
} else {
log.error("future get error ", t);
}
}
}
return result;
}
private class AssignMetricCallable implements Callable<Map<String, MetricMappingInfo>> {
private String tenant;
private List<MetricMappingInfo> metrics;
public AssignMetricCallable(String tenant,
List<MetricMappingInfo> metrics) {
this.tenant = tenant;
this.metrics = metrics;
}
@Override
public Map<String, MetricMappingInfo> call() throws Exception {
return assignMetric(tenant, metrics);
}
}
private Map<String, MetricMappingInfo> assignMetric(String tenant, List<MetricMappingInfo> metrics)
throws IOException {
Map<String, MetricMappingInfo> result = Maps.newConcurrentMap();
Integer assignNumber = null;
boolean needClearNumber = false;
if (metrics != null) {
for (MetricMappingInfo mappingInfo : metrics) {
if (StringUtils.isEmpty(mappingInfo.getMetric())) {
continue;
}
MetricMappingInfo metricMappingInfo = metricToMappingInfo.get(mappingInfo.getMetric());
if (metricMappingInfo != null) {
result.put(mappingInfo.getMetric(), metricMappingInfo);
} else {
if (StringUtils.isEmpty(mappingInfo.getReferenceMetric())) {
if (assignNumber == null) {
assignNumber = findAvailableNumber(tenant);
needClearNumber = true;
}
} else {
assignNumber = getReferenceNumber(mappingInfo.getReferenceTenant(),
mappingInfo.getReferenceMetric());
if (assignNumber == null) {
log.error(String.format("get reference number of tenant %s metric %s is null",
mappingInfo.getReferenceTenant(), mappingInfo.getReferenceMetric()));
continue;
}
}
String pk = MappingUtil.md5(mappingInfo.getMetric());
boolean success = MappingUtil.addMappingInfo(highLevelClient, metricMappingIndexName, pk, tenant,
mappingInfo.getMetric(), assignNumber, true);
MetricMappingInfo assignedMetricMappingInfo;
if (success) {
assignedMetricMappingInfo = new MetricMappingInfo(tenant, mappingInfo.getMetric());
assignedMetricMappingInfo.setNumber(assignNumber);
assignedMetricMappingInfo.setMetric_pk(true);
assignNumber = null;
} else {
assignedMetricMappingInfo = MappingUtil.getMetricMappingInfo(highLevelClient,
metricMappingIndexName, pk);
if (assignedMetricMappingInfo == null) {
log.error("get pk " + pk + " is null");
}
}
if (assignedMetricMappingInfo != null) {
result.put(mappingInfo.getMetric(), assignedMetricMappingInfo);
if (needClearNumber) {
Set<Integer> numbers = tenantToNumbers.computeIfAbsent(tenant,
t -> Sets.newConcurrentHashSet());
numbers.add(assignedMetricMappingInfo.getNumber());
}
metricToMappingInfo.put(mappingInfo.getMetric(), assignedMetricMappingInfo);
}
}
}
}
if (needClearNumber && assignNumber != null) {
String pk = getTenantNumberPk(tenant, assignNumber);
MappingUtil.deleteInfo(highLevelClient, metricMappingIndexName, pk);
}
return result;
}
private String getTenantNumberPk(String tenant, int number) {
return MappingUtil.md5(tenant + ":" + number);
}
private Integer getReferenceNumber(String tenant, String metric) throws IOException {
Map<String, MetricMappingInfo> mappingInfoMap = assignMetric(tenant,
Lists.newArrayList(new MetricMappingInfo(tenant, metric)));
MetricMappingInfo metricMappingInfo = mappingInfoMap.get(metric);
if (metricMappingInfo != null) {
return metricMappingInfo.getNumber();
} else {
return null;
}
}
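/**
 * Linearly probe tenant numbers starting from 0 and claim the first
 * free one by writing a placeholder doc keyed by md5(tenant:number);
 * the doc write (presumably create-if-absent) is what prevents two
 * managers from claiming the same number concurrently.
 */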
private int findAvailableNumber(String tenant) throws IOException {
int number = 0;
Set<Integer> numbers = tenantToNumbers.computeIfAbsent(tenant, t -> Sets.newConcurrentHashSet());
while (true) {
if (!numbers.contains(number)) {
String pk = getTenantNumberPk(tenant, number);
boolean success = false;
try {
success = MappingUtil.addMappingInfo(highLevelClient, metricMappingIndexName, pk, tenant, "",
number, false);
} catch (IOException e) {
try {
handleException(e);
number--;
} catch (Throwable throwable) {
throw e;
}
}
if (success) {
return number;
}
}
number++;
}
}
public synchronized void syncMappingInfo() {
List<MetricMappingInfo> infos = null;
try {
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
boolQueryBuilder.filter(new TermQueryBuilder("metric_pk", true));
infos = MappingUtil.scanMappingInfo(highLevelClient, metricMappingIndexName, boolQueryBuilder);
log.info("end to real syncMappingInfo, info size " + infos.size());
} catch (Throwable e) {
try {
handleException(e);
} catch (Throwable throwable) {
log.error(String
.format("syncMappingInfo of index %s error %s", metricMappingIndexName, throwable.getMessage()),
throwable);
}
}
if (infos != null) {
Map<String, MetricMappingInfo> tempMetricToMappingInfo = Maps.newConcurrentMap();
Map<String, Set<Integer>> tempTenantToNumbers = Maps.newConcurrentMap();
for (MetricMappingInfo metricMappingInfo : infos) {
String metric = metricMappingInfo.getMetric();
String tenant = metricMappingInfo.getTenant();
int number = metricMappingInfo.getNumber();
tempMetricToMappingInfo.put(metric, metricMappingInfo);
if (needKeepTenantNumber(tenant)) {
Set<Integer> numbers = tempTenantToNumbers.computeIfAbsent(tenant,
t -> Sets.newConcurrentHashSet());
numbers.add(number);
}
}
metricToMappingInfo = tempMetricToMappingInfo;
tenantToNumbers = tempTenantToNumbers;
}
try {
MappingUtil.updateTenantAccessTime(highLevelClient, tenantAccessTimes, metricMappingIndexName);
tenantAccessTimes.clear();
} catch (Throwable e) {
log.error(String.format("update access time error %s", e.getMessage()), e);
}
}
private boolean needKeepTenantNumber(String tenant) {
if (!keepTenantToNumbers) {
return false;
}
if (notKeepTenantNumberPre != null) {
for (String pre : notKeepTenantNumberPre) {
if (tenant.startsWith(pre)) {
return false;
}
}
}
return true;
}
private void handleException(Throwable exception) throws Throwable {
String stackString = Throwables.getStackTraceAsString(exception);
if (stackString.contains("I/O reactor status: STOPPED") || stackString.contains("Connection closed")) {
this.highLevelClient = createHighLevelClient(esClusterConf);
log.error("es connection stopped, re connect");
} else {
throw exception;
}
}
}
| 7,579 |
3,672 | <reponame>ddc7451/RxFFmpeg<filename>rxffmpeg/src/main/java/io/microshow/rxffmpeg/RxFFmpegProgress.java
package io.microshow.rxffmpeg;
/**
* Progress wrapper
*/
public class RxFFmpegProgress {
public int state = 0;
/**
* Execution progress
*/
public int progress;
/**
* Elapsed time, relative to the total duration; unit: microseconds
*/
public long progressTime;
public RxFFmpegProgress(int state, int progress, long progressTime) {
this.state = state;
this.progress = progress;
this.progressTime = progressTime;
}
public RxFFmpegProgress(int state) {
this(state, 0, 0);
}
}
| 294 |
530 | <reponame>EnochPrime/node-zwave-js<gh_stars>100-1000
{
"manufacturer": "Fantem",
"manufacturerId": "0x016a",
"label": "FT101",
"description": "Cube",
"devices": [
{
"productType": "0x0001",
"productId": "0x0065",
"zwaveAllianceId": 2409
},
{
"productType": "0x0101",
"productId": "0x0065",
"zwaveAllianceId": [2205, 2677]
},
{
"productType": "0x0201",
"productId": "0x0065",
"zwaveAllianceId": 2408
},
{
"productType": "0x0301",
"productId": "0x0065",
"zwaveAllianceId": 2410
}
],
"firmwareVersion": {
"min": "0.0",
"max": "255.255"
},
"metadata": {
"reset": "If this controller is the primary controller for your network, resetting it will result in the nodes in your network being orphaned and it will be necessary after the reset to exclude and re-include all of the nodes in the network. If this controller is being used as a secondary controller in the network, use this procedure to reset this controller only in the event that the network primary controller is missing or otherwise inoperable.\n\nFollowing the UI guidelines on Oomi Touch or on-screen instructions on PC host, click the \"Reset\" button to reset the Cube.",
"manual": "https://products.z-wavealliance.org/ProductManual/File?folder=&filename=MarketCertificationFiles/2677/Oomi%20Cube%20manual%20-2.pdf"
}
}
| 502 |
4,262 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.api.management.mbean;
import org.apache.camel.api.management.ManagedAttribute;
import org.apache.camel.api.management.ManagedOperation;
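/*
 * Circuit-breaker semantics, inferred from the attribute descriptions
 * below: the circuit opens after "failure threshold" failures within
 * the failure window, and moves to half-open after "half open after"
 * milliseconds to probe whether traffic can resume.
 */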
public interface ManagedThrottlingExceptionRoutePolicyMBean extends ManagedServiceMBean {
@ManagedAttribute(description = "How long to wait before moving open circuit to half open")
Long getHalfOpenAfter();
@ManagedAttribute(description = "How long to wait before moving open circuit to half open")
void setHalfOpenAfter(Long milliseconds);
@ManagedAttribute(description = "The range of time that failures should occur within")
Long getFailureWindow();
@ManagedAttribute(description = "The range of time that failures should occur within")
void setFailureWindow(Long milliseconds);
@ManagedAttribute(description = "Number of failures before opening circuit")
Integer getFailureThreshold();
@ManagedAttribute(description = "Number of failures before opening circuit")
void setFailureThreshold(Integer numberOfFailures);
@ManagedOperation(description = "The current state of the circuit")
String currentState();
@ManagedAttribute(description = "The half open handler registered (if any)")
String getHalfOpenHandlerName();
@ManagedAttribute(description = "The number of failures caught")
Integer getCurrentFailures();
@ManagedAttribute(description = "Number of ms since the last failure was recorded")
Long getLastFailure();
@ManagedAttribute(description = "Number ms since the circuit was opened")
Long getOpenAt();
}
| 618 |
1,088 | <reponame>amznero/graph-learn<filename>graphlearn/include/dag_dataset.h<gh_stars>1000+
/* Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef GRAPHLEARN_INCLUDE_DAG_DATASET_H_
#define GRAPHLEARN_INCLUDE_DAG_DATASET_H_
#include <mutex> // NOLINT [build/c++11]
#include <semaphore.h>
#include "graphlearn/include/client.h"
#include "graphlearn/include/dag_request.h"
#include "graphlearn/common/threading/runner/threadpool.h"
namespace graphlearn {
class Dataset {
public:
Dataset(Client* client, int32_t dag_id);
~Dataset();
void Close();
GetDagValuesResponse* Next(int32_t epoch);
private:
void PrefetchAsync();
void PrefetchFn();
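// Prefetch design, inferred from the members below: worker tasks fill
// the ring buffer buffer_ (capacity cap_); one semaphore per slot in
// occupied_ guards hand-off, head_ tracks the producer position and
// Next() consumes at cursor_.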
private:
Client* client_;
int32_t dag_id_;
int32_t cap_;
int32_t cursor_;
std::vector<sem_t> occupied_;
std::atomic<int32_t> head_;
std::unique_ptr<ThreadPool> tp_;
std::vector<GetDagValuesResponse*> buffer_;
};
} // namespace graphlearn
#endif // GRAPHLEARN_INCLUDE_DAG_DATASET_H_
| 532 |
17,702 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class PuddleWorld(gym.Env):
"""This class creates a continous-state maze problem given a map."""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self._load_map()
self.viewer = None
self.action_space = spaces.Discrete(4)
self.observation_space = spaces.Box(np.zeros(2), self.room_lengths)
self._seed()
self._reset()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (
action, type(action))
if (self.np_random.uniform(0., 1.) > self.motion_noise):
state0 = self.state[0]
state1 = self.state[1]
# Motion length is a truncated normal random variable.
motion_length = np.maximum(
0.,
np.minimum(
self.motion_max,
self.np_random.normal(self.motion_mean, self.motion_std)))
if action == 0: # north
state1 = np.minimum(self.room_lengths[1],
state1 + motion_length)
elif action == 1: # east
state0 = np.minimum(self.room_lengths[0],
state0 + motion_length)
elif action == 2: # south
state1 = np.maximum(0., state1 - motion_length)
else: # west
state0 = np.maximum(0., state0 - motion_length)
self.state[0] = state0
self.state[1] = state1
done = self._is_goal(self.state)
reward = self._compute_reward(self.state)
return self.state, reward, done, {}
def _reset(self):
self.state = np.copy(self.initial_state)
return self.state
def _load_map(self):
self.room_lengths = np.array([1., 1.])
self.initial_state = np.array([0., 0.])
self.goal_state = np.array([1., 1.])
self.goal_width = 0.01
self.motion_noise = 0.05 # probability of no-motion (staying in same state)
self.motion_mean = 0.1 # mean of motion length
self.motion_std = 0.1 * self.motion_mean # std of motion length
self.motion_max = 2.0 * self.motion_mean
self.puddle_centers = []
self.puddle_radii = []
self._build_puddle(np.array([0.2, 0.4]), 0.1)
self._build_puddle(np.array([0.5, 0.8]), 0.1)
self._build_puddle(np.array([0.9, 0.1]), 0.1)
self.num_puddles = len(self.puddle_centers)
self.puddle_cost = 2.0
def _compute_reward(self, state):
reward = -1
for i in range(self.num_puddles):
delta = state - self.puddle_centers[i]
dist = np.dot(delta, delta)
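# NOTE: dist is the *squared* distance, but it is compared against
# the raw radius below, so the effective puddle radius is
# sqrt(puddle_radii[i]) (~0.316 for a stored radius of 0.1).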
if dist <= self.puddle_radii[i]:
reward -= self.puddle_cost
return reward
def _is_goal(self, state):
return state[0] >= self.goal_state[0] - self.goal_width and \
state[1] >= self.goal_state[1] - self.goal_width
def _build_puddle(self, center, radius):
self.puddle_centers.append(center)
self.puddle_radii.append(radius)
def _render(self, mode='human', close=False):
pass
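# A minimal usage sketch (assumes the old gym-internal API in which
# _reset/_step are invoked through gym's public reset/step wrappers):
#
#   env = PuddleWorld()
#   state = env._reset()
#   state, reward, done, info = env._step(1)  # move east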
| 1,729 |
1,133 | <reponame>mikesep/comdb2<filename>crc32c/crc32c.c
/*
Copyright 2015 <NAME>.P.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include "crc32c.h"
/* Compute chksum using lookup tables (slicing by 8) */
#include "sb8.h"
#include <logmsg.h>
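/*
 * Slicing-by-8 in brief: XOR the current CRC into the first 4 of the
 * next 8 input bytes, look each of the 8 bytes up in its own 256-entry
 * table (crc_tableil8_o32..o88, one table per byte offset), and XOR
 * the results together -- advancing the CRC 8 bytes per step.
 */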
uint32_t crc32c_software(const uint8_t* buf, uint32_t sz, uint32_t crc)
{
/* Process misaligned data byte at a time */
intptr_t misaligned = (intptr_t)buf & (sizeof(intptr_t) - 1);
unsigned adj = misaligned ? sizeof(intptr_t) - misaligned : 0;
if (adj > sz) adj = sz;
int i = 0;
switch (adj) {
case 7: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 6: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 5: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 4: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 3: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 2: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 1: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
sz -= adj;
buf += i;
}
/* Process 8 bytes at a time */
const uint8_t *end = buf + (sz & (~0x7));
while (buf < end) {
// read two little endian ints
uint32_t u32a, u32b;
u32a = (buf[0]<<0) | (buf[1]<<8) | (buf[2]<<16) | (buf[3]<<24);
buf += 4;
u32b = (buf[0]<<0) | (buf[1]<<8) | (buf[2]<<16) | (buf[3]<<24);
buf += 4;
crc ^= u32a;
uint32_t term1 = crc_tableil8_o88[crc & 0x000000FF] ^ crc_tableil8_o80[(crc >> 8) & 0x000000FF];
uint32_t term2 = crc >> 16;
crc = term1 ^ crc_tableil8_o72[term2 & 0x000000FF] ^ crc_tableil8_o64[(term2 >> 8) & 0x000000FF];
term1 = crc_tableil8_o56[u32b & 0x000000FF] ^ crc_tableil8_o48[(u32b >> 8) & 0x000000FF];
term2 = u32b >> 16;
crc = crc ^ term1 ^ crc_tableil8_o40[term2 & 0x000000FF] ^ crc_tableil8_o32[(term2 >> 8) & 0x000000FF];
}
/* Process the last 7 (or less) bytes */
sz &= 0x7;
i = 0;
switch (sz) {
case 7: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 6: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 5: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 4: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 3: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 2: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
case 1: crc = crc_tableil8_o32[(crc ^ buf[i]) & 0x000000FF] ^ (crc >> 8); ++i;
}
return crc;
}
#ifdef __x86_64__
#include <smmintrin.h>
#include <wmmintrin.h>
/* Fwd declare available methods to compute crc32c */
static uint32_t crc32c_sse_pcl(const uint8_t *buf, uint32_t sz, uint32_t crc);
static uint32_t crc32c_sse(const uint8_t *buf, uint32_t sz, uint32_t crc);
typedef uint32_t(*crc32c_t)(const uint8_t* data, uint32_t size, uint32_t crc);
static crc32c_t crc32c_func;
/* Vector type so that we can use pclmul */
typedef long long v2di __attribute__ ((vector_size(16)));
/* Select best method to compute crc32c */
#include <cpuid.h>
#ifdef __clang__
#define SSE4_2 bit_SSE42
#define PCLMUL bit_PCLMULQDQ
#else
#define SSE4_2 bit_SSE4_2
#define PCLMUL bit_PCLMUL
#endif
void crc32c_init(int v)
{
uint32_t eax, ebx, ecx, edx;
__cpuid(1, eax, ebx, ecx, edx);
if (ecx & SSE4_2) {
if (ecx & PCLMUL) {
crc32c_func = crc32c_sse_pcl;
if (v) {
logmsg(LOGMSG_INFO, "SSE 4.2 + PCLMUL SUPPORT FOR CRC32C\n");
logmsg(LOGMSG_INFO, "crc32c = crc32c_sse_pcl\n");
}
} else {
crc32c_func = crc32c_sse;
if (v) {
logmsg(LOGMSG_INFO, "SSE 4.2 SUPPORT FOR CRC32C\n");
logmsg(LOGMSG_INFO, "crc32c = crc32c_sse\n");
}
}
}
if (crc32c_func == NULL) {
crc32c_func = crc32c_software;
if (v) {
logmsg(LOGMSG_INFO, "NO HARDWARE SUPPORT FOR CRC32C\n");
logmsg(LOGMSG_INFO, "crc32c = crc32c_software\n");
}
}
}
uint32_t crc32c_comdb2(const uint8_t* buf, uint32_t sz)
{
return crc32c_func(buf, sz, CRC32C_SEED);
}
/* Helper routines */
static inline uint32_t crc32c_1024_sse_int(const uint8_t *buf, uint32_t crc);
static inline uint32_t crc32c_until_aligned(const uint8_t **buf, uint32_t *sz, uint32_t crc);
static inline uint32_t crc32c_8s(const uint8_t *buf, uint32_t sz, uint32_t crc);
#define _1K 1024
#define _3K _1K * 3
#define REPEAT_2(x) x x
#define REPEAT_4(x) REPEAT_2(x) REPEAT_2(x)
#define REPEAT_8(x) REPEAT_4(x) REPEAT_4(x)
#define REPEAT_16(x) REPEAT_8(x) REPEAT_8(x)
#define REPEAT_32(x) REPEAT_16(x) REPEAT_16(x)
#define REPEAT_64(x) REPEAT_32(x) REPEAT_32(x)
#define REPEAT_42(x) REPEAT_32(x) REPEAT_8(x) REPEAT_2(x)
#define REPEAT_127(x) REPEAT_64(x) REPEAT_32(x) REPEAT_16(x) \
REPEAT_8(x) REPEAT_4(x) REPEAT_2(x) x
// Intel White Paper: Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction
#define THREESOME \
c1 = _mm_crc32_u64(c1, b1[i]); \
c2 = _mm_crc32_u64(c2, b2[i]); \
c3 = _mm_crc32_u64(c3, b3[i]); \
++i;
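/*
 * Three independent CRC lanes hide the latency of the crc32 instruction
 * (roughly 3 cycles latency, 1 per cycle throughput on recent Intel
 * cores), keeping the unit busy; the per-lane CRCs are recombined
 * afterwards.
 */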
/* Compute chksum processing 8 bytes at a time */
static inline uint32_t crc32c_8s(const uint8_t *buf, uint32_t sz, uint32_t crc)
{
crc = crc32c_until_aligned(&buf, &sz, crc);
const uint8_t *end = buf + sz;
const uint64_t *b = (uint64_t *) buf;
const uint64_t *e = b + (sz / 8);
while (b < e) {
crc = _mm_crc32_u64(crc, *b);
++b;
}
buf = (uint8_t *) b;
intptr_t diff = end - buf;
int i = 0;
switch (diff) {
case 7: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 6: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 5: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 4: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 3: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 2: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 1: crc = _mm_crc32_u8(crc, buf[i]); ++i;
}
return crc;
}
/*
* Compute chksum processing 1024 bytes at a time and using
* lookup tables for recombination
*/
static uint32_t crc32c_sse(const uint8_t *buf, uint32_t sz, uint32_t crc)
{
crc = crc32c_until_aligned(&buf, &sz, crc);
uint32_t i = sz % 1024;
if (i) {
sz -= i;
crc = crc32c_8s(buf, i, crc);
buf += i;
i = 0;
}
while (i < sz) {
crc = crc32c_1024_sse_int(&buf[i], crc);
i += 1024;
}
return crc;
}
/*
* Compute chksum processing 3072 bytes at a time and using
* PCLMUL for recombination. Use SSE for processing input < 3K.
*/
static uint32_t crc32c_sse_pcl(const uint8_t *buf, uint32_t sz, uint32_t crc)
{
crc = crc32c_until_aligned(&buf, &sz, crc);
const uint64_t *b1, *b2, *b3;
uint64_t c1, c2, c3;
uint64_t out = crc;
v2di x1 = {0}, x2 = {0};
const v2di K = {0x1a0f717c4, 0x0170076fa};
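// Assumption: K holds precomputed constants equivalent to x^(2048*8)
// mod P and x^(1024*8) mod P for the bit-reflected CRC32C polynomial,
// used below to shift the CRCs of blocks 1 and 2 over the bytes that
// follow them before XOR-merging with block 3.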
while (sz >= _3K) {
b1 = (const uint64_t *) &buf[0];
b2 = (const uint64_t *) &buf[1024];
b3 = (const uint64_t *) &buf[2048];
c1 = out;
c2 = c3 = 0;
int i = 0;
REPEAT_127(THREESOME);
// Combine three results
x1[0] = _mm_crc32_u64(c1, b1[127]); // block 1 crc
x2[0] = _mm_crc32_u64(c2, b2[127]); // block 2 crc
x1 = _mm_clmulepi64_si128(x1, K, 0x00); // mul by K[0]
x2 = _mm_clmulepi64_si128(x2, K, 0x10); // mul by K[1]
x1 = _mm_xor_si128(x1, x2);
out = x1[0]; // boring scalar operations
out ^= b3[127];
out = _mm_crc32_u64(c3, out);
buf += _3K;
sz -= _3K;
}
if (sz) out = crc32c_sse(buf, sz, out);
return out;
}
/* Compute chksum 1 byte at a time until input is sizeof(intptr) aligned */
static inline
uint32_t crc32c_until_aligned(const uint8_t **buf_, uint32_t *sz_, uint32_t crc)
{
const uint8_t *buf = *buf_;
uint32_t sz = *sz_;
intptr_t misaligned = (intptr_t)buf & (sizeof(intptr_t) - 1);
unsigned adj = misaligned ? sizeof(intptr_t) - misaligned : 0;
if (adj > sz) adj = sz;
int i = 0;
switch (adj) {
case 7: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 6: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 5: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 4: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 3: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 2: crc = _mm_crc32_u8(crc, buf[i]); ++i;
case 1: crc = _mm_crc32_u8(crc, buf[i]); ++i;
sz -= adj;
*sz_ = sz;
*buf_ = buf + i;
}
return crc;
}
/* Compute chksum for 1024 bytes using SSE & recombine using lookup tables */
#include "crc32c_1024.h"
static inline uint32_t crc32c_1024_sse_int(const uint8_t *buf, uint32_t crc)
{
uint64_t c1, c2, c3, tmp;
const uint64_t *b8 = (const uint64_t *) buf;
const uint64_t *b1 = &b8[1];
const uint64_t *b2 = &b8[43];
const uint64_t *b3 = &b8[85];
c2 = c3 = 0;
c1 = _mm_crc32_u64(crc, b8[0]);
int i = 0;
REPEAT_42(THREESOME);
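/*
* Each stream above covered 336 bytes (42 u64s x 3 lanes, plus b8[0] and
* b8[127] handled separately). The mul_table1_336/_672 lookups below
* multiply c2 and c1 by x^(8*336) and x^(8*672) respectively, i.e.
* table-based recombination instead of PCLMUL; the distances follow from
* the b1/b2/b3 block offsets, though the table naming is inferred here.
*/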
// merge in c2
tmp = b8[127];
tmp ^= mul_table1_336[c2 & 0xFF];
tmp ^= ((uint64_t) mul_table1_336[(c2 >> 8) & 0xFF]) << 8;
tmp ^= ((uint64_t) mul_table1_336[(c2 >> 16) & 0xFF]) << 16;
tmp ^= ((uint64_t) mul_table1_336[(c2 >> 24) & 0xFF]) << 24;
// merge in c1
tmp ^= mul_table1_672[c1 & 0xFF];
tmp ^= ((uint64_t) mul_table1_672[(c1 >> 8) & 0xFF]) << 8;
tmp ^= ((uint64_t) mul_table1_672[(c1 >> 16) & 0xFF]) << 16;
tmp ^= ((uint64_t) mul_table1_672[(c1 >> 24) & 0xFF]) << 24;
return _mm_crc32_u64(c3, tmp);
}
#endif // Intel only
#if defined(_HAS_CRC32_ARMV7) || defined(_HAS_CRC32_ARMV8)
#include <arm_acle.h>
#include <asm/hwcap.h>
#include <sys/auxv.h>
typedef uint32_t(*crc32c_t)(const uint8_t* data, uint32_t size, uint32_t crc);
static crc32c_t crc32c_func;
/* Compute chksum for a small (<8) number of bytes: word, then half-word, then byte.
A byte-at-a-time loop is more expensive than chunking by word, half-word, and byte:
while (sz >= sizeof(const uint8_t)) {
crc = __crc32cb(crc, *buf++);
sz -= 1;
}
*/
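/* Note: sz is < 8 at every call site, so testing (sz & 4), (sz & 2) and
(sz & 1) below visits each power-of-two component of the size exactly once. */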
static inline uint32_t crc32c_process_small_arm(const uint8_t **buf_, uint32_t *totsz_, uint32_t sz, uint32_t crc)
{
const uint8_t *buf = *buf_;
if (sz & sizeof(uint32_t)) {
crc = __crc32cw(crc, *(const uint32_t *)buf);
buf += sizeof(uint32_t);
}
if (sz & sizeof(uint16_t)) {
crc = __crc32ch(crc, *(const uint16_t *)buf);
buf += sizeof(uint16_t);
}
if (sz & sizeof(const uint8_t)) {
crc = __crc32cb(crc, *buf);
}
*buf_ = buf + sz;
*totsz_ -= sz;
return crc;
}
/* Compute chksum until input is sizeof(intptr) aligned */
static inline uint32_t crc32c_until_aligned_arm(const uint8_t **buf_, uint32_t *sz_, uint32_t crc)
{
const uint8_t *buf = *buf_;
uint32_t sz = *sz_;
intptr_t misaligned = (intptr_t)buf & (sizeof(intptr_t) - 1);
unsigned adj = misaligned ? sizeof(intptr_t) - misaligned : 0;
if (adj > sz) adj = sz;
return crc32c_process_small_arm(buf_, sz_, adj, crc);
}
static inline uint32_t crc32c_arm(const uint8_t* buf, uint32_t sz, uint32_t crc)
{
// If we will need to process long buffers aligned we
// should call it here: crc32c_until_aligned_arm(&buf, &sz, crc);
while (sz >= sizeof(uint64_t)) {
crc = __crc32cd(crc, *(const uint64_t *)buf);
sz -= sizeof(uint64_t);
buf += sizeof(uint64_t);
}
return crc32c_process_small_arm(&buf, &sz, sz, crc);
}
void crc32c_init(int v)
{
#if defined(_HAS_CRC32_ARMV7)
int en = getauxval(AT_HWCAP2) & HWCAP2_CRC32;
#else
int en = getauxval(AT_HWCAP) & HWCAP_CRC32;
#endif
if (en) {
crc32c_func = crc32c_arm;
if (v) {
logmsg(LOGMSG_INFO, "ARM HW SUPPORT FOR CRC32C\n");
}
} else {
crc32c_func = crc32c_software;
if (v) {
logmsg(LOGMSG_INFO, "NO HARDWARE SUPPORT FOR CRC32C\n");
logmsg(LOGMSG_INFO, "crc32c = crc32c_software\n");
}
}
}
uint32_t crc32c_comdb2(const uint8_t* buf, uint32_t sz)
{
return crc32c_func(buf, sz, CRC32C_SEED);
}
#endif
#if TEST_CRC32C
int logmsg(loglvl lvl, const char *fmt, ...) {
va_list args;
va_start(args, fmt);
int ret = vprintf(fmt, args);
va_end(args);
return ret;
}
#include <assert.h>
#include <sys/time.h>
void timediff(const char * s) {
static struct timeval tv;
struct timeval tmp;
gettimeofday(&tmp, NULL);
int sec_us = (tmp.tv_sec - tv.tv_sec) * 1000000;
int usec = (tmp.tv_usec - tv.tv_usec);
if (tv.tv_sec)
printf("%20.20s diff = %12d usec\n", s, sec_us + usec);
tv = tmp;
}
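/* noinline sink: keeps the benchmark loops below from being optimized away */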
__attribute__((noinline)) int f(int a)
{
return a;
}
int main()
{
crc32c_init(1);
printf("Test that NULL does not crash hw %d", crc32c_comdb2(NULL, 0)); printf("...check\n");
printf("'' sw 0x%x hw 0x%x", crc32c_software((const uint8_t*)"", 0, 0), crc32c_comdb2((const uint8_t*)"", 0));
if (crc32c_comdb2((const uint8_t*)"", 0) != 0x0) {
printf("...crc2c for '' not correct\n");
exit(1);
} else
printf("...check\n");
printf("'a' sw 0x%x hw 0x%x", crc32c_software((const uint8_t*)"a", 1, 0), crc32c_comdb2((const uint8_t*)"a", 1));
if (crc32c_comdb2((const uint8_t*)"a", 1) != 0x93ad1061) {
printf("...crc2c for 'a' not correct\n");
exit(1);
} else
printf("...check\n");
#define MAXLEN (1 << 15)
uint8_t lbuf[MAXLEN];
int i;
lbuf[0] = 'a';
for(i = 1; i < MAXLEN; i++)
lbuf[i] = i;
timediff("start");
for(i = 1; i < MAXLEN; i++) {
int a = crc32c_software(lbuf, i, 0);
f(a);
}
timediff("software: ");
for(i = 1; i < MAXLEN; i++) {
int a = crc32c_comdb2(lbuf, i);
f(a);
}
timediff("hardware: ");
for(i = 1; i < MAXLEN; i++) {
assert(crc32c_software(lbuf, i, 0) == crc32c_comdb2(lbuf, i));
}
printf("successfully tested %d strings\n", i);
return 0;
}
#endif
| 7,007 |
307 | package se.jiderhamn.classloader.leak.prevention.cleanup;
/**
* Test case for {@link ShutdownHookCleanUp}
* @author <NAME>
*/
public class ShutdownHookCleanUpTest extends ClassLoaderPreMortemCleanUpTestBase<ShutdownHookCleanUp> {
@Override
protected void triggerLeak() throws Exception {
Runtime.getRuntime().addShutdownHook(new ShutdownHookThread());
}
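/* The hook above leaks because Runtime keeps a strong reference to the
ShutdownHookThread instance, whose class in turn keeps its defining
class loader reachable until the hook is removed or the JVM exits. */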
/** Dummy shutdown hook */
private class ShutdownHookThread extends Thread {
}
} | 143 |
1,088 | /* Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef GRAPHLEARN_COMMON_THREADING_LOCKFREE_LOCKFREE_STACK_H_
#define GRAPHLEARN_COMMON_THREADING_LOCKFREE_LOCKFREE_STACK_H_
#include <algorithm>
#include <random>
#include <vector>
#include "graphlearn/common/threading/atomic/atomic.h"
#include "graphlearn/common/threading/lockfree/lockfree_detail.h"
namespace graphlearn {
template<typename T>
class LockFreeStack {
struct Node {
uint32_t mNext;
uint32_t mNextFreeNode;
T mValue;
};
public:
explicit LockFreeStack(size_t size = 65536);
~LockFreeStack();
bool Push(const T& elem);
bool Pop(T* elem);
size_t Size() const;
bool Empty() const;
private:
size_t Normalize(size_t size) const;
inline int32_t GetIndex(Node* node) const;
bool AcquireNode(Node** node);
void ReleaseNode(Node* node);
private:
static const uint64_t kNullPointer = 0xFFFFFFFF00000000;
size_t mLimit;
size_t mSize;
__attribute__((aligned(64))) Node* mNodeBuffer;
__attribute__((aligned(64))) uint64_t mHead;
__attribute__((aligned(64))) uint64_t mFreeNodeHead;
};
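// Implementation note: mHead and mFreeNodeHead pack a 32-bit node index in
// one half and a 32-bit ABA tag in the other (see detail::MakePointer).
// Every successful CAS bumps the tag, so a head that was popped and pushed
// back between a reader's load and its CAS no longer compares equal.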
template<typename T>
LockFreeStack<T>::LockFreeStack(size_t size) {
mLimit = Normalize(size);
mSize = 0;
mFreeNodeHead = kNullPointer;
mNodeBuffer = new Node[mLimit];
std::vector<size_t> index;
index.reserve(mLimit);
for (size_t k = 0; k < mLimit; ++k) {
index.push_back(k);
}
std::shuffle(index.begin(), index.end(), std::mt19937(std::random_device{}()));
for (size_t k = 0; k < mLimit; ++k) {
Node& node = mNodeBuffer[index[k]];
node.mNext = 0;
node.mNextFreeNode = detail::kLockFreeNullPointer;
ReleaseNode(&node);
}
mHead = kNullPointer;
}
template<typename T>
LockFreeStack<T>::~LockFreeStack() {
delete[] mNodeBuffer;
}
template<typename T>
bool LockFreeStack<T>::Push(const T& elem) {
Node *node = 0;
if (!AcquireNode(&node)) {
return false;
}
node->mValue = elem;
detail::Pointer head;
uint32_t index = GetIndex(node);
detail::Pointer newhead;
do {
head = mHead;
node->mNext = detail::Index(head);
newhead = detail::MakePointer(index, detail::Tag(head) + 1);
} while (!AtomicCompareExchange(&mHead, newhead, head));
AtomicIncrement(&mSize);
return true;
}
template<typename T>
bool LockFreeStack<T>::Pop(T* elem) {
detail::Pointer head;
detail::Pointer newhead;
Node* node = 0;
do {
head = mHead;
size_t index = detail::Index(head);
if (index == detail::kLockFreeNullPointer) {
return false;
}
node = &mNodeBuffer[index];
newhead = detail::MakePointer(node->mNext, detail::Tag(head) + 1);
} while (!AtomicCompareExchange(&mHead, newhead, head));
*elem = node->mValue;
ReleaseNode(node);
AtomicDecrement(&mSize);
return true;
}
template<typename T>
size_t LockFreeStack<T>::Size() const {
return AtomicGet(&mSize);
}
template<typename T>
bool LockFreeStack<T>::Empty() const {
return Size() == 0;
}
template<typename T>
size_t LockFreeStack<T>::Normalize(size_t size) const {
if (size == 0 || size >= (1 << 24)) {
::abort();
}
return size;
}
template<typename T>
int32_t LockFreeStack<T>::GetIndex(Node* node) const {
return node - mNodeBuffer;
}
template<typename T>
bool LockFreeStack<T>::AcquireNode(Node** node) {
Node *pnode;
uint64_t head;
uint64_t newhead;
do {
head = mFreeNodeHead;
uint32_t index = detail::Index(head);
if (__builtin_expect(index == detail::kLockFreeNullPointer, 0)) {
return false;
}
pnode = &mNodeBuffer[index];
uint32_t tag = detail::Tag(head) + 1;
newhead = detail::MakePointer(pnode->mNextFreeNode, tag);
} while (!AtomicCompareExchange(&mFreeNodeHead, newhead, head));
*node = pnode;
return true;
}
template<typename T>
void LockFreeStack<T>::ReleaseNode(Node* node) {
// put this node into the head atomically
node->mValue = T();
uint64_t head;
uint64_t newhead;
do {
head = mFreeNodeHead;
newhead = detail::MakePointer(GetIndex(node),
detail::Tag(head) + 1);
node->mNextFreeNode = detail::Index(head);
} while (!AtomicCompareExchange(&mFreeNodeHead, newhead, head));
}
} // namespace graphlearn
#endif // GRAPHLEARN_COMMON_THREADING_LOCKFREE_LOCKFREE_STACK_H_
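// Usage sketch (hedged; capacity must be non-zero and below 2^24, or the
// constructor aborts):
//
//   graphlearn::LockFreeStack<int> stack(1024);
//   stack.Push(42);
//   int v;
//   if (stack.Pop(&v)) { /* v == 42 */ }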
| 1,792 |
1,192 | //===-- llvm/CodeGen/AllocationOrder.cpp - Allocation Order ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an allocation order for virtual registers.
//
// The preferred allocation order for a virtual register depends on allocation
// hints and target hooks. The AllocationOrder class encapsulates all of that.
//
//===----------------------------------------------------------------------===//
#include "AllocationOrder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "regalloc"
// Compare VirtRegMap::getRegAllocPref().
AllocationOrder::AllocationOrder(unsigned VirtReg,
const VirtRegMap &VRM,
const RegisterClassInfo &RegClassInfo)
: Pos(0) {
const MachineFunction &MF = VRM.getMachineFunction();
const TargetRegisterInfo *TRI = &VRM.getTargetRegInfo();
Order = RegClassInfo.getOrder(MF.getRegInfo().getRegClass(VirtReg));
TRI->getRegAllocationHints(VirtReg, Order, Hints, MF, &VRM);
rewind();
DEBUG({
if (!Hints.empty()) {
dbgs() << "hints:";
for (unsigned I = 0, E = Hints.size(); I != E; ++I)
dbgs() << ' ' << PrintReg(Hints[I], TRI);
dbgs() << '\n';
}
});
#ifndef NDEBUG
for (unsigned I = 0, E = Hints.size(); I != E; ++I)
assert(std::find(Order.begin(), Order.end(), Hints[I]) != Order.end() &&
"Target hint is outside allocation order.");
#endif
}
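// Hedged sketch of how a register allocator consumes this class: hinted
// registers are visited first after rewind(), then the remaining class order:
//
//   AllocationOrder Order(VirtReg, VRM, RegClassInfo);
//   while (unsigned PhysReg = Order.next())
//     ; // try to assign PhysReg, stop at the first that works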
| 665 |