max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
2,151 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_THIRD_PARTY_HTTP2_PLATFORM_IMPL_HTTP2_RECONSTRUCT_OBJECT_IMPL_H_
#define NET_THIRD_PARTY_HTTP2_PLATFORM_IMPL_HTTP2_RECONSTRUCT_OBJECT_IMPL_H_
#include <utility>
namespace http2 {
namespace test {

class RandomBase;

// Re-initializes *object as if it had just been constructed with |args|.
// The old value's destructor runs first so any owned resources are released,
// then the constructor is re-run in place via placement new. The |rng|
// parameter is unused in this impl but kept for signature compatibility.
template <class T, class... Args>
void Http2ReconstructObjectImpl(T* object, RandomBase* /* rng */, Args&&... args) {
  object->~T();
  ::new (object) T(std::forward<Args>(args)...);
}

// Same as above, but leaves the object default-initialized (note: T, not T(),
// so trivially-constructible members are left uninitialized on purpose).
template <class T>
void Http2DefaultReconstructObjectImpl(T* object, RandomBase* /* rng */) {
  object->~T();
  ::new (object) T;
}

}  // namespace test
}  // namespace http2
#endif // NET_THIRD_PARTY_HTTP2_PLATFORM_IMPL_HTTP2_RECONSTRUCT_OBJECT_IMPL_H_
| 369 |
8,772 |
<gh_stars>1000+
package org.apereo.cas.qr.web.flow;
import org.apereo.cas.config.QRAuthenticationConfiguration;
import org.apereo.cas.config.TokenCoreConfiguration;
import org.apereo.cas.web.flow.BaseWebflowConfigurerTests;
import org.apereo.cas.web.flow.CasWebflowConfigurer;
import lombok.val;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.springframework.context.annotation.Import;
import org.springframework.webflow.engine.Flow;
import static org.junit.jupiter.api.Assertions.*;
/**
* This is {@link QRAuthenticationWebflowConfigurerTests}.
*
* @author <NAME>
* @since 6.3.0
*/
@Import({
    QRAuthenticationConfiguration.class,
    TokenCoreConfiguration.class
})
@Tag("WebflowConfig")
public class QRAuthenticationWebflowConfigurerTests extends BaseWebflowConfigurerTests {

    /**
     * The QR-authentication configurer must be registered in the execution plan
     * and must have added its token-validation state to the login flow.
     */
    @Test
    public void verifyOperation() {
        assertFalse(casWebflowExecutionPlan.getWebflowConfigurers().isEmpty());

        final Flow flow = (Flow) this.loginFlowDefinitionRegistry.getFlowDefinition(CasWebflowConfigurer.FLOW_ID_LOGIN);
        assertNotNull(flow);
        assertTrue(flow.containsState(QRAuthenticationWebflowConfigurer.STATE_ID_VALIDATE_QR_TOKEN));
    }
}
| 433 |
2,497 |
#!/usr/bin/env python3
import ptan
import gym
import argparse
import numpy as np
from lib import common
import torch
import torch.nn.functional as F
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True, help="Model file name")
    parser.add_argument("-w", "--write", required=True, help="Monitor directory name")
    parser.add_argument("--cuda", default=False, action="store_true")
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    args = parser.parse_args()

    device = torch.device("cuda" if args.cuda else "cpu")

    # Seed both torch and numpy so a playback run is reproducible.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    make_env = lambda: ptan.common.wrappers.wrap_dqn(
        gym.make("BreakoutNoFrameskip-v4"),
        stack_frames=common.FRAMES_COUNT,
        episodic_life=False, reward_clipping=False)
    env = make_env()
    env = gym.wrappers.Monitor(env, args.write)

    net = common.AtariA2C(env.observation_space.shape, env.action_space.n)
    # map_location keeps CPU-only loading working for GPU-trained checkpoints.
    net.load_state_dict(torch.load(args.model, map_location=lambda storage, loc: storage))
    # Move the model to the selected device (replaces the cuda-only branch so
    # the net and the input tensors always live on the same device).
    net = net.to(device)

    act_selector = ptan.actions.ProbabilityActionSelector()
    obs = env.reset()
    total_reward = 0.0
    total_steps = 0
    # Play a single episode, sampling actions from the policy distribution.
    while True:
        obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
        with torch.no_grad():  # inference only; no gradient bookkeeping needed
            logits_v, values_v = net(obs_v)
            # dim=1 made explicit: softmax over the action logits of each batch
            # row (the implicit-dim form is deprecated and ambiguous).
            probs_v = F.softmax(logits_v, dim=1)
        probs = probs_v.data.cpu().numpy()
        actions = act_selector(probs)
        obs, r, done, _ = env.step(actions[0])
        total_reward += r
        total_steps += 1
        if done:
            break
    print("Done in %d steps, reward %.2f" % (total_steps, total_reward))
| 804 |
1,350 |
<reponame>Shashi-rk/azure-sdk-for-java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.extendedlocation.generated;
import com.azure.core.util.Context;
/** Samples for CustomLocations ListOperations. */
public final class CustomLocationsListOperationsSamples {
/*
* x-ms-original-file: specification/extendedlocation/resource-manager/Microsoft.ExtendedLocation/stable/2021-08-15/examples/CustomLocationsListOperations.json
*/
/**
* Sample code: List Custom Locations operations.
*
* @param manager Entry point to CustomLocationsManager.
*/
public static void listCustomLocationsOperations(
com.azure.resourcemanager.extendedlocation.CustomLocationsManager manager) {
manager.customLocations().listOperations(Context.NONE);
}
}
| 294 |
303 |
<reponame>DannyParker0001/Kisak-Strike
// See Copyright Notice in gmMachine.h
#ifndef _timer_h
#define _timer_h
#include <wtypes.h>
// Frame timer built on the Win32 performance counter types from <wtypes.h>.
// Init() must succeed before Tick() is meaningful; Init()/Tick() are
// implemented in the .cpp, so their exact behavior should be confirmed there.
class Timer
{
public:
    Timer() : resetTimer(true), dTime(0.0f) {}
    ~Timer() {}

    // Initializes the timer; returns false on failure (see .cpp for details).
    bool Init();

    // Requests that the next Tick() start a fresh interval instead of
    // measuring from the previous tick.
    void Reset() { resetTimer = true; }

    // Advances the timer and returns the newly measured delta (see .cpp).
    float Tick();

    // Last delta computed by Tick(). Units are presumably seconds — confirm
    // against the .cpp implementation.
    float GetDelta() { return dTime; }

private:
    // Counter frequency and the two most recent counter samples.
    LARGE_INTEGER timerFrequency, lastTime, thisTime;
    // dTime: last measured delta. oof: purpose not visible from this header —
    // presumably a cached 1/frequency factor; verify in the .cpp.
    float dTime, oof;
    bool resetTimer;   // set by Reset(); consumed by Tick()
};
#endif //_timer_h
| 172 |
1,738 |
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.
#ifndef CRYINCLUDE_EDITOR_MATEDITPREVIEWDLG_H
#define CRYINCLUDE_EDITOR_MATEDITPREVIEWDLG_H
#pragma once
#include <QDialog>
#include <QScopedPointer>
#include "IDataBaseManager.h"
class MaterialPreviewModelView;
class QMenuBar;
// MatEditPreviewDlg.h : header file
//
/////////////////////////////////////////////////////////////////////////////
// CMatEditPreviewDlg dialog
// Qt dialog that renders a preview of the material being edited. Implements
// IDataBaseManagerListener so the preview can react to database item changes.
class CMatEditPreviewDlg
    : public QDialog
    , public IDataBaseManagerListener
{
    Q_OBJECT
    // Construction
public:
    CMatEditPreviewDlg(QWidget* parent); // standard constructor
    ~CMatEditPreviewDlg();

    QSize sizeHint() const override;
    void showEvent(QShowEvent*) override;

    //Functions
    // IDataBaseManagerListener callback — invoked when |pItem| changes;
    // handling lives in the .cpp.
    virtual void OnDataBaseItemEvent(IDataBaseItem* pItem, EDataBaseItemEvent event);

protected:
    // Builds the dialog's menu bar (hooks up the preview-shape slots below).
    void SetupMenuBar();

private slots:
    // Each slot switches the preview to a different model shape.
    void OnPreviewSphere();
    void OnPreviewPlane();
    void OnPreviewBox();
    void OnPreviewTeapot();
    void OnPreviewCustom();   // presumably a user-chosen model — see .cpp

private:
    QScopedPointer<MaterialPreviewModelView> m_previewCtrl;  // owned preview view
    QScopedPointer<QMenuBar> m_menubar;                      // owned menu bar
};
#endif // CRYINCLUDE_EDITOR_MATEDITPREVIEWDLG_H
| 562 |
303 |
import pytest
from redun.scheduler import Scheduler
@pytest.fixture
def scheduler() -> Scheduler:
    """Provide a fresh, in-memory redun Scheduler for each test."""
    return Scheduler()
| 64 |
460 |
package de.saxsys.mvvmfx.scopes.example1.views;
import de.saxsys.mvvmfx.InjectScope;
import de.saxsys.mvvmfx.ViewModel;
import de.saxsys.mvvmfx.scopes.example1.Example1Scope1;
import de.saxsys.mvvmfx.scopes.example1.Example1Scope2;
/**
 * View model used by the mvvmFX scope example/tests. It declares two fields
 * annotated with {@link InjectScope}, one per scope type; the fields are
 * public so the test code can inspect the injected instances directly.
 */
public class ScopedViewModelD implements ViewModel {

    // Populated by the framework via @InjectScope injection.
    @InjectScope
    public Example1Scope1 injectedScope1;

    @InjectScope
    public Example1Scope2 injectedScope2;
}
| 161 |
575 |
<filename>chrome/browser/android/autofill_assistant/generic_ui_interactions_android.h
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_ANDROID_AUTOFILL_ASSISTANT_GENERIC_UI_INTERACTIONS_ANDROID_H_
#define CHROME_BROWSER_ANDROID_AUTOFILL_ASSISTANT_GENERIC_UI_INTERACTIONS_ANDROID_H_
#include <string>
#include "base/android/jni_android.h"
#include "base/android/scoped_java_ref.h"
#include "base/memory/weak_ptr.h"
#include "chrome/browser/android/autofill_assistant/interaction_handler_android.h"
#include "components/autofill_assistant/browser/basic_interactions.h"
#include "components/autofill_assistant/browser/generic_ui.pb.h"
namespace autofill_assistant {
class RadioButtonController;
class ViewHandlerAndroid;

// Free functions implementing the individual generic-UI interactions.
// NOTE(review): most take base::WeakPtr receivers — presumably because the
// owning controller can be destroyed while an interaction is still scheduled;
// confirm against InteractionHandlerAndroid.
namespace android_interactions {

// Writes a value to the model.
void SetValue(base::WeakPtr<BasicInteractions> basic_interactions,
              const SetModelValueProto& proto);

// Computes a value and writes it to the model.
void ComputeValue(base::WeakPtr<BasicInteractions> basic_interactions,
                  const ComputeValueProto& proto);

// Sets the list of available user actions (i.e., chips and direct actions).
void SetUserActions(base::WeakPtr<BasicInteractions> basic_interactions,
                    const SetUserActionsProto& proto);

// Ends the current ShowGenericUi action.
void EndAction(base::WeakPtr<BasicInteractions> basic_interactions,
               const EndActionProto& proto);

// Enables or disables a particular user action.
void ToggleUserAction(base::WeakPtr<BasicInteractions> basic_interactions,
                      const ToggleUserActionProto& proto);

// Displays an info popup on the screen.
void ShowInfoPopup(const InfoPopupProto& proto,
                   base::android::ScopedJavaGlobalRef<jobject> jcontext);

// Displays a list popup on the screen.
void ShowListPopup(base::WeakPtr<UserModel> user_model,
                   const ShowListPopupProto& proto,
                   base::android::ScopedJavaGlobalRef<jobject> jcontext,
                   base::android::ScopedJavaGlobalRef<jobject> jdelegate);

// Displays a calendar popup on the screen.
void ShowCalendarPopup(base::WeakPtr<UserModel> user_model,
                       const ShowCalendarPopupProto& proto,
                       base::android::ScopedJavaGlobalRef<jobject> jcontext,
                       base::android::ScopedJavaGlobalRef<jobject> jdelegate);

// Displays a generic popup on the screen.
void ShowGenericPopup(const ShowGenericUiPopupProto& proto,
                      base::android::ScopedJavaGlobalRef<jobject> jcontent_view,
                      base::android::ScopedJavaGlobalRef<jobject> jcontext,
                      base::android::ScopedJavaGlobalRef<jobject> jdelegate);

// Sets the text of a view.
void SetViewText(base::WeakPtr<UserModel> user_model,
                 const SetTextProto& proto,
                 ViewHandlerAndroid* view_handler,
                 base::android::ScopedJavaGlobalRef<jobject> jdelegate);

// Sets the visibility of a view.
void SetViewVisibility(base::WeakPtr<UserModel> user_model,
                       const SetViewVisibilityProto& proto,
                       ViewHandlerAndroid* view_handler);

// Enables or disables a view.
void SetViewEnabled(base::WeakPtr<UserModel> user_model,
                    const SetViewEnabledProto& proto,
                    ViewHandlerAndroid* view_handler);

// A simple wrapper around a basic interaction, needed because we can't directly
// bind a repeating callback to a method with non-void return value.
void RunConditionalCallback(
    base::WeakPtr<BasicInteractions> basic_interactions,
    const std::string& condition_identifier,
    InteractionHandlerAndroid::InteractionCallback callback);

// Sets the checked state of a toggle button.
void SetToggleButtonChecked(base::WeakPtr<UserModel> user_model,
                            const std::string& view_identifier,
                            const std::string& model_identifier,
                            ViewHandlerAndroid* view_handler);

// Removes all child views from |view_identifier|.
void ClearViewContainer(const std::string& view_identifier,
                        ViewHandlerAndroid* view_handler,
                        base::android::ScopedJavaGlobalRef<jobject> jdelegate);

// Attaches |jview| to a parent view.
bool AttachViewToParent(base::android::ScopedJavaGlobalRef<jobject> jview,
                        const std::string& parent_view_identifier,
                        ViewHandlerAndroid* view_handler);

// Updates the radio button group |radio_group| based on |model_identifier| —
// presumably keeping exactly one button checked; see RadioButtonController.
void UpdateRadioButtonGroup(
    base::WeakPtr<RadioButtonController> radio_button_controller,
    const std::string& radio_group,
    const std::string& model_identifier);

}  // namespace android_interactions
}  // namespace autofill_assistant
#endif // CHROME_BROWSER_ANDROID_AUTOFILL_ASSISTANT_GENERIC_UI_INTERACTIONS_ANDROID_H_
| 1,945 |
2,174 |
package ceui.lisa.models;
import java.util.List;
/**
 * Bean holding profile preset data: a default profile image plus the
 * selectable lists of addresses, countries and jobs. Exposed via standard
 * getters/setters; field names are preserved for (de)serialization.
 */
public class ProfilePresetsBean {

    private ImageUrlsBean default_profile_image_urls;
    private List<AddressesBean> addresses;
    private List<CountriesBean> countries;
    private List<JobsBean> jobs;

    public ImageUrlsBean getDefault_profile_image_urls() {
        return default_profile_image_urls;
    }

    public void setDefault_profile_image_urls(ImageUrlsBean value) {
        this.default_profile_image_urls = value;
    }

    public List<AddressesBean> getAddresses() {
        return addresses;
    }

    public void setAddresses(List<AddressesBean> value) {
        this.addresses = value;
    }

    public List<CountriesBean> getCountries() {
        return countries;
    }

    public void setCountries(List<CountriesBean> value) {
        this.countries = value;
    }

    public List<JobsBean> getJobs() {
        return jobs;
    }

    public void setJobs(List<JobsBean> value) {
        this.jobs = value;
    }
}
| 419 |
2,151 |
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: <EMAIL> (<NAME>)
// Based on original Protocol Buffers design by
// <NAME>, <NAME>, and others.
#include <map>
#include <string>
#include <google/protobuf/compiler/javamicro/javamicro_enum_field.h>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/compiler/javamicro/javamicro_helpers.h>
#include <google/protobuf/io/printer.h>
#include <google/protobuf/wire_format.h>
#include <google/protobuf/stubs/strutil.h>
namespace google {
namespace protobuf {
namespace compiler {
namespace javamicro {
namespace {
// TODO(kenton): Factor out a "SetCommonFieldVariables()" to get rid of
// repeat code between this and the other field types.
void SetEnumVariables(const Params& params,
const FieldDescriptor* descriptor, map<string, string>* variables) {
(*variables)["name"] =
UnderscoresToCamelCase(descriptor);
(*variables)["capitalized_name"] =
UnderscoresToCapitalizedCamelCase(descriptor);
(*variables)["number"] = SimpleItoa(descriptor->number());
(*variables)["type"] = "int";
(*variables)["default"] = DefaultValue(params, descriptor);
(*variables)["tag"] = SimpleItoa(internal::WireFormat::MakeTag(descriptor));
(*variables)["tag_size"] = SimpleItoa(
internal::WireFormat::TagSize(descriptor->number(), descriptor->type()));
(*variables)["message_name"] = descriptor->containing_type()->name();
}
} // namespace
// ===================================================================
// Generator for optional (non-repeated) enum fields.
EnumFieldGenerator::
EnumFieldGenerator(const FieldDescriptor* descriptor, const Params& params)
  : FieldGenerator(params), descriptor_(descriptor) {
  SetEnumVariables(params, descriptor, &variables_);
}

EnumFieldGenerator::~EnumFieldGenerator() {}
// Emits the Java members for an optional enum field: a has-flag, the backing
// int (enums are ints in the micro runtime), and has/get/set/clear accessors.
void EnumFieldGenerator::
GenerateMembers(io::Printer* printer) const {
  printer->Print(variables_,
    "private boolean has$capitalized_name$;\n"
    "private int $name$_ = $default$;\n"
    "public boolean has$capitalized_name$() { return has$capitalized_name$; }\n"
    "public int get$capitalized_name$() { return $name$_; }\n"
    "public $message_name$ set$capitalized_name$(int value) {\n"
    " has$capitalized_name$ = true;\n"
    " $name$_ = value;\n"
    " return this;\n"
    "}\n"
    "public $message_name$ clear$capitalized_name$() {\n"
    " has$capitalized_name$ = false;\n"
    " $name$_ = $default$;\n"
    " return this;\n"
    "}\n");
}
// Emits merge logic: copy the other message's value only if it is present.
// Fixed: a stray "what is other??" debug string was previously concatenated
// into the template, so it was emitted verbatim into every generated merge
// method, producing invalid Java source.
void EnumFieldGenerator::
GenerateMergingCode(io::Printer* printer) const {
  printer->Print(variables_,
    "if (other.has$capitalized_name$()) {\n"
    " set$capitalized_name$(other.get$capitalized_name$());\n"
    "}\n");
}
// Emits parsing code; enum values arrive on the wire as int32 varints.
void EnumFieldGenerator::
GenerateParsingCode(io::Printer* printer) const {
  printer->Print(variables_,
    " set$capitalized_name$(input.readInt32());\n");
}
// Emits serialization code; the field is written only when present.
void EnumFieldGenerator::
GenerateSerializationCode(io::Printer* printer) const {
  printer->Print(variables_,
    "if (has$capitalized_name$()) {\n"
    " output.writeInt32($number$, get$capitalized_name$());\n"
    "}\n");
}
// Emits size computation matching GenerateSerializationCode above.
void EnumFieldGenerator::
GenerateSerializedSizeCode(io::Printer* printer) const {
  printer->Print(variables_,
    "if (has$capitalized_name$()) {\n"
    " size += com.google.protobuf.micro.CodedOutputStreamMicro\n"
    " .computeInt32Size($number$, get$capitalized_name$());\n"
    "}\n");
}
// Returns the fully-qualified Java name of the enum type for this field.
string EnumFieldGenerator::GetBoxedType() const {
  return ClassName(params_, descriptor_->enum_type());
}
// ===================================================================
// Generator for repeated enum fields. Packed encoding is rejected up-front:
// the micro runtime does not support it, so generation aborts fatally.
RepeatedEnumFieldGenerator::
RepeatedEnumFieldGenerator(const FieldDescriptor* descriptor, const Params& params)
  : FieldGenerator(params), descriptor_(descriptor) {
  SetEnumVariables(params, descriptor, &variables_);
  if (descriptor_->options().packed()) {
    GOOGLE_LOG(FATAL) << "MicroRuntime does not support packed";
  }
}

RepeatedEnumFieldGenerator::~RepeatedEnumFieldGenerator() {}
// Emits the Java members for a repeated enum field. Two container flavors:
// java.util.Vector (for pre-generics J2ME targets) or List<Integer>.
void RepeatedEnumFieldGenerator::
GenerateMembers(io::Printer* printer) const {
  if (params_.java_use_vector()) {
    printer->Print(variables_,
      "private java.util.Vector $name$_ = new java.util.Vector();\n"
      "public java.util.Vector get$capitalized_name$List() {\n"
      " return $name$_;\n"
      "}\n"
      "public int get$capitalized_name$Count() { return $name$_.size(); }\n"
      "public int get$capitalized_name$(int index) {\n"
      " return ((Integer)$name$_.elementAt(index)).intValue();\n"
      "}\n"
      "public $message_name$ set$capitalized_name$(int index, int value) {\n"
      " $name$_.setElementAt(new Integer(value), index);\n"
      " return this;\n"
      "}\n"
      "public $message_name$ add$capitalized_name$(int value) {\n"
      " $name$_.addElement(new Integer(value));\n"
      " return this;\n"
      "}\n"
      "public $message_name$ clear$capitalized_name$() {\n"
      " $name$_.removeAllElements();\n"
      " return this;\n"
      "}\n");
  } else {
    printer->Print(variables_,
      "private java.util.List<Integer> $name$_ =\n"
      " java.util.Collections.emptyList();\n"
      "public java.util.List<Integer> get$capitalized_name$List() {\n"
      " return $name$_;\n" // note: unmodifiable list
      "}\n"
      "public int get$capitalized_name$Count() { return $name$_.size(); }\n"
      "public int get$capitalized_name$(int index) {\n"
      " return $name$_.get(index);\n"
      "}\n"
      "public $message_name$ set$capitalized_name$(int index, int value) {\n"
      " $name$_.set(index, value);\n"
      " return this;\n"
      "}\n"
      "public $message_name$ add$capitalized_name$(int value) {\n"
      " if ($name$_.isEmpty()) {\n"
      " $name$_ = new java.util.ArrayList<java.lang.Integer>();\n"
      " }\n"
      " $name$_.add(value);\n"
      " return this;\n"
      "}\n"
      "public $message_name$ clear$capitalized_name$() {\n"
      " $name$_ = java.util.Collections.emptyList();\n"
      " return this;\n"
      "}\n");
  }
  // NOTE(review): unreachable in practice — the constructor aborts fatally on
  // packed fields, so this memoized-size member is never emitted.
  if (descriptor_->options().packed()) {
    printer->Print(variables_,
      "private int $name$MemoizedSerializedSize;\n");
  }
}
// Emits merge logic: append all of the other message's elements.
// Fixed: the Vector-branch template previously emitted "i++))" — an extra
// closing parenthesis that made the generated Java fail to compile.
void RepeatedEnumFieldGenerator::
GenerateMergingCode(io::Printer* printer) const {
  if (params_.java_use_vector()) {
    printer->Print(variables_,
      "if (other.$name$_.size() != 0) {\n"
      " for (int i = 0; i < other.$name$_.size(); i++) {\n"
      " result.$name$_.addElement(other.$name$_.elementAt(i));\n"
      " }\n"
      "}\n");
  } else {
    printer->Print(variables_,
      "if (!other.$name$_.isEmpty()) {\n"
      " if (result.$name$_.isEmpty()) {\n"
      " result.$name$_ = new java.util.ArrayList<java.lang.Integer>();\n"
      " }\n"
      " result.$name$_.addAll(other.$name$_);\n"
      "}\n");
  }
}
// Emits parsing code for one occurrence of the field. The packed branches are
// unreachable in practice: the constructor aborts fatally on packed fields.
void RepeatedEnumFieldGenerator::
GenerateParsingCode(io::Printer* printer) const {
  // If packed, set up the while loop
  if (descriptor_->options().packed()) {
    printer->Print(variables_,
      "int length = input.readRawVarint32();\n"
      "int oldLimit = input.pushLimit(length);\n"
      "while(input.getBytesUntilLimit() > 0) {\n");
    printer->Indent();
  }
  // Read and store the enum
  printer->Print(variables_,
    " add$capitalized_name$(input.readInt32());\n");
  if (descriptor_->options().packed()) {
    printer->Outdent();
    printer->Print(variables_,
      "}\n"
      "input.popLimit(oldLimit);\n");
  }
}
// Emits serialization code: each element is written as an int32 field.
// The packed branches are unreachable in practice (ctor FATALs on packed).
void RepeatedEnumFieldGenerator::
GenerateSerializationCode(io::Printer* printer) const {
  if (descriptor_->options().packed()) {
    printer->Print(variables_,
      "if (get$capitalized_name$List().size() > 0) {\n"
      " output.writeRawVarint32($tag$);\n"
      " output.writeRawVarint32($name$MemoizedSerializedSize);\n"
      "}\n");
    if (params_.java_use_vector()) {
      printer->Print(variables_,
        "for (int i = 0; i < get$capitalized_name$List().size(); i++) {\n"
        " output.writeRawVarint32(get$capitalized_name$(i));\n"
        "}\n");
    } else {
      // NOTE(review): $type$ expands to "int" (see SetEnumVariables), so the
      // generated "element.getNumber()" would not compile — but this branch is
      // dead because packed fields are rejected in the constructor.
      printer->Print(variables_,
        "for ($type$ element : get$capitalized_name$List()) {\n"
        " output.writeRawVarint32(element.getNumber());\n"
        "}\n");
    }
  } else {
    if (params_.java_use_vector()) {
      printer->Print(variables_,
        "for (int i = 0; i < get$capitalized_name$List().size(); i++) {\n"
        " output.writeInt32($number$, (int)get$capitalized_name$(i));\n"
        "}\n");
    } else {
      printer->Print(variables_,
        "for (java.lang.Integer element : get$capitalized_name$List()) {\n"
        " output.writeInt32($number$, element);\n"
        "}\n");
    }
  }
}
// Emits size computation: sum the per-element sizes, then add tag overhead.
// Fixed: the packed-size template's braces lacked trailing "\n", so the
// opening brace was glued to the first statement and the closing brace was
// glued to the following memoized-size assignment in the generated source.
// (The packed branches remain unreachable: the ctor FATALs on packed fields.)
void RepeatedEnumFieldGenerator::
GenerateSerializedSizeCode(io::Printer* printer) const {
  printer->Print(variables_,
    "{\n"
    " int dataSize = 0;\n");
  printer->Indent();
  if (params_.java_use_vector()) {
    printer->Print(variables_,
      "for (int i = 0; i < get$capitalized_name$List().size(); i++) {\n"
      " dataSize += com.google.protobuf.micro.CodedOutputStreamMicro\n"
      " .computeInt32SizeNoTag(get$capitalized_name$(i));\n"
      "}\n");
  } else {
    printer->Print(variables_,
      "for (java.lang.Integer element : get$capitalized_name$List()) {\n"
      " dataSize += com.google.protobuf.micro.CodedOutputStreamMicro\n"
      " .computeInt32SizeNoTag(element);\n"
      "}\n");
  }
  printer->Print(
    "size += dataSize;\n");
  if (descriptor_->options().packed()) {
    printer->Print(variables_,
      "if (get$capitalized_name$List().size() != 0) {\n"
      " size += $tag_size$;\n"
      " size += com.google.protobuf.micro.CodedOutputStreamMicro\n"
      " .computeRawVarint32Size(dataSize);\n"
      "}\n");
  } else {
    printer->Print(variables_,
      "size += $tag_size$ * get$capitalized_name$List().size();\n");
  }
  // cache the data size for packed fields.
  if (descriptor_->options().packed()) {
    printer->Print(variables_,
      "$name$MemoizedSerializedSize = dataSize;\n");
  }
  printer->Outdent();
  printer->Print("}\n");
}
// Returns the fully-qualified Java name of the enum type for this field.
string RepeatedEnumFieldGenerator::GetBoxedType() const {
  return ClassName(params_, descriptor_->enum_type());
}
} // namespace javamicro
} // namespace compiler
} // namespace protobuf
} // namespace google
| 4,611 |
435 |
{
"copyright_text": "Standard YouTube License",
"description": "The session is about accessing environmental data over the Internet using the OpenDAP protocol and displaying it in a web browser using the Bokeh library. The session will include a presentation/demonstration of some Python 3 software (available from PyPI) that I've developed for this purpose. My main interest is in oceanographic data so that will be the focus of the talk (though any environmental data could be used). The talk will cover the increased availability of environmental data via the Internet, the suitability of Python for accessing and processing the data, and using the Bokeh library for displaying the data. Bokeh allows for excellent interactive displays in a web browser driven by Python, and the talk will focus on the benefits I think this provides.",
"duration": 1345,
"language": "eng",
"recorded": "2017-10-27T16:30:00+01:00",
"related_urls": [
{
"label": "event schedule",
"url": "http://2017.pyconuk.org/schedule/"
}
],
"speakers": [
"<NAME>"
],
"tags": [],
"thumbnail_url": "https://i.ytimg.com/vi/WlzhZSsOfWQ/hqdefault.jpg",
"title": "Visualising environmental data with Python and Bokeh",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=WlzhZSsOfWQ"
}
]
}
| 410 |
839 |
<filename>systests/ws-rm/src/test/java/org/apache/cxf/systest/ws/rm/ProtocolVariationsTest.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.systest.ws.rm;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.logging.Logger;
import javax.xml.transform.dom.DOMSource;
import javax.xml.ws.Dispatch;
import javax.xml.ws.Endpoint;
import org.apache.cxf.Bus;
import org.apache.cxf.BusFactory;
import org.apache.cxf.binding.soap.SoapFault;
import org.apache.cxf.bus.spring.SpringBusFactory;
import org.apache.cxf.common.logging.LogUtils;
import org.apache.cxf.endpoint.Client;
import org.apache.cxf.frontend.ClientProxy;
import org.apache.cxf.greeter_control.Control;
import org.apache.cxf.greeter_control.ControlService;
import org.apache.cxf.greeter_control.Greeter;
import org.apache.cxf.greeter_control.GreeterService;
import org.apache.cxf.interceptor.transform.TransformOutInterceptor;
import org.apache.cxf.jaxws.DispatchImpl;
import org.apache.cxf.systest.ws.util.ConnectionHelper;
import org.apache.cxf.systest.ws.util.MessageFlow;
import org.apache.cxf.testutil.common.AbstractBusClientServerTestBase;
import org.apache.cxf.testutil.common.AbstractBusTestServerBase;
import org.apache.cxf.testutil.recorders.InMessageRecorder;
import org.apache.cxf.testutil.recorders.MessageRecorder;
import org.apache.cxf.testutil.recorders.OutMessageRecorder;
import org.apache.cxf.transport.http.HTTPConduit;
import org.apache.cxf.transports.http.configuration.HTTPClientPolicy;
import org.apache.cxf.ws.addressing.Names;
import org.apache.cxf.ws.addressing.VersionTransformer.Names200408;
import org.apache.cxf.ws.rm.RM10Constants;
import org.apache.cxf.ws.rm.RM11Constants;
import org.apache.cxf.ws.rm.RMConstants;
import org.apache.cxf.ws.rm.RMException;
import org.apache.cxf.ws.rm.RMManager;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Tests control of WS-RM protocol variations on the client, and of the server responses matching whichever
* variation is used by the client.
*/
public class ProtocolVariationsTest extends AbstractBusClientServerTestBase {
    public static final String PORT = allocatePort(ProtocolVariationsTest.class);

    private static final Logger LOG = LogUtils.getLogger(ProtocolVariationsTest.class);
    // WS-Addressing action URIs for the greeter operations exercised below.
    private static final String GREETME_ACTION
        = "http://cxf.apache.org/greeter_control/Greeter/greetMeRequest";
    private static final String GREETME_RESPONSE_ACTION
        = "http://cxf.apache.org/greeter_control/Greeter/greetMeResponse";
    private static final String GREETME_ONEWAY_ACTION
        = "http://cxf.apache.org/greeter_control/Greeter/greetMeOneWayRequest";
    // Decoupled-endpoint state; presumably used by init variants outside this
    // chunk — verify against the rest of the class.
    private static String decoupledEndpoint;
    private static int decoupledCount = 1;
    // Per-test client/server resources, released in tearDown().
    private Bus controlBus;
    private Control control;
    private Bus greeterBus;
    private Greeter greeter;
    private OutMessageRecorder outRecorder;
    private InMessageRecorder inRecorder;
    private Dispatch<DOMSource> dispatch;
public static class Server extends AbstractBusTestServerBase {
Endpoint ep;
protected void run() {
SpringBusFactory factory = new SpringBusFactory();
Bus bus = factory.createBus();
BusFactory.setDefaultBus(bus);
setBus(bus);
//System.out.println("Created control bus " + bus);
ControlImpl implementor = new ControlImpl();
implementor.setDbName("pvt-server");
implementor.setAddress("http://localhost:" + PORT + "/SoapContext/GreeterPort");
GreeterImpl greeterImplementor = new GreeterImpl();
implementor.setImplementor(greeterImplementor);
ep = Endpoint.publish("http://localhost:" + PORT + "/SoapContext/ControlPort", implementor);
BusFactory.setDefaultBus(null);
BusFactory.setThreadDefaultBus(null);
}
public void tearDown() {
ep.stop();
ep = null;
}
}
@BeforeClass
public static void startServers() throws Exception {
assertTrue("server did not launch correctly", launchServer(Server.class, true));
}
    /**
     * Best-effort cleanup of the client and control proxies after each test.
     * The short sleep presumably lets in-flight RM messages drain before the
     * next test starts — confirm against the base-class behavior.
     */
    @After
    public void tearDown() throws Exception {
        try {
            stopClient();
            stopControl();
        } catch (Throwable t) {
            //ignore — cleanup failures must not mask the test result
        }
        Thread.sleep(100);
    }
    /** Default protocol variation: WS-RM 1.0 with WS-A 2004/08 addressing. */
    @Test
    public void testDefault() throws Exception {
        init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);

        assertEquals("ONE", greeter.greetMe("one"));
        assertEquals("TWO", greeter.greetMe("two"));
        assertEquals("THREE", greeter.greetMe("three"));

        verifyTwowayNonAnonymous(Names200408.WSA_NAMESPACE_NAME, RM10Constants.INSTANCE);
    }
    /** WS-RM 1.0 with the WS-A 2004/08 namespace set explicitly. */
    @Test
    public void testRM10WSA200408() throws Exception {
        init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);

        // same as default, but explicitly setting the WS-Addressing namespace
        Client client = ClientProxy.getClient(greeter);
        client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names200408.WSA_NAMESPACE_NAME);

        assertEquals("ONE", greeter.greetMe("one"));
        assertEquals("TWO", greeter.greetMe("two"));
        assertEquals("THREE", greeter.greetMe("three"));

        verifyTwowayNonAnonymous(Names200408.WSA_NAMESPACE_NAME, RM10Constants.INSTANCE);
    }
    /** WS-RM 1.0 combined with the WS-A 1.0 namespace — a valid mix. */
    @Test
    public void testRM10WSA15() throws Exception {
        init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);

        // WS-RM 1.0, but using the WS-A 1.0 namespace
        Client client = ClientProxy.getClient(greeter);
        client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names.WSA_NAMESPACE_NAME);

        assertEquals("ONE", greeter.greetMe("one"));
        assertEquals("TWO", greeter.greetMe("two"));
        assertEquals("THREE", greeter.greetMe("three"));

        verifyTwowayNonAnonymous(Names.WSA_NAMESPACE_NAME, RM10Constants.INSTANCE);
    }
    /** WS-RM 1.1 with WS-A 1.0 — the only addressing version RM 1.1 allows. */
    @Test
    public void testRM11() throws Exception {
        init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);

        // WS-RM 1.1 and WS-A 1.0
        Client client = ClientProxy.getClient(greeter);
        client.getRequestContext().put(RMManager.WSRM_VERSION_PROPERTY, RM11Constants.NAMESPACE_URI);
        client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names.WSA_NAMESPACE_NAME);

        assertEquals("ONE", greeter.greetMe("one"));
        assertEquals("TWO", greeter.greetMe("two"));
        assertEquals("THREE", greeter.greetMe("three"));

        verifyTwowayNonAnonymous(Names.WSA_NAMESPACE_NAME, RM11Constants.INSTANCE);
    }
    /**
     * WS-RM 1.1 with WS-A 2004/08 is an unsupported combination: the client
     * must reject it locally with an RMException before any message is sent.
     */
    @Test
    public void testInvalidRM11WSA200408() throws Exception {
        init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);

        // WS-RM 1.1, but using the WS-A 1.0 namespace
        Client client = ClientProxy.getClient(greeter);
        client.getRequestContext().put(RMManager.WSRM_VERSION_PROPERTY, RM11Constants.NAMESPACE_URI);
        client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names200408.WSA_NAMESPACE_NAME);

        try {
            greeter.greetMe("one");
            fail("invalid namespace combination accepted");
        } catch (Exception e) {
            assertTrue(e.getCause() instanceof RMException);
            // verify a partial error text match to exclude an unexpected exception
            // (see UNSUPPORTED_NAMESPACE in Messages.properties)
            final String text = Names200408.WSA_NAMESPACE_NAME + " is not supported";
            assertTrue(e.getCause().getMessage() != null
                && e.getCause().getMessage().indexOf(text) >= 0);
        }
    }
@Test
public void testInvalidRM11WSA200408OnReceive() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);
// WS-RM 1.0 using the WS-A 1.0 namespace
Client client = ClientProxy.getClient(greeter);
client.getRequestContext().put(RMManager.WSRM_VERSION_PROPERTY, RM10Constants.NAMESPACE_URI);
client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names200408.WSA_NAMESPACE_NAME);
// rewrite the outgoing message's WS-RM namespace to 1.1
TransformOutInterceptor trans = new TransformOutInterceptor();
Map<String, String> outElements = new HashMap<>();
outElements.put("{" + RM10Constants.NAMESPACE_URI + "}*", "{" + RM11Constants.NAMESPACE_URI + "}*");
trans.setOutTransformElements(outElements);
client.getOutInterceptors().add(trans);
try {
greeter.greetMe("one");
fail("invalid namespace combination accepted");
} catch (Exception e) {
assertTrue(e.getCause() instanceof SoapFault);
// verify a partial error text match to exclude an unexpected exception
// (see WSRM_REQUIRED_EXC in Messages.properties)
final String text = "WS-ReliableMessaging is required";
assertTrue(e.getCause().getMessage() != null
&& e.getCause().getMessage().indexOf(text) >= 0);
}
}
@Test
public void testInvalidWSAOnReceive() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);
// WS-RM 1.0 using the WS-A 1.0 namespace
Client client = ClientProxy.getClient(greeter);
client.getRequestContext().put(RMManager.WSRM_VERSION_PROPERTY, RM10Constants.NAMESPACE_URI);
client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names200408.WSA_NAMESPACE_NAME);
// rewrite the outgoing message's WS-A namespace to an invalid one
TransformOutInterceptor trans = new TransformOutInterceptor();
Map<String, String> outElements = new HashMap<>();
outElements.put("{" + Names200408.WSA_NAMESPACE_NAME + "}*", "{http://cxf.apache.org/invalid}*");
trans.setOutTransformElements(outElements);
client.getOutInterceptors().add(trans);
try {
greeter.greetMe("one");
fail("invalid wsa header accepted");
} catch (Exception e) {
assertTrue(e.getCause() instanceof SoapFault);
// verify a partial error text match to exclude an unexpected exception
// (see WSA_REQUIRED_EXC in Messages.properties)
final String text = "WS-Addressing is required";
assertTrue(e.getCause().getMessage() != null
&& e.getCause().getMessage().indexOf(text) >= 0);
}
}
@Test
public void testInvalidWSRMMustUnderstandOnReceive() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);
// WS-RM 1.0 using the WS-A 1.0 namespace
Client client = ClientProxy.getClient(greeter);
client.getRequestContext().put(RMManager.WSRM_VERSION_PROPERTY, RM10Constants.NAMESPACE_URI);
client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names200408.WSA_NAMESPACE_NAME);
// rewrite the outgoing message's WS-RM namespace to an invalid one
TransformOutInterceptor trans = new TransformOutInterceptor();
Map<String, String> outElements = new HashMap<>();
outElements.put("{" + RM10Constants.NAMESPACE_URI + "}*", "{http://cxf.apache.org/invalid}*");
trans.setOutTransformElements(outElements);
client.getOutInterceptors().add(trans);
try {
greeter.greetMe("one");
fail("invalid wsrm header");
} catch (Exception e) {
assertTrue(e.getCause() instanceof SoapFault);
final String text = "WS-ReliableMessaging is required";
assertTrue(e.getCause().getMessage(), e.getCause().getMessage() != null
&& e.getCause().getMessage().indexOf(text) >= 0);
}
}
@Test
public void testInvalidWSRMOnReceive() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);
// WS-RM 1.0 using the WS-A 1.0 namespace
Client client = ClientProxy.getClient(greeter);
client.getRequestContext().put(RMManager.WSRM_VERSION_PROPERTY, RM10Constants.NAMESPACE_URI);
client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names200408.WSA_NAMESPACE_NAME);
// remove the outgoing message's WS-RM header
TransformOutInterceptor trans = new TransformOutInterceptor();
Map<String, String> outElements = new HashMap<>();
outElements.put("{" + RM10Constants.NAMESPACE_URI + "}Sequence", "");
trans.setOutTransformElements(outElements);
client.getOutInterceptors().add(trans);
try {
greeter.greetMe("one");
fail("invalid wsrm header");
} catch (Exception e) {
assertTrue(e.getCause() instanceof SoapFault);
// verify a partial error text match to exclude an unexpected exception
// (see WSRM_REQUIRED_EXC in Messages.properties)
final String text = "WS-ReliableMessaging is required";
assertTrue(e.getCause().getMessage() != null
&& e.getCause().getMessage().indexOf(text) >= 0);
}
}
@Test
public void testDefaultDecoupled() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", true);
assertEquals("ONE", greeter.greetMe("one"));
assertEquals("TWO", greeter.greetMe("two"));
assertEquals("THREE", greeter.greetMe("three"));
verifyTwowayNonAnonymous(Names200408.WSA_NAMESPACE_NAME, RM10Constants.INSTANCE);
}
@Test
public void testRM10WSA200408Decoupled() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", true);
// same as default, but explicitly setting the WS-Addressing namespace
Client client = ClientProxy.getClient(greeter);
client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names200408.WSA_NAMESPACE_NAME);
assertEquals("ONE", greeter.greetMe("one"));
assertEquals("TWO", greeter.greetMe("two"));
assertEquals("THREE", greeter.greetMe("three"));
verifyTwowayNonAnonymous(Names200408.WSA_NAMESPACE_NAME, RM10Constants.INSTANCE);
}
@Test
public void testRM10WSA15Decoupled() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", true);
// WS-RM 1.0, but using the WS-A 1.0 namespace
Client client = ClientProxy.getClient(greeter);
client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names.WSA_NAMESPACE_NAME);
assertEquals("ONE", greeter.greetMe("one"));
assertEquals("TWO", greeter.greetMe("two"));
assertEquals("THREE", greeter.greetMe("three"));
verifyTwowayNonAnonymous(Names.WSA_NAMESPACE_NAME, RM10Constants.INSTANCE);
}
@Test
public void testRM11Decoupled() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", true);
// WS-RM 1.1 and WS-A 1.0
Client client = ClientProxy.getClient(greeter);
client.getRequestContext().put(RMManager.WSRM_VERSION_PROPERTY, RM11Constants.NAMESPACE_URI);
client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names.WSA_NAMESPACE_NAME);
assertEquals("ONE", greeter.greetMe("one"));
assertEquals("TWO", greeter.greetMe("two"));
assertEquals("THREE", greeter.greetMe("three"));
verifyTwowayNonAnonymous(Names.WSA_NAMESPACE_NAME, RM11Constants.INSTANCE);
}
@Test
public void testTerminateSequenceDefault() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);
RMManager manager = greeterBus.getExtension(RMManager.class);
manager.getSourcePolicy().getSequenceTerminationPolicy().setMaxLength(1);
greeter.greetMeOneWay("one");
verifyTerminateSequence(Names200408.WSA_NAMESPACE_NAME, RM10Constants.INSTANCE);
}
@Test
public void testTerminateSequenceRM11() throws Exception {
init("org/apache/cxf/systest/ws/rm/rminterceptors.xml", false);
RMManager manager = greeterBus.getExtension(RMManager.class);
manager.getSourcePolicy().getSequenceTerminationPolicy().setMaxLength(1);
// WS-RM 1.1 and WS-A 1.0
Client client = ClientProxy.getClient(greeter);
client.getRequestContext().put(RMManager.WSRM_VERSION_PROPERTY, RM11Constants.NAMESPACE_URI);
client.getRequestContext().put(RMManager.WSRM_WSA_VERSION_PROPERTY, Names.WSA_NAMESPACE_NAME);
greeter.greetMeOneWay("one");
verifyTerminateSequence(Names.WSA_NAMESPACE_NAME, RM11Constants.INSTANCE);
}
    /**
     * Verify that the recorded message flow contains the sequence-termination
     * handshake appropriate for the WS-RM version in use.
     *
     * @param wsaUri the expected WS-Addressing namespace of the flow
     * @param consts the WS-RM constants (1.0 or 1.1) to verify against
     */
    private void verifyTerminateSequence(String wsaUri, RMConstants consts) throws Exception {
        MessageFlow mf = new MessageFlow(outRecorder.getOutboundMessages(),
            inRecorder.getInboundMessages(), wsaUri, consts.getWSRMNamespace());
        if (RM11Constants.NAMESPACE_URI.equals(consts.getWSRMNamespace())) {
            // WS-RM 1.1: four outbound messages
            // (CreateSequence, app message, CloseSequence, TerminateSequence)
            awaitMessages(4, 4);
            mf.verifyMessages(4, true);
            String[] expectedActions = new String[] {consts.getCreateSequenceAction(),
                                                     GREETME_ONEWAY_ACTION,
                                                     consts.getCloseSequenceAction(),
                                                     consts.getTerminateSequenceAction()};
            mf.verifyActions(expectedActions, true);
            mf.verifyMessageNumbers(new String[] {null, "1", null, null}, true);
            // no LastMessage in WS-RM 1.1 (replaced by CloseSequence)
            mf.verifyLastMessage(new boolean[] {false, false, false, false}, true);
            // CrSR, ACK, ClSR, TSR
            mf.verifyMessages(4, false);
            expectedActions = new String[] {consts.getCreateSequenceResponseAction(),
                                            consts.getSequenceAckAction(),
                                            RM11Constants.INSTANCE.getCloseSequenceResponseAction(),
                                            RM11Constants.INSTANCE.getTerminateSequenceResponseAction()};
            mf.verifyActions(expectedActions, false);
            mf.verifyAcknowledgements(new boolean[] {false, true, false, false}, false);
        } else {
            // WS-RM 1.0: three outbound messages
            // (CreateSequence, app message carrying LastMessage, TerminateSequence)
            awaitMessages(3, 2);
            mf.verifyMessages(3, true);
            String[] expectedActions = new String[] {consts.getCreateSequenceAction(),
                                                     GREETME_ONEWAY_ACTION,
                                                     consts.getTerminateSequenceAction()};
            mf.verifyActions(expectedActions, true);
            mf.verifyMessageNumbers(new String[] {null, "1", null}, true);
            // uses LastMessage
            mf.verifyLastMessage(new boolean[] {false, true, false}, true);
            // CrSR, ACK, PR
            mf.verifyMessages(2, false);
            expectedActions = new String[] {consts.getCreateSequenceResponseAction(),
                                            consts.getSequenceAckAction()};
            mf.verifyActions(expectedActions, false);
            mf.verifyAcknowledgements(new boolean[] {false, true}, false);
        }
    }
    /**
     * Verify the message flow of the three-greeting two-way exchange:
     * a CreateSequence handshake followed by three numbered application
     * messages, with acknowledgements piggybacked from the second request on.
     *
     * @param wsaUri the expected WS-Addressing namespace of the flow
     * @param consts the WS-RM constants (1.0 or 1.1) to verify against
     */
    private void verifyTwowayNonAnonymous(String wsaUri, RMConstants consts) throws Exception {
        // CreateSequence and three greetMe messages
        awaitMessages(4, 4);
        MessageFlow mf = new MessageFlow(outRecorder.getOutboundMessages(),
            inRecorder.getInboundMessages(), wsaUri, consts.getWSRMNamespace());
        mf.verifyMessages(4, true);
        String[] expectedActions = new String[] {consts.getCreateSequenceAction(),
                                                 GREETME_ACTION,
                                                 GREETME_ACTION,
                                                 GREETME_ACTION};
        mf.verifyActions(expectedActions, true);
        mf.verifyMessageNumbers(new String[] {null, "1", "2", "3"}, true);
        mf.verifyLastMessage(new boolean[] {false, false, false, false}, true);
        mf.verifyAcknowledgements(new boolean[] {false, false, true, true}, true);
        // createSequenceResponse plus 3 greetMeResponse messages
        // the first response should not include an acknowledgement, the other three should
        mf.verifyMessages(4, false);
        expectedActions = new String[] {consts.getCreateSequenceResponseAction(),
                                        GREETME_RESPONSE_ACTION,
                                        GREETME_RESPONSE_ACTION,
                                        GREETME_RESPONSE_ACTION};
        mf.verifyActions(expectedActions, false);
        mf.verifyMessageNumbers(new String[] {null, "1", "2", "3"}, false);
        mf.verifyLastMessage(new boolean[4], false);
        mf.verifyAcknowledgements(new boolean[] {false, true, true, true}, false);
    }
// --- test utilities ---
private void init(String cfgResource, boolean useDecoupledEndpoint) {
SpringBusFactory bf = new SpringBusFactory();
initControl(bf, cfgResource);
initGreeterBus(bf, cfgResource);
initProxy(useDecoupledEndpoint, null);
}
private void initControl(SpringBusFactory bf, String cfgResource) {
controlBus = bf.createBus();
BusFactory.setDefaultBus(controlBus);
ControlService cs = new ControlService();
control = cs.getControlPort();
try {
updateAddressPort(control, PORT);
} catch (Exception ex) {
//ignore
}
assertTrue("Failed to start greeter", control.startGreeter(cfgResource));
}
private void initGreeterBus(SpringBusFactory bf,
String cfgResource) {
greeterBus = bf.createBus(cfgResource);
BusFactory.setDefaultBus(greeterBus);
LOG.fine("Initialised greeter bus with configuration: " + cfgResource);
outRecorder = new OutMessageRecorder();
greeterBus.getOutInterceptors().add(outRecorder);
inRecorder = new InMessageRecorder();
greeterBus.getInInterceptors().add(inRecorder);
}
private void initProxy(boolean useDecoupledEndpoint, Executor executor) {
GreeterService gs = new GreeterService();
if (null != executor) {
gs.setExecutor(executor);
}
greeter = gs.getGreeterPort();
try {
updateAddressPort(greeter, PORT);
} catch (Exception e) {
//ignore
}
LOG.fine("Created greeter client.");
ConnectionHelper.setKeepAliveConnection(greeter, true);
if (useDecoupledEndpoint) {
initDecoupledEndpoint(ClientProxy.getClient(greeter));
}
}
private void initDecoupledEndpoint(Client c) {
// programatically configure decoupled endpoint that is guaranteed to
// be unique across all test cases
decoupledEndpoint = "http://localhost:"
+ allocatePort("decoupled-" + decoupledCount++) + "/decoupled_endpoint";
HTTPConduit hc = (HTTPConduit)(c.getConduit());
HTTPClientPolicy cp = hc.getClient();
cp.setDecoupledEndpoint(decoupledEndpoint);
LOG.fine("Using decoupled endpoint: " + cp.getDecoupledEndpoint());
}
    /**
     * Shut down the client side: close any decoupled conduit destinations
     * first (so their ports are released), then shut the greeter bus down.
     */
    private void stopClient() {
        if (null != greeterBus) {
            //ensure we close the decoupled destination of the conduit,
            //so that the port is released once the destination reference
            //count reaches zero
            if (greeter != null) {
                ClientProxy.getClient(greeter).getConduit().close();
            }
            if (dispatch != null) {
                ((DispatchImpl<?>)dispatch).getClient().getConduit().close();
            }
            greeterBus.shutdown(true);
            greeter = null;
            dispatch = null;
            greeterBus = null;
        }
    }
private void stopControl() {
if (null != control) {
assertTrue("Failed to stop greeter", control.stopGreeter(null));
controlBus.shutdown(true);
}
}
private void awaitMessages(int nExpectedOut, int nExpectedIn) {
awaitMessages(nExpectedOut, nExpectedIn, 10000);
}
private void awaitMessages(int nExpectedOut, int nExpectedIn, int timeout) {
MessageRecorder mr = new MessageRecorder(outRecorder, inRecorder);
mr.awaitMessages(nExpectedOut, nExpectedIn, timeout);
}
}
| 10,756 |
425 |
/**
* @file iter_solver.h
* @brief Template for an iterate solver
*/
#include "solver/data_parallel.h"
namespace dmlc {
namespace solver {
using Progress = std::vector<double>;
/**
 * \brief encode/decode a command
 *
 * Bit layout of \ref cmd (on top of the bits DataParCmd uses):
 *   - bit 1   : "load model" flag
 *   - bit 2   : "save model" flag
 *   - bits 16+: iteration number, stored biased by +1 so that an unset
 *               field decodes to -1 via \ref iter
 */
struct IterCmd : public DataParCmd {
  IterCmd() {}
  IterCmd(int c) : DataParCmd(c) {}

  // mutators
  void set_iter(int iter) { cmd |= (iter+1) << 16; }  // store iteration, biased by +1
  void set_load_model() { cmd |= 1<<1; }
  void set_save_model() { cmd |= 1<<2; }

  // accessors
  bool load_model() const { return cmd & 1<<1; }
  bool save_model() const { return cmd & 1<<2; }
  int iter() const { return (cmd >> 16)-1; }  // -1 when no iteration was set
};
/**
 * \brief the scheduler node for an iterate solver
 *
 * Issues load/save commands to the server group (encoded via IterCmd) and
 * aggregates progress reported by workers/servers.
 */
class IterScheduler : public DataParScheduler {
 protected:
  /**
   * \brief Ask all servers to load model, return the timestamp of this request
   *
   * @param filename model filename
   * @param iter load from a particular iteration. if -1, then load the last
   */
  int LoadModel(const std::string& filename, int iter) {
    IterCmd cmd; cmd.set_load_model(); cmd.set_iter(iter);
    ps::Task task; task.set_cmd(cmd.cmd); task.set_msg(filename);
    return Submit(task, ps::kServerGroup);
  }

  /**
   * \brief Ask all servers to save model, return the timestamp of this request
   *
   * @param filename model filename
   * @param iter save for a particular iteration. if -1, then saved as the last
   */
  int SaveModel(const std::string& filename, int iter) {
    IterCmd cmd; cmd.set_save_model(); cmd.set_iter(iter);
    ps::Task task; task.set_cmd(cmd.cmd); task.set_msg(filename);
    return Submit(task, ps::kServerGroup);
  }

  /**
   * \brief Returns the aggregated progress among all worker/servers since the
   * last time calling this function
   */
  Progress GetProgress() { Progress prog; monitor_.Get(&prog); return prog; }

  // implementation
 public:
  IterScheduler() { }
  virtual ~IterScheduler() { }

 private:
  // aggregates the Progress vectors pushed by worker/server reporters
  ps::Root<double> monitor_;
};
/**
 * \brief A server node. One must implement \ref SaveModel and \ref LoadModel
 */
class IterServer : public ps::App {
 protected:
  /**
   * \brief Save model to disk
   */
  virtual void SaveModel(Stream* fo) const = 0;
  /**
   * \brief Load model from disk
   */
  virtual void LoadModel(Stream* fi) = 0;

  /**
   * \brief Report the progress to the scheduler
   */
  void ReportToScheduler(const Progress& prog) { reporter_.Push(prog); }

  // implementation
 public:
  IterServer() {}
  virtual ~IterServer() {}

  // Dispatch load/save commands issued by IterScheduler. A request with an
  // empty msg carries no filename and is ignored.
  virtual void ProcessRequest(ps::Message* request) {
    if (request->task.msg().size() == 0) return;
    IterCmd cmd(request->task.cmd());
    auto filename = ModelName(request->task.msg(), cmd.iter());
    if (cmd.save_model()) {
      // NOTE(review): raw Stream* leaks if SaveModel throws — confirm the
      // project's exception policy before relying on this in new code.
      Stream* fo = CHECK_NOTNULL(Stream::Create(filename.c_str(), "w"));
      SaveModel(fo);
      delete fo;
    } else if (cmd.load_model()) {
      Stream* fi = CHECK_NOTNULL(Stream::Create(filename.c_str(), "r"));
      LoadModel(fi);
      delete fi;
    }
  }

 private:
  // Build the per-node model filename: base[_iter-<iter>]_part-<rank>.
  std::string ModelName(const std::string& base, int iter) {
    std::string name = base;
    if (iter >= 0) name += "_iter-" + std::to_string(iter);
    return name + "_part-" + std::to_string(ps::NodeInfo::MyRank());
  }

  // pushes Progress vectors up to the scheduler's monitor
  ps::Slave<double> reporter_;
};
/**
 * \brief A worker node.
 */
class IterWorker : public DataParWorker {
 protected:
  /**
   * \brief Report the progress to the scheduler
   */
  void ReportToScheduler(const Progress& prog) { reporter_.Push(prog); }

  /**
   * \brief Returns stream for output prediction
   *
   * The stream is cached: it is recreated only when the computed output name
   * changes, so consecutive workloads for the same file share one stream.
   *
   * \param filename the predict out filename
   * \param wl the received workload
   */
  Stream* PredictStream(const std::string& filename, const Workload& wl) {
    CHECK_EQ(wl.type, Workload::PRED);
    CHECK_GE(wl.file.size(), (size_t)1);
    // output name = <filename><basename of input>_part-<k>
    auto in = wl.file[0].filename;
    size_t pos = in.find_last_of("/\\");
    auto in_base = pos == std::string::npos ? in : in.substr(pos+1);
    auto out = filename + in_base + "_part-" + std::to_string(wl.file[0].k);

    if (out != prev_out_) {
      // a different output target: drop the old stream and open a new one
      delete pred_out_;
      pred_out_ = CHECK_NOTNULL(Stream::Create(out.c_str(), "w"));
      prev_out_ = out;
    }
    return pred_out_;
  }

  // implementation
 public:
  IterWorker() { }
  virtual ~IterWorker() { delete pred_out_; }

 private:
  // pushes Progress vectors up to the scheduler's monitor
  ps::Slave<double> reporter_;
  Stream* pred_out_ = NULL;    // cached prediction output stream (owned)
  std::string prev_out_;       // name the cached stream was opened with
};
} // namespace solver
} // namespace dmlc
| 1,597 |
634 |
/*
* Copyright 2013-2020 consulo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.wm.impl.status.widget;
import com.intellij.ide.ui.UISettings;
import com.intellij.openapi.components.PersistentStateComponent;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.components.State;
import com.intellij.openapi.components.Storage;
import com.intellij.openapi.wm.StatusBarWidgetFactory;
import com.intellij.openapi.wm.impl.status.MemoryUsagePanel;
import com.intellij.util.xmlb.XmlSerializerUtil;
import jakarta.inject.Singleton;
import javax.annotation.Nullable;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* from kotlin
*/
@Singleton
@State(name = "StatusBar", storages = @Storage(value = "ide.general.xml"))
public class StatusBarWidgetSettings implements PersistentStateComponent<StatusBarWidgetSettings.StatusBarState> {
  /** Serialized form: per-widget enablement overrides keyed by factory id. */
  public static class StatusBarState {
    public Map<String, Boolean> widgets = new LinkedHashMap<>();
  }

  public static StatusBarWidgetSettings getInstance() {
    return ServiceManager.getService(StatusBarWidgetSettings.class);
  }

  private StatusBarState myState = new StatusBarState();

  /**
   * Returns whether the widget produced by {@code factory} is enabled.
   * An explicit per-widget override wins; otherwise the factory default is
   * used. (The previous implementation ignored an explicit Boolean.FALSE
   * override whenever the factory was enabled by default.)
   */
  public boolean isEnabled(StatusBarWidgetFactory factory) {
    Boolean state = myState.widgets.get(factory.getId());
    return state != null ? state : factory.isEnabledByDefault();
  }

  /**
   * Records the enablement of a widget. Only deviations from the factory
   * default are stored; setting the default value removes the override.
   */
  public void setEnabled(StatusBarWidgetFactory factory, boolean newValue) {
    if (factory.isEnabledByDefault() == newValue) {
      myState.widgets.remove(factory.getId());
    }
    else {
      myState.widgets.put(factory.getId(), newValue);
    }
  }

  @Nullable
  @Override
  public StatusBarState getState() {
    return myState;
  }

  @Override
  public void loadState(StatusBarState state) {
    XmlSerializerUtil.copyBean(state, myState);
  }

  @Override
  public void afterLoadState() {
    // Migrate the legacy "show memory indicator" UI setting into a widget
    // override, clearing the old flag so the migration runs only once.
    UISettings uiSettings = UISettings.getInstance();
    if(uiSettings.SHOW_MEMORY_INDICATOR) {
      uiSettings.SHOW_MEMORY_INDICATOR = false;
      myState.widgets.put(MemoryUsagePanel.WIDGET_ID, true);
    }
  }
}
| 839 |
3,049 |
<gh_stars>1000+
from TfServingProxy import TfServingProxy
from seldon_core.proto import prediction_pb2
from seldon_core.utils import get_data_from_proto, array_to_grpc_datadef, json_to_seldon_message
from tensorflow_serving.apis import predict_pb2
import numpy as np
from unittest import mock
import tensorflow as tf
import requests
ARR_REQUEST_VALUE=np.random.rand(1,1).astype(np.float32)
ARR_RESPONSE_VALUE=np.random.rand(1,1).astype(np.float32)
class FakeStub(object):
    """Stand-in for PredictionServiceStub that returns a canned response.

    Predict always answers with ARR_RESPONSE_VALUE wrapped in a
    PredictResponse whose output key is "scores".
    """

    def __init__(self, channel):
        self.channel = channel

    @staticmethod
    def Predict(*args, **kwargs):
        payload = ARR_RESPONSE_VALUE
        proto = tf.contrib.util.make_tensor_proto(
            payload.tolist(), shape=payload.shape)
        response = predict_pb2.PredictResponse()
        response.model_spec.name = "newmodel"
        response.model_spec.signature_name = "signame"
        response.outputs["scores"].CopyFrom(proto)
        return response
@mock.patch("tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub", new=FakeStub)
def test_grpc_predict_function_tensor():
    """A 'tensor'-encoded gRPC request round-trips the stub's canned response."""
    proxy = TfServingProxy(
        grpc_endpoint="localhost:8080",
        model_name="newmodel",
        signature_name="signame",
        model_input="images",
        model_output="scores")
    message = prediction_pb2.SeldonMessage(
        data=array_to_grpc_datadef("tensor", ARR_REQUEST_VALUE))
    result = get_data_from_proto(proxy.predict_grpc(message))
    assert result == ARR_RESPONSE_VALUE
@mock.patch("tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub", new=FakeStub)
def test_grpc_predict_function_tftensor():
    """A TensorProto-encoded gRPC request round-trips the stub's canned response."""
    proxy = TfServingProxy(
        grpc_endpoint="localhost:8080",
        model_name="newmodel",
        signature_name="signame",
        model_input="images",
        model_output="scores")
    payload = ARR_REQUEST_VALUE
    proto = tf.contrib.util.make_tensor_proto(
        payload.tolist(), shape=payload.shape)
    message = prediction_pb2.SeldonMessage(
        data=prediction_pb2.DefaultData(tftensor=proto))
    result = get_data_from_proto(proxy.predict_grpc(message))
    assert result == ARR_RESPONSE_VALUE
@mock.patch("tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub", new=FakeStub)
def test_grpc_predict_function_ndarray():
    """An 'ndarray'-encoded gRPC request round-trips the stub's canned response."""
    proxy = TfServingProxy(
        grpc_endpoint="localhost:8080",
        model_name="newmodel",
        signature_name="signame",
        model_input="images",
        model_output="scores")
    message = prediction_pb2.SeldonMessage(
        data=array_to_grpc_datadef("ndarray", ARR_REQUEST_VALUE, []))
    result = get_data_from_proto(proxy.predict_grpc(message))
    assert result == ARR_RESPONSE_VALUE
@mock.patch.object(requests, "post")
def test_rest_predict_function_json(mock_request_post):
    """REST predict returns the mocked jsonData payload unchanged."""
    expected = {"jsonData": ARR_RESPONSE_VALUE.tolist()}
    canned = requests.Response()
    canned.status_code = 200
    type(canned).text = mock.PropertyMock(return_value="text")  # property mock
    canned.json = lambda: expected
    mock_request_post.return_value = canned
    proxy = TfServingProxy(
        rest_endpoint="http://localhost:8080",
        model_name="newmodel",
        signature_name="signame",
        model_input="images",
        model_output="scores")
    request = {"jsonData": ARR_REQUEST_VALUE.tolist()}
    assert proxy.predict(request) == expected
@mock.patch.object(requests, "post")
def test_rest_predict_function_ndarray(mock_request_post):
    """REST predict returns the mocked ndarray payload unchanged."""
    expected = {"data": {"ndarray": ARR_RESPONSE_VALUE.tolist(), "names": []}}
    canned = requests.Response()
    canned.status_code = 200
    type(canned).text = mock.PropertyMock(return_value="text")  # property mock
    canned.json = lambda: expected
    mock_request_post.return_value = canned
    proxy = TfServingProxy(
        rest_endpoint="http://localhost:8080",
        model_name="newmodel",
        signature_name="signame",
        model_input="images",
        model_output="scores")
    request = {"data": {"ndarray": ARR_REQUEST_VALUE.tolist(), "names": []}}
    assert proxy.predict(request) == expected
| 1,962 |
984 |
{
"name": "validator",
"filename": "validator.js",
"version": "1.0.4",
"description": "轻量级的JavaScript表单验证,字符串验证。没有依赖,支持UMD,~3kb。",
"homepage": "http://jaywcjlove.github.io/validator.js",
"keywords": [
"validator",
"validator.tool",
"validator.js",
"validation",
"validate",
"sanitization",
"sanitize",
"sanitisation",
"sanitise",
"assert"
],
"maintainers": [
{
"name": "<NAME>",
"web": "http://wangchujiang.com",
"mail": "<EMAIL>"
}
],
"repositories": [
{
"type": "git",
"url": "https://github.com/jaywcjlove/validator.js"
}
]
}
| 354 |
3,428 |
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark numpy.ndarray."""
from __future__ import print_function
import timeit
REPEATS = 3
COUNT = [0] # use a list to allow modification within nested scopes
def print_version():
    """Emit the TAP (Test Anything Protocol) version header."""
    version = 13
    print("TAP version " + str(version))
def print_summary(total, passing):
    """Print the benchmark summary in TAP format.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests
    """
    lines = [
        "#",
        "1.." + str(total),        # TAP plan
        "# total " + str(total),
        "# pass " + str(passing),
        "#",
        "# ok",
    ]
    for line in lines:
        print(line)
def print_results(iterations, elapsed):
    """Print benchmark results as a TAP YAML diagnostic block.

    # Arguments

    * `iterations`: number of iterations
    * `elapsed`: elapsed time (in seconds)

    # Examples

    ``` python
    python> print_results(100000, 0.131009101868)
    ```
    """
    rate = iterations / elapsed
    print(" ---")
    for label, value in (("iterations", iterations), ("elapsed", elapsed), ("rate", rate)):
        print(" " + label + ": " + str(value))
    print(" ...")
def benchmark(name, setup, stmt, iterations):
    """Run a benchmark REPEATS times and print TAP-formatted results.

    # Arguments

    * `name`: benchmark name (suffix appended to "python::numpy")
    * `setup`: benchmark setup code
    * `stmt`: statement to benchmark
    * `iterations`: number of iterations per repeat

    # Examples

    ``` python
    python> benchmark("::random", "from random import random;", "y = random()", 1000000)
    ```
    """
    timer = timeit.Timer(stmt, setup=setup)
    for _ in range(REPEATS):
        print("# python::numpy" + name)
        COUNT[0] += 1
        print_results(iterations, timer.timeit(number=iterations))
        print("ok " + str(COUNT[0]) + " benchmark finished")
def main():
    """Run the benchmarks.

    The benchmark definitions are kept in a data table so each case is listed
    once instead of repeating the name/setup/stmt/iterations boilerplate
    fourteen times. The setup/stmt strings and iteration counts are identical
    to the original expanded code, and the cases run in the same order.
    """
    print_version()

    # Setup shared by the attribute-access benchmarks: a 3x2 ndarray view.
    attr_setup = "import numpy as np; shape = [3, 2]; y = np.ndarray(buffer=np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), shape=shape);"
    # Setup shared by the element get/set benchmarks: adds random indexing helpers.
    rand_setup = "import numpy as np; from math import floor; from random import random; shape = [3, 2]; y = np.ndarray(buffer=np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), shape=shape, dtype='float64');"

    cases = [
        ("::instantiation",
         "import numpy as np; x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype='float64'); shape = [3, 2]; strides = [2, 1]; offset = 0; order = 'C';",
         "y = np.ndarray(buffer=x, shape=shape, strides=strides, offset=offset, order=order)",
         100000),
        ("::get:data", attr_setup, "z = y.data", 1000000),
        ("::get:dtype", attr_setup, "z = y.dtype", 1000000),
        ("::get:flags", attr_setup, "z = y.flags", 1000000),
        ("::get:length", attr_setup, "z = y.size", 1000000),
        ("::get:BYTES_PER_ELEMENT", attr_setup, "z = y.itemsize", 1000000),
        ("::get:byteLength", attr_setup, "z = y.nbytes", 1000000),
        ("::get:ndims", attr_setup, "z = y.ndim", 1000000),
        ("::get:shape", attr_setup, "z = y.shape", 1000000),
        ("::get:strides", attr_setup, "z = y.strides", 1000000),
        (":get", rand_setup, "z = y[int(floor(random()*3.0)), 1]", 1000000),
        (":set", rand_setup, "y[int(floor(random()*3.0)), 1] = random()", 1000000),
        (":iget", rand_setup, "z = y.item(int(floor(random()*4.0)))", 1000000),
        (":iset", rand_setup, "y.itemset(int(floor(random()*4.0)), random())", 1000000),
    ]
    for name, setup, stmt, iterations in cases:
        benchmark(name, setup, stmt, iterations)

    print_summary(COUNT[0], COUNT[0])


if __name__ == "__main__":
    main()
| 2,619 |
713 |
<filename>leetcode.com/python/653_Two_Sum_IV_Input_is_a_BST.py<gh_stars>100-1000
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def findTarget(self, root, k):
        """Return True if the BST contains two distinct nodes summing to k.

        Uses a single DFS with a complement set: for every visited value v
        we record k - v, so a later node whose value is already in the set
        completes a pair. O(n) time, O(n) space.

        :type root: TreeNode
        :type k: int
        :rtype: bool
        """
        diffSet = set()
        return self.findTargetHelper(root, k, diffSet)

    def findTargetHelper(self, root, k, diffSet):
        # Empty subtree: no pair can be completed here.
        if not root:
            return False
        # A hit means some previously-visited node pairs with this one.
        if root.val in diffSet:
            return True
        diffSet.add(k - root.val)
        # Short-circuit: the original computed both subtree results before
        # combining them, traversing the right subtree even after the left
        # one had already found a pair.
        return (self.findTargetHelper(root.left, k, diffSet)
                or self.findTargetHelper(root.right, k, diffSet))
| 419 |
848 |
<filename>tools/AKS/kernel_zoo/kernel_classification_accuracy.json
{
"kernel_name": "ClassificationAccuracy",
"description": "Measure accuracy of a classification network",
"kernel_type": "cpp",
"device_type": "cpu",
"kernel_lib" : "libs/libClassificationAccuracy.so",
"param_list" : {
"ground_truth": {"type" : "string"}
}
}
| 133 |
480 |
/*
* Copyright [2013-2021], Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.polardbx.sequence.impl;
import com.alibaba.polardbx.sequence.exception.SequenceException;
import com.alibaba.polardbx.common.IdGenerator;
import com.alibaba.polardbx.common.constants.SequenceAttribute.Type;
/**
* This type of sequence doesn't rely on database and has the following format:
* timestamp base + worker id + sequence within the period of the timestamp.
*
* @author chensr 2016年10月17日 下午1:58:00
* @since 5.0.0
*/
public class TimeBasedSequence extends BaseSequence {

    /** Generator of time-based ids: timestamp base + worker id + counter. */
    private final IdGenerator idGenerator = IdGenerator.getIdGenerator();

    public TimeBasedSequence(String name) {
        this.type = Type.TIME;
        this.name = name;
    }

    /** Returns the next time-based id. Never touches the database. */
    @Override
    public long nextValue() throws SequenceException {
        return idGenerator.nextId();
    }

    /** Returns the last id of a freshly reserved batch of {@code size} ids. */
    @Override
    public long nextValue(int size) throws SequenceException {
        return idGenerator.nextId(size);
    }

    /** Time-based sequences have no cached range to exhaust. */
    @Override
    public boolean exhaustValue() throws SequenceException {
        return true;
    }

    public IdGenerator getIdGenerator() {
        return idGenerator;
    }
}
| 564 |
565 |
<gh_stars>100-1000
/*
+----------------------------------------------------------------------+
| PHP Version 7 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| <EMAIL> so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: krakjoe |
+----------------------------------------------------------------------+
*/
#ifndef HAVE_PHP_UI_MENU
#define HAVE_PHP_UI_MENU
#include <ui.h>
#include "php.h"
#include <classes/menu.h>
#include <classes/item.h>
zend_object_handlers php_ui_menu_handlers;
zend_class_entry *uiMenu_ce;
extern void php_ui_item_click_handler(uiMenuItem *i, uiWindow *w, void *_item);
/* Allocate and initialise the zend_object backing a UI\Menu instance.
   Only the PHP-side object is created here; the underlying uiMenu is
   created later, in Menu::__construct(). */
zend_object* php_ui_menu_create(zend_class_entry *ce) {
	php_ui_menu_t *menu = 
		(php_ui_menu_t*) ecalloc(1, sizeof(php_ui_menu_t) + zend_object_properties_size(ce));

	zend_object_std_init(&menu->std, ce);
	object_properties_init(&menu->std, ce);

	/* Route object operations through this class's custom handlers. */
	menu->std.handlers = &php_ui_menu_handlers;

	return &menu->std;
}
/* Arginfo: Menu::__construct(string $name) -- one required string. */
ZEND_BEGIN_ARG_INFO_EX(php_ui_menu_construct_info, 0, 0, 1)
	ZEND_ARG_TYPE_INFO(0, name, IS_STRING, 0)
ZEND_END_ARG_INFO()

/* {{{ proto Menu Menu::__construct(string name) */
PHP_METHOD(Menu, __construct)
{
	php_ui_menu_t *menu = php_ui_menu_fetch(getThis());
	zend_string *name = NULL;

	if (zend_parse_parameters_throw(ZEND_NUM_ARGS(), "S", &name) != SUCCESS) {
		return;
	}

	/* Create the native libui menu titled after $name. */
	menu->m = uiNewMenu(ZSTR_VAL(name));
} /* }}} */
/* Arginfo for append()/appendCheck(): required string $name plus an optional
   item class. PHP >= 7.2 has a dedicated macro for object return types. */
#if PHP_VERSION_ID >= 70200
ZEND_BEGIN_ARG_WITH_RETURN_OBJ_INFO_EX(php_ui_menu_append_info, 0, 0, "UI\\MenuItem", 1)
#else
ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(php_ui_menu_append_info, 0, 0, IS_OBJECT, "UI\\MenuItem", 1)
#endif
	ZEND_ARG_TYPE_INFO(0, name, IS_STRING, 0)
	ZEND_ARG_TYPE_INFO(0, type, IS_STRING, 0)
ZEND_END_ARG_INFO()

/* {{{ proto Menu Menu::append(string name [, string type = MenuItem::class]) */
PHP_METHOD(Menu, append)
{
	php_ui_menu_t *menu = php_ui_menu_fetch(getThis());
	zend_string *name = NULL;
	zend_class_entry *type = uiItem_ce;          /* default item class */
	php_ui_item_t *item = NULL;

	if (zend_parse_parameters_throw(ZEND_NUM_ARGS(), "S|C", &name, &type) != SUCCESS) {
		return;
	}

	/* A custom class must extend UI\MenuItem. */
	if (ZEND_NUM_ARGS() > 1 && !instanceof_function(type, uiItem_ce)) {
		/* throw */
		return;
	}

	/* Return a new item object wrapping the appended native menu item. */
	object_init_ex(return_value, type);

	item = php_ui_item_fetch(return_value);
	item->i = uiMenuAppendItem(menu->m, ZSTR_VAL(name));

	/* Forward native click events to the PHP-side handler. */
	uiMenuItemOnClicked(item->i, php_ui_item_click_handler, item);
} /* }}} */
/* {{{ proto Menu Menu::appendCheck(string name [, string type = MenuItem::class]) */
PHP_METHOD(Menu, appendCheck)
{
	php_ui_menu_t *menu = php_ui_menu_fetch(getThis());
	zend_string *name = NULL;
	zend_class_entry *type = uiItem_ce;          /* default item class */
	php_ui_item_t *item = NULL;

	/* Accept the documented optional item class, mirroring Menu::append();
	   the previous "S" specifier rejected any second argument outright, so
	   $type could never be supplied. */
	if (zend_parse_parameters_throw(ZEND_NUM_ARGS(), "S|C", &name, &type) != SUCCESS) {
		return;
	}

	/* A custom class must extend UI\MenuItem. */
	if (ZEND_NUM_ARGS() > 1 && !instanceof_function(type, uiItem_ce)) {
		/* throw */
		return;
	}

	object_init_ex(return_value, type);

	item = php_ui_item_fetch(return_value);
	item->i = uiMenuAppendCheckItem(menu->m, ZSTR_VAL(name));
	/* NOTE(review): unlike append(), no uiMenuItemOnClicked() handler is
	   registered here -- confirm whether check items should also dispatch
	   click events to PHP. */
} /* }}} */
/* Arginfo for the item-class-only variants (appendQuit/Preferences/About):
   a single optional class name. */
#if PHP_VERSION_ID >= 70200
ZEND_BEGIN_ARG_WITH_RETURN_OBJ_INFO_EX(php_ui_menu_append_anon_info, 0, 0, "UI\\MenuItem", 0)
#else
ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(php_ui_menu_append_anon_info, 0, 0, IS_OBJECT, "UI\\MenuItem", 0)
#endif
	ZEND_ARG_TYPE_INFO(0, type, IS_STRING, 0)
ZEND_END_ARG_INFO()

/* {{{ proto Menu Menu::appendQuit([string type = MenuItem::class]) */
PHP_METHOD(Menu, appendQuit)
{
	php_ui_menu_t *menu = php_ui_menu_fetch(getThis());
	php_ui_item_t *item = NULL;
	zend_class_entry *type = uiItem_ce;          /* default item class */

	if (zend_parse_parameters_throw(ZEND_NUM_ARGS(), "|C", &type) != SUCCESS) {
		return;
	}

	/* $type is the first (and only) argument here, so validate it whenever
	   any argument was passed; the previous `> 1` test could never fire. */
	if (ZEND_NUM_ARGS() > 0 && !instanceof_function(type, uiItem_ce)) {
		/* throw */
		return;
	}

	object_init_ex(return_value, type);

	item = php_ui_item_fetch(return_value);
	item->i = uiMenuAppendQuitItem(menu->m);
} /* }}} */
/* {{{ proto Menu Menu::appendPreferences([string type = MenuItem::class]) */
PHP_METHOD(Menu, appendPreferences)
{
	php_ui_menu_t *menu = php_ui_menu_fetch(getThis());
	php_ui_item_t *item = NULL;
	zend_class_entry *type = uiItem_ce;          /* default item class */

	if (zend_parse_parameters_throw(ZEND_NUM_ARGS(), "|C", &type) != SUCCESS) {
		return;
	}

	/* $type is the first (and only) argument here, so validate it whenever
	   any argument was passed; the previous `> 1` test could never fire. */
	if (ZEND_NUM_ARGS() > 0 && !instanceof_function(type, uiItem_ce)) {
		/* throw */
		return;
	}

	object_init_ex(return_value, type);

	item = php_ui_item_fetch(return_value);
	item->i = uiMenuAppendPreferencesItem(menu->m);
} /* }}} */
/* {{{ proto Menu Menu::appendAbout([string type = MenuItem::class]) */
PHP_METHOD(Menu, appendAbout)
{
	php_ui_menu_t *menu = php_ui_menu_fetch(getThis());
	php_ui_item_t *item = NULL;
	zend_class_entry *type = uiItem_ce;          /* default item class */

	if (zend_parse_parameters_throw(ZEND_NUM_ARGS(), "|C", &type) != SUCCESS) {
		return;
	}

	/* $type is the first (and only) argument here, so validate it whenever
	   any argument was passed; the previous `> 1` test could never fire. */
	if (ZEND_NUM_ARGS() > 0 && !instanceof_function(type, uiItem_ce)) {
		/* throw */
		return;
	}

	object_init_ex(return_value, type);

	item = php_ui_item_fetch(return_value);
	item->i = uiMenuAppendAboutItem(menu->m);
} /* }}} */
/* Arginfo: appendSeparator() takes no arguments. */
ZEND_BEGIN_ARG_INFO_EX(php_ui_menu_append_separator_info, 0, 0, 0)
ZEND_END_ARG_INFO()

/* {{{ proto void Menu::appendSeparator(void) */
PHP_METHOD(Menu, appendSeparator)
{
	php_ui_menu_t *menu = php_ui_menu_fetch(getThis());

	if (zend_parse_parameters_none() != SUCCESS) {
		return;
	}

	/* Separators have no PHP-side object; nothing is returned. */
	uiMenuAppendSeparator(menu->m);
} /* }}} */
/* {{{ Method table for UI\Menu. append() and appendCheck() share arginfo
   (name + optional item class); the Quit/Preferences/About helpers share
   the class-only arginfo. */
const zend_function_entry php_ui_menu_methods[] = {
	PHP_ME(Menu, __construct, php_ui_menu_construct_info, ZEND_ACC_PUBLIC)
	PHP_ME(Menu, append, php_ui_menu_append_info, ZEND_ACC_PUBLIC)
	PHP_ME(Menu, appendCheck, php_ui_menu_append_info, ZEND_ACC_PUBLIC)
	PHP_ME(Menu, appendQuit, php_ui_menu_append_anon_info, ZEND_ACC_PUBLIC)
	PHP_ME(Menu, appendPreferences, php_ui_menu_append_anon_info, ZEND_ACC_PUBLIC)
	PHP_ME(Menu, appendAbout, php_ui_menu_append_anon_info, ZEND_ACC_PUBLIC)
	PHP_ME(Menu, appendSeparator, php_ui_menu_append_separator_info, ZEND_ACC_PUBLIC)
	PHP_FE_END
}; /* }}} */
/* {{{ Module init: register the UI\Menu class and wire up its custom
   object handlers (creation hook + embedded-struct offset). */
PHP_MINIT_FUNCTION(UI_Menu) 
{
	zend_class_entry ce;

	INIT_NS_CLASS_ENTRY(ce, "UI", "Menu", php_ui_menu_methods);

	uiMenu_ce = zend_register_internal_class(&ce);
	uiMenu_ce->create_object = php_ui_menu_create;

	/* Start from the standard handlers, then record where the zend_object
	   sits inside php_ui_menu_t so the engine can recover the wrapper. */
	memcpy(&php_ui_menu_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
	php_ui_menu_handlers.offset = XtOffsetOf(php_ui_menu_t, std);

	return SUCCESS;
} /* }}} */
| 3,117 |
520 |
<filename>stats/src/main/java/com/facebook/stats/cardinality/BucketAndHash.java<gh_stars>100-1000
/*
* Copyright (C) 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.stats.cardinality;
import static com.google.common.base.Preconditions.checkArgument;
public class BucketAndHash {
  private final int bucket;
  private final long hash;

  BucketAndHash(int bucket, long hash) {
    this.bucket = bucket;
    this.hash = hash;
  }

  /**
   * Splits a 64-bit hash into a bucket index and a truncated hash.
   *
   * <p>The bucket comes from the low log2(numberOfBuckets) bits. The vacated
   * high bits of the returned hash are filled with ones so that a later
   * trailing-zero count (HyperLogLog) cannot be inflated by the shift; only
   * the low 64 - log2(numberOfBuckets) bits of the result are meaningful.
   */
  public static BucketAndHash fromHash(long hash, int numberOfBuckets) {
    checkArgument(Numbers.isPowerOf2(numberOfBuckets), "numberOfBuckets must be a power of 2");

    int bits = Integer.numberOfTrailingZeros(numberOfBuckets); // log2(numberOfBuckets)
    int bucket = (int) (hash & (numberOfBuckets - 1));

    // Set the sign bit, then arithmetic-shift so ones propagate from the top.
    long truncated = (hash | Long.MIN_VALUE) >> bits;

    return new BucketAndHash(bucket, truncated);
  }

  public int getBucket() {
    return bucket;
  }

  public long getHash() {
    return hash;
  }
}
| 634 |
775 |
<filename>utest/namespace/test_retrievercontextfactory.py
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from nose.tools import assert_equal
from robotide.namespace.namespace import _RetrieverContextFactory
from robotide.robotapi import ResourceFile
def datafileWithVariables(vars):
    """Build a ResourceFile whose variable table holds the given mapping."""
    resource = ResourceFile()
    for name, value in vars.items():
        resource.variable_table.add(name, value)
    return resource
class RetrieverContextFactoryTest(unittest.TestCase):
    # Verifies that a retriever context built from a datafile exposes the
    # datafile's variable-table entries for variable replacement.

    def test_created_context_has_variable_table_variables(self):
        factory = _RetrieverContextFactory()
        ctx = factory.ctx_for_datafile(
            datafileWithVariables({'${foo}': 'moi', '${bar}': 'hoi',
                                   '@{zoo}': 'koi'}))
        # Scalars substitute verbatim; the list variable renders as a
        # single-element Python list in the replaced string.
        result = ctx.vars.replace_variables('!${foo}!${bar}!@{zoo}!')
        print(ctx.vars)
        assert_equal(result, "!moi!hoi!['koi']!")


if __name__ == '__main__':
    unittest.main()
| 559 |
348 |
{"nom":"Laloeuf","circ":"5ème circonscription","dpt":"Meurthe-et-Moselle","inscrits":221,"abs":118,"votants":103,"blancs":17,"nuls":2,"exp":84,"res":[{"nuance":"SOC","nom":"<NAME>","voix":51},{"nuance":"REM","nom":"<NAME>","voix":33}]}
| 98 |
3,012 |
/** @file
GUID for MdePkg PCD Token Space
Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#ifndef _MDEPKG_TOKEN_SPACE_GUID_H_
#define _MDEPKG_TOKEN_SPACE_GUID_H_

/// {914AEBE7-4635-459b-AA1C-11E219B03A10}: GUID naming the PCD token space
/// declared by MdePkg.
#define MDEPKG_TOKEN_SPACE_GUID \
  { \
    0x914AEBE7, 0x4635, 0x459b, { 0xAA, 0x1C, 0x11, 0xE2, 0x19, 0xB0, 0x3A, 0x10 } \
  }

/// The GUID instance itself is defined elsewhere in the package
/// (presumably the build's generated code -- confirm).
extern EFI_GUID gEfiMdePkgTokenSpaceGuid;

#endif
| 234 |
483 |
<filename>tests/gdata_tests/contacts/profiles/live_client_test.py
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = '<EMAIL> (<NAME>)'
import atom.core
import atom.data
import atom.http_core
import gdata.contacts.client
import gdata.data
import gdata.test_config as conf
import unittest
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
conf.options.register_option(conf.TARGET_USERNAME_OPTION)
class ProfileTest(unittest.TestCase):
    # Live tests against the Profiles feed. When `runlive` is false each test
    # returns immediately; otherwise requests either hit Google servers or
    # replay a recorded cassette via conf.configure_cache().

    def setUp(self):
        # Placeholder client; replaced with a properly-configured one when
        # running live against the configured Apps domain.
        self.client = gdata.contacts.client.ContactsClient(domain='example.com')
        if conf.options.get_value('runlive') == 'true':
            self.client = gdata.contacts.client.ContactsClient(
                domain=conf.options.get_value('appsdomain'))
            if conf.options.get_value('ssl') == 'true':
                self.client.ssl = True
            conf.configure_client(self.client, 'ProfileTest',
                                  self.client.auth_service, True)
            # Strip the domain part: the profiles API wants a bare username.
            self.client.username = conf.options.get_value('appsusername').split('@')[0]

    def tearDown(self):
        conf.close_client(self.client)

    def test_profiles_feed(self):
        """Fetching the profiles feed yields a ProfilesFeed."""
        if not conf.options.get_value('runlive') == 'true':
            return

        # Either load the recording or prepare to make a live request.
        conf.configure_cache(self.client, 'test_profiles_feed')
        feed = self.client.get_profiles_feed()
        self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))

    def test_profiles_query(self):
        """Paging with max_results=1 and the start-key query param works."""
        if not conf.options.get_value('runlive') == 'true':
            return

        # Either load the recording or prepare to make a live request.
        conf.configure_cache(self.client, 'test_profiles_feed')
        query = gdata.contacts.client.ProfilesQuery(max_results=1)
        feed = self.client.get_profiles_feed(q=query)
        self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
        self.assert_(len(feed.entry) == 1)

        # Needs at least 2 profiles in the feed to test the start-key
        # query param.
        next = feed.GetNextLink()
        feed = None
        if next:
            # Retrieve the start-key query param from the next link.
            uri = atom.http_core.Uri.parse_uri(next.href)
            if 'start-key' in uri.query:
                query.start_key = uri.query['start-key']
                feed = self.client.get_profiles_feed(q=query)
                self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
                self.assert_(len(feed.entry) == 1)
                self.assert_(feed.GetSelfLink().href == next.href)
                # Compare with a feed retrieved with the next link.
                next_feed = self.client.get_profiles_feed(uri=next.href)
                self.assert_(len(next_feed.entry) == 1)
                self.assert_(next_feed.entry[0].id.text == feed.entry[0].id.text)
def suite():
    """Collect the profile test cases into a runnable suite."""
    test_cases = [ProfileTest]
    return conf.build_suite(test_cases)


if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
| 1,239 |
377 |
<filename>FasterRCNN/data/transforms/__init__.py
from .transform import *
from fvcore.transforms.transform import *
from .transform_gen import *

# Re-export every public (non-underscore) name pulled in by the star imports
# above, so `from ...transforms import X` resolves directly.
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| 72 |
2,130 |
//
// RapidCRC.c
// OOMDetector
//
// Tencent is pleased to support the open source community by making OOMDetector available.
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
//
//
#include "RapidCRC.h"
/* CRC-64 generator polynomial, reversed (reflected) bit order. */
#define POLY64REV 0x95AC9329AC4BC9B5ULL

/* Slicing-by-8 lookup tables, built once by init_crc_table_for_oom(). */
static uint64_t crc_table[8][256];

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Build the slicing-by-8 CRC-64 tables. Idempotent: only the first call does
 * any work. NOTE(review): the `first` guard is not thread-safe -- call once
 * before any concurrent use of rapid_crc64().
 */
void init_crc_table_for_oom(void)
{
    static int first = 1;
    int n, k;

    if (!first) {
        return;
    }
    first = 0;

    /* Table 0: classic bit-at-a-time CRC of each possible byte value. */
    for (n = 0; n < 256; n++)
    {
        uint64_t c = (uint64_t)n;
        for (k = 0; k < 8; k++)
        {
            if (c & 1)
                c = (c >> 1) ^ POLY64REV;
            else
                c >>= 1;
        }
        crc_table[0][n] = c;
    }

    /* Tables 1..7: CRC of each byte value advanced by k extra zero bytes,
       allowing eight input bytes to be folded per loop iteration. */
    for (n = 0; n < 256; n++) {
        uint64_t c = crc_table[0][n];
        for (k = 1; k < 8; k++) {
            c = crc_table[0][c & 0xff] ^ (c >> 8);
            crc_table[k][n] = c;
        }
    }
}

/*
 * Compute a CRC-64 over buf using slicing-by-8.
 *
 * `crc` is the running value from a previous call (pass 0 to start); it is
 * complemented on entry and again on exit, per the usual CRC convention.
 *
 * NOTE(review): only full 8-byte words are consumed -- any trailing
 * len % 8 bytes are ignored. The byte-at-a-time tail loop was commented out
 * upstream, presumably as a deliberate speed trade-off for fingerprinting;
 * confirm before relying on full-buffer coverage.
 *
 * (The `register` qualifiers were dropped: the keyword is a no-op in modern
 * C and removed outright in C++17.)
 */
uint64_t rapid_crc64(uint64_t crc, const char *buf, uint64_t len)
{
    const uint64_t *words = (const uint64_t *)buf;
    uint64_t c = ~crc;
    uint64_t remaining = len;

    while (remaining >= 8) {
        c ^= *words++;
        c = crc_table[0][c & 0xff] ^ crc_table[1][(c >> 8) & 0xff] ^ \
            crc_table[2][(c >> 16) & 0xff] ^ crc_table[3][(c >> 24) & 0xff] ^\
            crc_table[4][(c >> 32) & 0xff] ^ crc_table[5][(c >> 40) & 0xff] ^\
            crc_table[6][(c >> 48) & 0xff] ^ crc_table[7][(c >> 56) & 0xff];
        remaining -= 8;
    }

    return ~c;
}

#ifdef __cplusplus
}
#endif
| 1,355 |
788 |
<filename>exp/base.py
from tools.utils import xml_format
import numpy as np
# TODO: classes should hold dictionaries of variables, vxa tags and values
# TODO: remove most of the hard coded text from read_write_voxelyze.py and replace with a few loops
# TODO: add method to VoxCadParams for organizing (nested) subsections in vxa files
class VoxCadParams(object):
    """Base container for VoxCad parameters.

    Keeps the list of vxa sub-group names plus any parameters added after
    construction, along with the xml tag each one serializes to.
    """

    def __init__(self):
        self.sub_groups = []          # names of (nested) vxa sections
        self.new_param_tag_dict = {}  # param attribute name -> formatted tag

    def add_param(self, name, val, tag):
        """Attach *val* as attribute *name* and remember its vxa tag."""
        self.new_param_tag_dict[name] = xml_format(tag)
        setattr(self, name, val)
class Sim(VoxCadParams):
    """Container for VoxCad simulation parameters.

    Every constructor argument is stored verbatim as an attribute; the
    read_write_voxelyze machinery later serializes them into the vxa file
    under the sections listed in ``sub_groups``.
    """

    def __init__(self, self_collisions_enabled=True, simulation_time=10.5, dt_frac=0.9, stop_condition=2,
                 fitness_eval_init_time=0.5, actuation_start_time=0, equilibrium_mode=0, min_temp_fact=0.1,
                 max_temp_fact_change=0.00001, max_stiffness_change=10000, min_elastic_mod=5e006,
                 max_elastic_mod=5e008, damp_evolved_stiffness=True):

        VoxCadParams.__init__(self)

        self.sub_groups = ["Integration", "Damping", "Collisions", "Features", "StopCondition", "EquilibriumMode", "GA"]
        # custom nested things in "SurfMesh", "CMesh"

        self.self_collisions_enabled = self_collisions_enabled
        self.simulation_time = simulation_time
        self.dt_frac = dt_frac                   # fraction of the stable timestep
        self.stop_condition = stop_condition
        self.fitness_eval_init_time = fitness_eval_init_time
        self.actuation_start_time = actuation_start_time
        self.equilibrium_mode = equilibrium_mode
        self.min_temp_fact = min_temp_fact
        self.max_temp_fact_change = max_temp_fact_change
        self.max_stiffness_change = max_stiffness_change
        self.min_elastic_mod = min_elastic_mod
        self.max_elastic_mod = max_elastic_mod
        self.damp_evolved_stiffness = damp_evolved_stiffness
class Env(VoxCadParams):
    """Container for VoxCad environment parameters.

    All constructor arguments are stored as attributes for later vxa
    serialization. Arena features (hurdles, walls, debris, fluid, block)
    are configured here as well.
    """

    def __init__(self, frequency=4.0, gravity_enabled=1, grav_acc=-9.81, density=1e+006, temp_enabled=1,
                 floor_enabled=1, floor_slope=0.0, lattice_dimension=0.01, fat_stiffness=5e+006, bone_stiffness=5e+008,
                 muscle_stiffness=5e+006, sticky_floor=0, time_between_traces=0, save_passive_data=False,
                 actuation_variance=0, temp_amp=39, growth_amp=0, growth_speed_limit=0,
                 greedy_growth=False, greedy_threshold=0, squeeze_rate=0, constant_squeeze=False, squeeze_start=0.5,
                 squeeze_end=2, num_hurdles=0, space_between_hurdles=3, hurdle_height=1, hurdle_stop=np.inf,
                 circular_hurdles=False, tunnel_width=8, forward_hurdles_only=True, wall_height=3, back_stop=False,
                 fence=False, debris=False, debris_size=0, debris_start=-np.inf, biped=False, biped_leg_proportion=0.6,
                 block_position=0, falling_prohibited=False, contract_only=False, expand_only=False,
                 passive_body_only=False, tilt_vectors_updates_per_cycle=0, regeneration_model_updates_per_cycle=0,
                 num_regeneration_model_synapses=0, regeneration_model_input_bias=1, num_hidden_regeneration_neurons=0,
                 forward_model_updates_per_cycle=0, controller_updates_per_cycle=0, num_forward_model_synapses=0,
                 num_controller_synapses=0, signaling_updates_per_cycle=0, depolarizations_per_cycle=5,
                 repolarizations_per_cycle=1, lightsource_xyz=None, fluid_environment=False,
                 aggregate_drag_coefficient=0.0, block_material=8, block_density=1e+006, block_static_friction=1,
                 block_dynamic_friction=0.5, external_block=False):

        VoxCadParams.__init__(self)

        self.sub_groups = ["Fixed_Regions", "Forced_Regions", "Gravity", "Thermal"]

        self.frequency = frequency
        self.gravity_enabled = gravity_enabled
        self.grav_acc = grav_acc
        self.density = density
        self.floor_enabled = floor_enabled
        self.temp_enabled = temp_enabled
        self.floor_slope = floor_slope
        self.lattice_dimension = lattice_dimension  # TODO: remove this (it is in Material)
        self.muscle_stiffness = muscle_stiffness  # TODO: remove this (it is in Material)
        self.bone_stiffness = bone_stiffness  # TODO: remove this (it is in Material)
        self.fat_stiffness = fat_stiffness  # TODO: remove this (it is in Material)
        self.sticky_floor = sticky_floor
        self.time_between_traces = time_between_traces
        self.save_passive_data = save_passive_data
        self.actuation_variance = actuation_variance
        self.temp_amp = temp_amp
        self.growth_amp = growth_amp
        self.growth_speed_limit = growth_speed_limit
        self.greedy_growth = greedy_growth
        self.greedy_threshold = greedy_threshold
        self.num_hurdles = num_hurdles
        self.space_between_hurdles = space_between_hurdles
        # Sentinel -1 means "no hurdles"; the given height only applies when
        # at least one hurdle is requested.
        self.hurdle_height = -1
        if num_hurdles > 0:
            self.hurdle_height = hurdle_height
        self.circular_hurdles = circular_hurdles
        self.forward_hurdles_only = forward_hurdles_only
        self.hurdle_stop = hurdle_stop
        self.wall_height = wall_height
        self.back_stop = back_stop
        self.fence = fence
        self.debris = debris
        self.debris_size = debris_size
        self.debris_start = debris_start
        self.tunnel_width = tunnel_width
        self.squeeze_rate = squeeze_rate
        self.constant_squeeze = constant_squeeze
        self.squeeze_start = squeeze_start
        self.squeeze_end = squeeze_end
        self.block_position = block_position
        self.biped = biped
        self.biped_leg_proportion = biped_leg_proportion
        self.falling_prohibited = falling_prohibited
        self.contract_only = contract_only
        self.expand_only = expand_only
        self.passive_body_only = passive_body_only
        self.tilt_vectors_updates_per_cycle = tilt_vectors_updates_per_cycle
        self.regeneration_model_updates_per_cycle = regeneration_model_updates_per_cycle
        self.forward_model_updates_per_cycle = forward_model_updates_per_cycle
        self.controller_updates_per_cycle = controller_updates_per_cycle
        self.num_hidden_regeneration_neurons = num_hidden_regeneration_neurons
        self.regeneration_model_input_bias = regeneration_model_input_bias
        self.num_regeneration_model_synapses = num_regeneration_model_synapses
        self.num_forward_model_synapses = num_forward_model_synapses
        self.num_controller_synapses = num_controller_synapses
        self.signaling_updates_per_cycle = signaling_updates_per_cycle
        self.depolarizations_per_cycle = depolarizations_per_cycle
        self.repolarizations_per_cycle = repolarizations_per_cycle
        # Default light source at the origin when none is supplied.
        self.lightsource_xyz = lightsource_xyz
        if lightsource_xyz is None:
            self.lightsource_xyz = [0, 0, 0]
        self.fluid_environment = fluid_environment
        self.aggregate_drag_coefficient = aggregate_drag_coefficient
        self.block_material = block_material
        self.block_density = block_density
        self.block_static_friction = block_static_friction
        self.block_dynamic_friction = block_dynamic_friction
        self.external_block = external_block
class Material(VoxCadParams):
    """Container for VoxCad material parameters."""
    # TODO: this class is currently not used

    def __init__(self, lattice_dimension=0.01, softest_material=5, material_stiffness=5e+006, dim_adj=1, line_offset=0,
                 layer_offset=0, squeeze=1):
        VoxCadParams.__init__(self)

        self.sub_groups = ["Lattice", "Voxel"]
        # Maps material id -> property dict; filled by add_material_to_palette.
        self.palette = {}

    def add_material_to_palette(self, id, mat_type, name, rgba, mat_model, elastic_mod, plastic_mod, yield_stress,
                                fail_model, fail_stress, fail_strain, density, poissons_ratio, cte, u_static,
                                u_dynamic):
        # NOTE(review): only the name is stored so far; the remaining
        # arguments are accepted but ignored pending the TODO below.
        self.palette[id] = {"Name": name}
        # TODO: match structure
class ObjectiveDict(dict):
    """A dictionary describing the objectives for optimization. See self.add_objective()."""

    def __init__(self):
        super(ObjectiveDict, self).__init__()
        # Next free rank; also the count of objectives added so far.
        self.max_rank = 0

    # def __setitem__(self, key, value):
    #     # only allow adding entries through add_objective()
    #     raise SyntaxError

    # TODO: want to restrict input but this prevents deep copying: maybe instead just make object with embedded dict

    def add_objective(self, name, maximize, tag, meta_func=None, node_func=None, output_node_name=None,
                      logging_only=False, default_value=None, combine_func=np.mean, compare_func=None):
        """Add an optimization objective to the dictionary.

        Objectives must be added in order of importance, however fitness is fixed to be the most important.

        The keys of an ObjectiveDict correspond to the objective's rank or importance. The ranks are set via the order
        in which objectives are added (fitness will auto-correct to rank 0).

        For each rank key, starting with 0, the corresponding value is another dictionary with three components:
        name, maximized, tag.

        Parameters
        ----------
        name : str
            The associated individual-level attribute name
        maximize : bool
            Whether superior individuals maximized (True) or minimize (False) the objective.
        tag : str or None
            The tag used in parsing the resulting output from a VoxCad simulation.
            If this is None then the attribute is calculated outside of VoxCad (in Python only).
        meta_func : function
            This is applied as a function of the objective value and individual, i.e. func(val, ind)
        node_func : function
            If tag is None then the objective is not computed in VoxCad and is instead calculated on an output of a
            network.
        output_node_name : str
            The output node which node_func operates on.
        logging_only : bool
            If True then don't use as objective, only to track statistics from the simulation.
        """
        curr_rank = self.max_rank

        # if fitness is not added first, shift every other objective "down" in importance
        if name == "fitness" and self.max_rank > 0:
            curr_rank = 0  # change the key to rank 0
            # Shift ranks from the bottom up so no entry is overwritten.
            for rank in reversed(range(len(self))):
                self[rank+1] = self[rank]

        # Bypass any __setitem__ restriction by writing through dict directly.
        super(ObjectiveDict, self).__setitem__(curr_rank, {"name": name,
                                                           "maximize": maximize,
                                                           "tag": xml_format(tag) if tag is not None else None,
                                                           # Sentinel used before any real value is recorded.
                                                           "worst_value": -10e6 if maximize else 10e6,
                                                           "default_value": default_value,
                                                           "meta_func": meta_func,
                                                           "node_func": node_func,
                                                           "output_node_name": output_node_name,
                                                           "logging_only": logging_only,
                                                           "combine_func": combine_func,
                                                           "compare_func": compare_func})

        # TODO: logging_only 'objectives' should be a separate 'SimStats' class
        self.max_rank += 1
| 5,180 |
313 |
<reponame>gridgentoo/titus-control-plane<gh_stars>100-1000
/*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.titus.runtime.loadbalancer;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.netflix.titus.api.loadbalancer.model.JobLoadBalancer;
import org.junit.Before;
import org.junit.Test;
import static org.assertj.core.api.Java6Assertions.assertThat;
public class LoadBalancerCursorsTest {
    // Job/load-balancer associations sorted with the production comparator,
    // so index expectations below match cursor-resolution order.
    private List<JobLoadBalancer> loadBalancerList;

    @Before
    public void setUp() {
        loadBalancerList = Stream.of(
                new JobLoadBalancer("job1", "lb1"),
                new JobLoadBalancer("job3", "lb3"),
                new JobLoadBalancer("job2", "lb2"),
                new JobLoadBalancer("job4", "lb4")
        ).sorted(LoadBalancerCursors.loadBalancerComparator()).collect(Collectors.toList());
    }

    /** A cursor for an element present in the list resolves to its index. */
    @Test
    public void checkValidCursor() {
        final String cursor = LoadBalancerCursors.newCursorFrom(new JobLoadBalancer("job3", "lb3"));
        final Optional<Integer> loadBalancerIndex = LoadBalancerCursors.loadBalancerIndexOf(loadBalancerList, cursor);
        assertThat(loadBalancerIndex.isPresent()).isTrue();
        assertThat(loadBalancerIndex.get()).isEqualTo(2);
    }

    /** A cursor for a missing element resolves to the preceding element's index. */
    @Test
    public void checkMissingCursor() {
        final String cursor = LoadBalancerCursors.newCursorFrom(new JobLoadBalancer("job30", "lb3"));
        final Optional<Integer> loadBalancerIndex = LoadBalancerCursors.loadBalancerIndexOf(loadBalancerList, cursor);
        assertThat(loadBalancerIndex.isPresent()).isTrue();
        assertThat(loadBalancerIndex.get()).isEqualTo(1);
    }

    /** A cursor ordered before the first element resolves to index -1. */
    @Test
    public void checkFirstMissingCursorElement() {
        final String cursor = LoadBalancerCursors.newCursorFrom(new JobLoadBalancer("job1", "lb0"));
        final Optional<Integer> loadBalancerIndex = LoadBalancerCursors.loadBalancerIndexOf(loadBalancerList, cursor);
        assertThat(loadBalancerIndex.isPresent()).isTrue();
        assertThat(loadBalancerIndex.get()).isEqualTo(-1);
    }
}
| 963 |
365 |
# ############################################################
# Importing - Same For All Render Layer Tests
# ############################################################
import unittest
import os
import sys
from view_layer_common import *
# ############################################################
# Testing
# ############################################################
class UnitTesting(ViewLayerTesting):
    # Exercises collection-name uniqueness rules: siblings must get distinct
    # names (Blender auto-renames duplicates), while non-siblings may share
    # a name.

    def setup_family(self):
        """Build a small collection hierarchy and return it as a dict.

        Layout:
            master
            |- grandma
            |  |- mom
            |  |  |- son
            |  |  |- daughter
            |  |- uncle
            |     |- cousin
            |- grandpa
        """
        import bpy
        scene = bpy.context.scene

        # Just add a bunch of collections on which we can do various tests.
        grandma = scene.master_collection.collections.new('grandma')
        grandpa = scene.master_collection.collections.new('grandpa')
        mom = grandma.collections.new('mom')
        son = mom.collections.new('son')
        daughter = mom.collections.new('daughter')
        uncle = grandma.collections.new('uncle')
        cousin = uncle.collections.new('cousin')

        lookup = {c.name: c for c in (grandma, grandpa, mom, son, daughter, uncle, cousin)}
        return lookup

    def test_rename_a(self):
        # Renaming across generations (mom vs daughter) is allowed.
        family = self.setup_family()
        family['mom'].name = family['daughter'].name
        # Since they are not siblings, we allow them to have the same name.
        self.assertEqual(family['mom'].name, family['daughter'].name)

    def test_rename_b(self):
        # Siblings under the master collection must stay distinct.
        family = self.setup_family()
        family['grandma'].name = family['grandpa'].name
        self.assertNotEqual(family['grandma'].name, family['grandpa'].name)

    def test_rename_c(self):
        # Cousins live under different parents, so sharing a name is fine.
        family = self.setup_family()
        family['cousin'].name = family['daughter'].name
        # Since they are not siblings, we allow them to have the same name.
        self.assertEqual(family['cousin'].name, family['daughter'].name)

    def test_rename_d(self):
        # Direct siblings must be auto-renamed apart.
        family = self.setup_family()
        family['son'].name = family['daughter'].name
        self.assertNotEqual(family['son'].name, family['daughter'].name)

    def test_rename_e(self):
        # Same as test_rename_b: top-level siblings stay distinct.
        family = self.setup_family()
        family['grandma'].name = family['grandpa'].name
        self.assertNotEqual(family['grandma'].name, family['grandpa'].name)

    def test_add_equal_name_a(self):
        # Adding a sibling with a duplicate name triggers auto-rename.
        family = self.setup_family()
        other_daughter = family['mom'].collections.new(family['daughter'].name)
        self.assertNotEqual(other_daughter.name, family['daughter'].name)

    def test_add_equal_name_b(self):
        # A new collection elsewhere in the tree may reuse the name.
        family = self.setup_family()
        other_aunt = family['grandma'].collections.new(family['daughter'].name)
        # Since they are not siblings, we allow them to have the same name.
        self.assertEqual(other_aunt.name, family['daughter'].name)

    def test_add_equal_name_c(self):
        # Duplicate sibling name under grandma triggers auto-rename.
        family = self.setup_family()
        other_aunt = family['grandma'].collections.new(family['mom'].name)
        self.assertNotEqual(other_aunt.name, family['mom'].name)
# ############################################################
# Main - Same For All Render Layer Tests
# ############################################################
if __name__ == '__main__':
    # Forward any extra CLI arguments to the shared test harness, then run.
    UnitTesting._extra_arguments = setup_extra_arguments(__file__)
    unittest.main()
| 1,151 |
870 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.classloader.ClassLoaderUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ConfigurationTypeHelper {
  private static final Logger log = LoggerFactory.getLogger(ConfigurationTypeHelper.class);

  /**
   * Interprets a string specifying bytes. A bytes type is specified as a long integer followed by
   * an optional B (bytes), K (KB), M (MB), or G (GB).
   *
   * @param str
   *          String value
   * @return interpreted memory size in bytes
   * @throws IllegalArgumentException
   *           if the string cannot be interpreted as a memory size
   */
  public static long getFixedMemoryAsBytes(String str) {
    char lastChar = str.charAt(str.length() - 1);

    // A lowercase 'b' could be read as "bits"; warn that it is treated as bytes.
    if (lastChar == 'b') {
      log.warn(
          "The 'b' in {} is being considered as bytes. Setting memory by bits is not supported",
          str);
    }
    try {
      int multiplier;
      switch (Character.toUpperCase(lastChar)) {
        case 'G':
          multiplier = 30;
          break;
        case 'M':
          multiplier = 20;
          break;
        case 'K':
          multiplier = 10;
          break;
        case 'B':
          multiplier = 0;
          break;
        default:
          // No recognized unit suffix; the whole string is the byte count.
          return Long.parseLong(str);
      }
      // Shift by the power-of-two exponent corresponding to the unit suffix.
      return Long.parseLong(str.substring(0, str.length() - 1)) << multiplier;
    } catch (Exception ex) {
      throw new IllegalArgumentException(
          "The value '" + str + "' is not a valid memory setting. A valid value would be a number "
              + "possibly followed by an optional 'G', 'M', 'K', or 'B'.");
    }
  }

  /**
   * Interprets a string specifying a Memory type which is specified as a long integer followed by
   * an optional B (bytes), K (KB), M (MB), G (GB) or % (percentage of the JVM max heap).
   *
   * @param str
   *          String value
   * @return interpreted memory size in bytes
   * @throws IllegalArgumentException
   *           if the string cannot be interpreted as a memory size
   */
  public static long getMemoryAsBytes(String str) {
    char lastChar = str.charAt(str.length() - 1);
    if (lastChar == '%') {
      try {
        int percent = Integer.parseInt(str.substring(0, str.length() - 1));
        // Only 1-99 are accepted; note that exactly 100% is rejected here.
        if (percent <= 0 || percent >= 100) {
          throw new IllegalArgumentException(
              "The value '" + str + "' is not a valid memory setting.");
        }
        return Runtime.getRuntime().maxMemory() * percent / 100;
      } catch (Exception ex) {
        throw new IllegalArgumentException(
            "The value '" + str + "' is not a valid memory setting.");
      }
    }
    return getFixedMemoryAsBytes(str);
  }

  /**
   * Interprets a string specifying a time duration. A time duration is specified as a long integer
   * followed by an optional d (days), h (hours), m (minutes), s (seconds), or ms (milliseconds). A
   * value without a unit is interpreted as seconds.
   *
   * @param str
   *          string value
   * @return interpreted time duration in milliseconds
   */
  public static long getTimeInMillis(String str) {
    TimeUnit timeUnit;
    int unitsLen = 1;
    switch (str.charAt(str.length() - 1)) {
      case 'd':
        timeUnit = TimeUnit.DAYS;
        break;
      case 'h':
        timeUnit = TimeUnit.HOURS;
        break;
      case 'm':
        timeUnit = TimeUnit.MINUTES;
        break;
      case 's':
        timeUnit = TimeUnit.SECONDS;
        // "ms" also ends with 's', so check for the two-character suffix here.
        if (str.endsWith("ms")) {
          timeUnit = TimeUnit.MILLISECONDS;
          unitsLen = 2;
        }
        break;
      default:
        // No unit suffix: the whole string is a number of seconds.
        timeUnit = TimeUnit.SECONDS;
        unitsLen = 0;
        break;
    }
    return timeUnit.toMillis(Long.parseLong(str.substring(0, str.length() - unitsLen)));
  }

  /**
   * Interprets a string specifying a fraction. A fraction is specified as a double. An optional %
   * at the end signifies a percentage.
   *
   * @param str
   *          string value
   * @return interpreted fraction as a decimal value
   */
  public static double getFraction(String str) {
    if (!str.isEmpty() && str.charAt(str.length() - 1) == '%')
      return Double.parseDouble(str.substring(0, str.length() - 1)) / 100.0;
    return Double.parseDouble(str);
  }

  // This is not a cache for loaded classes, just a way to avoid spamming the debug log
  private static Map<String,Class<?>> loaded = Collections.synchronizedMap(new HashMap<>());

  /**
   * Loads a class in the given classloader context, suppressing any exceptions, and optionally
   * providing a default instance to use.
   *
   * @param context
   *          the per-table context, can be null
   * @param clazzName
   *          the name of the class to load
   * @param base
   *          the type of the class
   * @param defaultInstance
   *          a default instance if the class cannot be loaded
   * @return a new instance of the class, or the defaultInstance
   */
  public static <T> T getClassInstance(String context, String clazzName, Class<T> base,
      T defaultInstance) {
    T instance = null;

    try {
      instance = getClassInstance(context, clazzName, base);
    } catch (RuntimeException | IOException | ReflectiveOperationException e) {
      log.warn("Failed to load class {}", clazzName, e);
    }

    if (instance == null && defaultInstance != null) {
      log.info("Using default class {}", defaultInstance.getClass().getName());
      instance = defaultInstance;
    }
    return instance;
  }

  /**
   * Loads a class in the given classloader context.
   *
   * @param context
   *          the per-table context, can be null
   * @param clazzName
   *          the name of the class to load
   * @param base
   *          the type of the class
   * @return a new instance of the class
   */
  public static <T> T getClassInstance(String context, String clazzName, Class<T> base)
      throws IOException, ReflectiveOperationException {
    T instance;

    Class<? extends T> clazz = ClassLoaderUtil.loadClass(context, clazzName, base);
    instance = clazz.getDeclaredConstructor().newInstance();
    // Only log when a new class object is seen for this name, to avoid log spam.
    if (loaded.put(clazzName, clazz) != clazz)
      log.debug("Loaded class : {}", clazzName);

    return instance;
  }

  /**
   * Get the number of threads from string property. If the value ends with C, then it will be
   * multiplied by the number of cores.
   *
   * @param threads
   *          thread count spec; null falls back to the bulk-load default
   * @return the resolved number of threads
   */
  public static int getNumThreads(String threads) {
    if (threads == null) {
      threads = ClientProperty.BULK_LOAD_THREADS.getDefaultValue();
    }
    int nThreads;
    if (threads.toUpperCase().endsWith("C")) {
      nThreads = Runtime.getRuntime().availableProcessors()
          * Integer.parseInt(threads.substring(0, threads.length() - 1));
    } else {
      nThreads = Integer.parseInt(threads);
    }
    return nThreads;
  }
}
| 2,775 |
5,964 |
<gh_stars>1000+
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED
#include <new>
#include "GrBatchTarget.h"
#include "GrGeometryProcessor.h"
#include "GrVertices.h"
#include "SkAtomics.h"
#include "SkRefCnt.h"
#include "SkTypes.h"
class GrGpu;
class GrPipeline;
struct GrInitInvariantOutput;
/*
* GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
* reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
* captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
* subclasses complete freedom to decide how / what they can batch.
*
* Batches are created when GrContext processes a draw call. Batches of the same subclass may be
* merged using combineIfPossible. When two batches merge, one takes on the union of the data
* and the other is left empty. The merged batch becomes responsible for drawing the data from both
* the original batches.
*
* If there are any possible optimizations which might require knowing more about the full state of
* the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this
* information will be communicated to the GrBatch prior to geometry generation.
*/
class GrBatch : public SkRefCnt {
public:
    // Batches start with an illegal class id; subclasses must call initClassID().
    GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) }
    virtual ~GrBatch() {}
    // Human-readable batch name, for debugging/tracing.
    virtual const char* name() const = 0;
    virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
    virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
    /*
     * initBatchTracker is a hook for some additional overrides / optimization possibilities
     * from the GrXferProcessor.
     */
    virtual void initBatchTracker(const GrPipelineInfo& init) = 0;
    // Attempts to merge 'that' into this batch; only batches of the same
    // subclass (same class id) are ever asked to combine.
    bool combineIfPossible(GrBatch* that) {
        if (this->classID() != that->classID()) {
            return false;
        }
        return this->onCombineIfPossible(that);
    }
    virtual bool onCombineIfPossible(GrBatch*) = 0;
    // Emits the deferred geometry for this batch into the batch target.
    virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0;
    const SkRect& bounds() const { return fBounds; }
    // TODO this goes away when batches are everywhere
    void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
    int numberOfDraws() const { return fNumberOfDraws; }
    // Custom allocation operators; definitions live outside this header.
    void* operator new(size_t size);
    void operator delete(void* target);
    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }
    /**
     * Helper for down-casting to a GrBatch subclass
     */
    template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
    template <typename T> T* cast() { return static_cast<T*>(this); }
    uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }
    // TODO no GrPrimitiveProcessors yet read fragment position
    bool willReadFragmentPosition() const { return false; }
    SkDEBUGCODE(bool isUsed() const { return fUsed; })
protected:
    // Assigns this subclass a process-unique class id; generated once per
    // subclass via the function-local static.
    template <typename PROC_SUBCLASS> void initClassID() {
        static uint32_t kClassID = GenClassID();
        fClassID = kClassID;
    }
    uint32_t fClassID;
    // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds
    // rect because we outset it for dst copy textures
    void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
    void joinBounds(const SkRect& otherBounds) {
        return fBounds.joinPossiblyEmptyRect(otherBounds);
    }
    /** Helper for rendering instances using an instanced index buffer. This class creates the
        space for the vertices and flushes the draws to the batch target.*/
    class InstancedHelper {
    public:
        InstancedHelper() {}
        /** Returns the allocated storage for the vertices. The caller should populate the
            vertices before calling issueDraws(). */
        void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride,
                   const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance,
                   int instancesToDraw);
        /** Call after init() to issue draws to the batch target.*/
        void issueDraw(GrBatchTarget* batchTarget) {
            SkASSERT(fVertices.instanceCount());
            batchTarget->draw(fVertices);
        }
    private:
        GrVertices fVertices;
    };
    static const int kVerticesPerQuad = 4;
    static const int kIndicesPerQuad = 6;
    /** A specialization of InstancedHelper for quad rendering. */
    class QuadHelper : private InstancedHelper {
    public:
        QuadHelper() : INHERITED() {}
        /** Finds the cached quad index buffer and reserves vertex space. Returns NULL on failure
            and on success a pointer to the vertex data that the caller should populate before
            calling issueDraws(). */
        void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw);
        using InstancedHelper::issueDraw;
    private:
        typedef InstancedHelper INHERITED;
    };
    SkRect fBounds;
private:
    static uint32_t GenClassID() {
        // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The
        // atomic inc returns the old value not the incremented value. So we add
        // 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                   "subclass.");
        }
        return id;
    }
    enum {
        kIllegalBatchClassID = 0,
    };
    static int32_t gCurrBatchClassID;
    SkDEBUGCODE(bool fUsed;)
    // Number of draw calls this batch will emit; see setNumberOfDraws().
    int fNumberOfDraws;
    typedef SkRefCnt INHERITED;
};
#endif
| 2,168 |
435 |
<reponame>Montana/datawave
package datawave.webservice.results.mr;
import java.io.IOException;
import java.io.Serializable;
import javax.xml.bind.annotation.XmlAccessOrder;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorOrder;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlTransient;
import org.apache.commons.lang.builder.HashCodeBuilder;
import io.protostuff.Input;
import io.protostuff.Message;
import io.protostuff.Output;
import io.protostuff.Schema;
@XmlAccessorType(XmlAccessType.NONE)
@XmlAccessorOrder(XmlAccessOrder.ALPHABETICAL)
public class JobExecution implements Serializable, Message<JobExecution>, Comparable<JobExecution> {
    private static final long serialVersionUID = 1L;
    // The Hadoop MapReduce job id for this execution.
    @XmlAttribute(name = "mapReduceJobId")
    private String mapReduceJobId;
    // Timestamp of this execution record, in milliseconds
    // (presumably epoch millis — confirm with callers).
    @XmlAttribute(name = "timestamp")
    private long timestamp;
    // Job state label; the exact vocabulary is set by callers.
    @XmlAttribute(name = "state")
    private String state;
    public String getMapReduceJobId() {
        return mapReduceJobId;
    }
    public long getTimestamp() {
        return timestamp;
    }
    public String getState() {
        return state;
    }
    public void setMapReduceJobId(String mapReduceJobId) {
        this.mapReduceJobId = mapReduceJobId;
    }
    public void setTimestamp(long timestamp) {
        this.timestamp = timestamp;
    }
    public void setState(String state) {
        this.state = state;
    }
    @Override
    public int hashCode() {
        return new HashCodeBuilder(17, 37).append(this.getTimestamp()).append(this.getMapReduceJobId()).append(this.getState()).toHashCode();
    }
    // NOTE(review): equals/compareTo assume mapReduceJobId and state are
    // non-null — confirm that callers always set them before comparing.
    @Override
    public boolean equals(Object o) {
        if (null == o)
            return false;
        if (o == this)
            return true;
        if (o.getClass() != this.getClass())
            return false;
        JobExecution other = (JobExecution) o;
        if (this.mapReduceJobId.equals(other.mapReduceJobId) && this.state.equals(other.state) && this.timestamp == other.timestamp)
            return true;
        return false;
    }
    @Override
    public int compareTo(JobExecution o) {
        // sort by the map reduce job id, timestamp, and then state
        int result = this.mapReduceJobId.compareTo(o.mapReduceJobId);
        if (result != 0)
            return result;
        else {
            if (this.timestamp < o.timestamp)
                return -1;
            else if (this.timestamp > o.timestamp)
                return 1;
            else {
                return this.state.compareTo(o.state);
            }
        }
    }
    @Override
    public Schema<JobExecution> cachedSchema() {
        return SCHEMA;
    }
    public static Schema<JobExecution> getSchema() {
        return SCHEMA;
    }
    // Protostuff (de)serialization schema.
    // Field numbers: 1 = mapReduceJobId, 2 = timestamp, 3 = state.
    @XmlTransient
    private static final Schema<JobExecution> SCHEMA = new Schema<JobExecution>() {
        // schema methods
        public JobExecution newMessage() {
            return new JobExecution();
        }
        public Class<JobExecution> typeClass() {
            return JobExecution.class;
        }
        public String messageName() {
            return JobExecution.class.getSimpleName();
        }
        public String messageFullName() {
            return JobExecution.class.getName();
        }
        public boolean isInitialized(JobExecution message) {
            return true;
        }
        // Serializes only the non-null fields; timestamp is always written.
        public void writeTo(Output output, JobExecution message) throws IOException {
            if (message.getMapReduceJobId() != null) {
                output.writeString(1, message.getMapReduceJobId(), false);
            }
            output.writeUInt64(2, message.getTimestamp(), false);
            if (message.getState() != null) {
                output.writeString(3, message.getState(), false);
            }
        }
        // Reads fields in any order; unknown field numbers are skipped.
        public void mergeFrom(Input input, JobExecution message) throws IOException {
            int number;
            while ((number = input.readFieldNumber(this)) != 0) {
                switch (number) {
                    case 1:
                        message.setMapReduceJobId(input.readString());
                        break;
                    case 2:
                        message.setTimestamp(input.readUInt64());
                        break;
                    case 3:
                        message.setState(input.readString());
                        break;
                    default:
                        input.handleUnknownField(number, this);
                        break;
                }
            }
        }
        public String getFieldName(int number) {
            switch (number) {
                case 1:
                    return "mapReduceJobId";
                case 2:
                    return "timestamp";
                case 3:
                    return "state";
                default:
                    return null;
            }
        }
        public int getFieldNumber(String name) {
            final Integer number = fieldMap.get(name);
            return number == null ? 0 : number.intValue();
        }
        // Reverse lookup from field name to field number.
        final java.util.HashMap<String,Integer> fieldMap = new java.util.HashMap<String,Integer>();
        {
            fieldMap.put("mapReduceJobId", 1);
            fieldMap.put("timestamp", 2);
            fieldMap.put("state", 3);
        }
    };
}
| 2,790 |
311 |
<reponame>dev-66/dd-trace-java
package com.datadog.appsec.config;
import com.datadog.appsec.util.Generated;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
import java.util.List;
import java.util.Map;
import java.util.Objects;
public class AppSecConfig {

  private static final JsonAdapter<AppSecConfig> ADAPTER =
      new Moshi.Builder().build().adapter(AppSecConfig.class);

  private String version;
  private List<Event> events;
  // The raw map is retained verbatim because DDWAF can't consume custom
  // objects yet; drop rawConfig once DDWAF accepts arbitrary objects.
  private Map<String, Object> rawConfig;

  private AppSecConfig() {}

  /** Parses the raw map into a config, keeping the original map alongside it. */
  static AppSecConfig createFromMap(Map<String, Object> rawConfig) {
    AppSecConfig parsed = ADAPTER.fromJsonValue(rawConfig);
    if (parsed != null) {
      parsed.rawConfig = rawConfig;
    }
    return parsed;
  }

  public List<Event> getEvents() {
    return events;
  }

  public Map<String, Object> getRawConfig() {
    return rawConfig;
  }

  /** One rule/event entry from the AppSec configuration. */
  public static class Event {
    private String id;
    private String name;
    private Map<String, String> tags;
    private Object conditions;
    private Object transformers;
    private Object action;

    public String getId() {
      return id;
    }

    public String getName() {
      return name;
    }

    public Map<String, String> getTags() {
      return tags;
    }
  }

  @Generated
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    AppSecConfig that = (AppSecConfig) o;
    return Objects.equals(version, that.version)
        && Objects.equals(events, that.events)
        && Objects.equals(rawConfig, that.rawConfig);
  }

  @Generated
  @Override
  public int hashCode() {
    return Objects.hash(version, events, rawConfig);
  }
}
| 658 |
1,393 |
<filename>petastorm/tests/generate_dataset_for_legacy_tests.py
# Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import petastorm
from petastorm.tests.test_common import create_test_dataset
def generate_dataset_for_legacy_test():
    """Generate a test dataset under petastorm/tests/data/legacy/<version>.

    The directory name is taken from the installed petastorm.__version__,
    so each release gets its own fixture for the legacy-compatibility tests.
    """
    version_dir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'data', 'legacy', petastorm.__version__)
    create_test_dataset('file://' + version_dir, range(100))
if __name__ == '__main__':
    # Run directly to (re)generate the legacy fixture dataset.
    generate_dataset_for_legacy_test()
| 393 |
357 |
<reponame>dcnieho/Psychtoolbox-3
/*
SCREENHideCursorHelper.c
AUTHORS:
<EMAIL> awi
<EMAIL> mk
PLATFORMS:
All.
HISTORY:
12/14/02 awi Created.
10/12/04 awi Changed "SCREEN" to "Screen" in useString.
11/16/04 awi Added "Helper" suffix.
DESCRIPTION:
Hides the mouse pointer
TO DO:
*/
#include "Screen.h"
// If you change the useString then also change the corresponding synopsis string in ScreenSynopsis.c
static char useString[] = "Screen('HideCursorHelper', windowPntr [, mouseIndex]);";
// 1 2
static char synopsisString[] =
"This is a helper function called by HideCursor. Do not call Screen(\'HideCursorHelper\'), use "
"HideCursor instead.\n"
"Hides the mouse pointer";
static char seeAlsoString[] = "ShowCursorHelper";
// Hides the mouse pointer on the given screen. Called from the HideCursor
// M-file wrapper; not intended to be invoked directly by users.
PsychError SCREENHideCursorHelper(void)
{
    int screenNumber, mouseIdx;

    //all subfunctions should have these two lines.
    PsychPushHelp(useString, synopsisString, seeAlsoString);
    if(PsychIsGiveHelp()){PsychGiveHelp();return(PsychError_none);};

    PsychErrorExit(PsychCapNumInputArgs(2)); //The maximum number of inputs
    PsychErrorExit(PsychCapNumOutputArgs(0)); //The maximum number of outputs

    //get the screen number from the window record or screen number
    PsychCopyInScreenNumberArg(1, TRUE, &screenNumber);
    // Optional 2nd argument selects a specific pointer device;
    // -1 (the default) addresses the default pointer.
    mouseIdx = -1;
    PsychCopyInIntegerArg(2, FALSE, &mouseIdx);
    PsychHideCursor(screenNumber, mouseIdx);
    return(PsychError_none);
}
| 576 |
482 |
package io.nutz.demo.simple;
import org.nutz.boot.starter.caffeine.UpdateStrategy;
import org.nutz.ioc.loader.annotation.IocBean;
import org.nutz.lang.Lang;
import org.nutz.mvc.Mvcs;
@IocBean
public class MvcUpdateStrategy implements UpdateStrategy {

    /**
     * Force a cache refresh whenever the current request carries update=true.
     */
    @Override
    public boolean shouldUpdate(String key) {
        String updateFlag = Mvcs.getReq().getParameter("update");
        return Lang.parseBoolean(updateFlag);
    }
}
| 179 |
372 |
<reponame>kbore/pbis-open
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*-
* ex: set softtabstop=4 tabstop=8 expandtab shiftwidth=4: *
* Editor Settings: expandtabs and use 4 spaces for indentation */
/*
* Copyright © BeyondTrust Software 2004 - 2019
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* BEYONDTRUST MAKES THIS SOFTWARE AVAILABLE UNDER OTHER LICENSING TERMS AS
* WELL. IF YOU HAVE ENTERED INTO A SEPARATE LICENSE AGREEMENT WITH
* BEYONDTRUST, THEN YOU MAY ELECT TO USE THE SOFTWARE UNDER THE TERMS OF THAT
* SOFTWARE LICENSE AGREEMENT INSTEAD OF THE TERMS OF THE APACHE LICENSE,
* NOTWITHSTANDING THE ABOVE NOTICE. IF YOU HAVE QUESTIONS, OR WISH TO REQUEST
* A COPY OF THE ALTERNATE LICENSING TERMS OFFERED BY BEYONDTRUST, PLEASE CONTACT
* BEYONDTRUST AT beyondtrust.com/contact
*/
/*
* Copyright (C) BeyondTrust Software. All rights reserved.
*
* Module Name:
*
* samr_getaliasmembership.c
*
* Abstract:
*
* Remote Procedure Call (RPC) Client Interface
*
* SamrGetAliasMembership function
*
* Authors: <NAME> (<EMAIL>)
*/
#include "includes.h"
/* Queries the domain for the alias RIDs that the given SIDs are members of.
 * On success *ppdwRids receives an array (allocated via SamrAllocateMemory,
 * free with SamrFreeMemory) and *pdwCount its element count. */
NTSTATUS
SamrGetAliasMembership(
    IN SAMR_BINDING hBinding,
    IN DOMAIN_HANDLE hDomain,
    IN PSID *ppSids,
    IN DWORD dwNumSids,
    OUT PDWORD *ppdwRids,
    OUT PDWORD pdwCount
    )
{
    NTSTATUS ntStatus = STATUS_SUCCESS;
    UINT32 iSid = 0;
    SID_ARRAY Sids = {0};
    IDS Rids = {0};
    UINT32 *pRids = NULL;
    DWORD dwOffset = 0;
    DWORD dwSpaceLeft = 0;
    DWORD dwSize = 0;

    /* Validate every in/out pointer before use; macros jump to error:. */
    BAIL_ON_INVALID_PTR(hBinding, ntStatus);
    BAIL_ON_INVALID_PTR(hDomain, ntStatus);
    BAIL_ON_INVALID_PTR(ppSids, ntStatus);
    BAIL_ON_INVALID_PTR(ppdwRids, ntStatus);
    BAIL_ON_INVALID_PTR(pdwCount, ntStatus);

    /* Wrap the caller's SID pointers in the wire-format SID_ARRAY. */
    Sids.dwNumSids = dwNumSids;
    ntStatus = SamrAllocateMemory(OUT_PPVOID(&Sids.pSids),
                                  sizeof(Sids.pSids[0]) * dwNumSids);
    BAIL_ON_NT_STATUS(ntStatus);
    for (iSid = 0; iSid < dwNumSids; iSid++)
    {
        Sids.pSids[iSid].pSid = ppSids[iSid];
    }

    /* Perform the remote call; Rids receives the stub-allocated result. */
    DCERPC_CALL(ntStatus, cli_SamrGetAliasMembership(
                              (handle_t)hBinding,
                              hDomain,
                              &Sids,
                              &Rids));
    BAIL_ON_NT_STATUS(ntStatus);

    /* Copy the stub RIDs into memory owned by the SAMR allocator so the
       caller can free it with SamrFreeMemory. */
    dwSpaceLeft = sizeof(pRids[0]) * Rids.dwCount;
    dwSize = 0;
    ntStatus = SamrAllocateMemory(OUT_PPVOID(&pRids),
                                  dwSpaceLeft);
    BAIL_ON_NT_STATUS(ntStatus);
    ntStatus = SamrAllocateIds(pRids,
                               &dwOffset,
                               &dwSpaceLeft,
                               &Rids,
                               &dwSize);
    BAIL_ON_NT_STATUS(ntStatus);
    *ppdwRids = pRids;
    *pdwCount = Rids.dwCount;
cleanup:
    /* Always release the stub-allocated Rids from the RPC runtime. */
    SamrCleanStubIds(&Rids);
    return ntStatus;
error:
    /* On failure, free any partial result and zero the out parameters. */
    if (pRids)
    {
        SamrFreeMemory(pRids);
    }
    if (ppdwRids)
    {
        *ppdwRids = NULL;
    }
    if (pdwCount)
    {
        *pdwCount = 0;
    }
    goto cleanup;
}
| 1,770 |
1,587 |
package io.reflectoring.featureflags.implementations.contextsensitive;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
public class Feature {

    /** How a feature value is rolled out to users. */
    public enum RolloutStrategy {
        GLOBAL,
        PERCENTAGE;
    }

    private final RolloutStrategy rolloutStrategy;
    private final int percentage;
    private final String value;
    private final String defaultValue;

    /**
     * @param rolloutStrategy GLOBAL serves the configured value to everyone;
     *                        PERCENTAGE serves it to roughly {@code percentage}% of users.
     * @param value           the configured feature value (boolean or int, as a string)
     * @param defaultValue    the fallback value for users outside the rollout
     * @param percentage      rollout percentage; only consulted for PERCENTAGE
     */
    public Feature(RolloutStrategy rolloutStrategy, String value, String defaultValue, int percentage) {
        this.rolloutStrategy = rolloutStrategy;
        this.percentage = percentage;
        this.value = value;
        this.defaultValue = defaultValue;
    }

    /** Evaluates the feature as a boolean for the given user. */
    public boolean evaluateBoolean(String userId) {
        return servesConfiguredValue(userId) ? getBooleanValue() : getBooleanDefaultValue();
    }

    /** Evaluates the feature as an integer for the given user. */
    public Integer evaluateInt(String userId) {
        return servesConfiguredValue(userId) ? getIntValue() : getIntDefaultValue();
    }

    /**
     * Decides whether this user falls inside the rollout and should see the
     * configured value rather than the default. Shared by both evaluate
     * methods to remove the previously duplicated switch logic.
     */
    private boolean servesConfiguredValue(String userId) {
        switch (this.rolloutStrategy) {
            case GLOBAL:
                return true;
            case PERCENTAGE:
                // The hash maps a user deterministically into [0, 100), so a
                // given user is stably in or out of a percentage bucket.
                return percentageHashCode(userId) <= this.percentage;
            default:
                return false;
        }
    }

    /**
     * Hashes the text deterministically into the range [0, 100).
     * Note: the SHA-256 digest is reduced via Arrays.hashCode(), so the
     * distribution relies on that 32-bit fold rather than the full digest.
     */
    double percentageHashCode(String text) {
        try {
            MessageDigest digest = MessageDigest.getInstance("SHA-256");
            byte[] encodedhash = digest.digest(
                    text.getBytes(StandardCharsets.UTF_8));
            double INTEGER_RANGE = 1L << 32;
            return (((long) Arrays.hashCode(encodedhash) - Integer.MIN_VALUE) / INTEGER_RANGE) * 100;
        } catch (NoSuchAlgorithmException e) {
            // SHA-256 is mandatory on every compliant JVM, so this is unreachable.
            throw new IllegalStateException(e);
        }
    }

    public RolloutStrategy getTargeting() {
        return rolloutStrategy;
    }

    public int getPercentage() {
        return percentage;
    }

    public int getIntValue() {
        return Integer.parseInt(this.value);
    }

    public int getIntDefaultValue() {
        return Integer.parseInt(this.defaultValue);
    }

    public boolean getBooleanValue() {
        return Boolean.parseBoolean(this.value);
    }

    public boolean getBooleanDefaultValue() {
        return Boolean.parseBoolean(this.defaultValue);
    }

    public String getStringValue() {
        return value;
    }

    public String getDefaultValue() {
        return defaultValue;
    }
}
| 1,230 |
515 |
package br.com.caelum.stella.nfe.security;
// Supported token-access mechanisms for NF-e signing.
public enum TokenAlgorithm {
    // Cryptographic token interface standard (PKCS#11) for hardware tokens.
    PKCS11
}
| 34 |
12,278 |
# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
from advisor.db_log_parser import NO_COL_FAMILY
from advisor.db_options_parser import DatabaseOptions
from advisor.rule_parser import Suggestion
import copy
import random
class ConfigOptimizer:
SCOPE = 'scope'
SUGG_VAL = 'suggested values'
    @staticmethod
    def apply_action_on_value(old_value, action, suggested_values):
        """Compute a new option value from old_value per the suggestion action.

        For 'set' (or when there is no old value) a random suggested value is
        used; otherwise the old value is scaled up or down by roughly 30%.
        Raises AssertionError when 'set' is requested without suggested_values.
        """
        chosen_sugg_val = None
        if suggested_values:
            # Pick one candidate at random from the suggested values.
            chosen_sugg_val = random.choice(list(suggested_values))
        new_value = None
        if action is Suggestion.Action.set or not old_value:
            assert(chosen_sugg_val)
            new_value = chosen_sugg_val
        else:
            # For increase/decrease actions, currently the code tries to make
            # a 30% change in the option's value per iteration. An addend is
            # also present (+2 or -2) to handle the cases when the option's
            # old value was 0 or the final int() conversion suppressed the 30%
            # change made to the option
            old_value = float(old_value)
            mul = 0
            add = 0
            if action is Suggestion.Action.increase:
                if old_value < 0:
                    # "Increasing" a negative value moves it toward zero.
                    mul = 0.7
                    add = 2
                else:
                    mul = 1.3
                    add = 2
            elif action is Suggestion.Action.decrease:
                if old_value < 0:
                    mul = 1.3
                    add = -2
                else:
                    mul = 0.7
                    add = -2
            new_value = int(old_value * mul + add)
        return new_value
    @staticmethod
    def improve_db_config(options, rule, suggestions_dict):
        """Apply all suggestions of ONE rule to the relevant options.

        Returns (current_config, updated_config): the option values before the
        rule was applied and the proposed new values. Suggestions that require
        suggested_values but lack them are skipped with a warning.
        """
        # this method takes ONE 'rule' and applies all its suggestions on the
        # appropriate options
        required_options = []
        rule_suggestions = []
        for sugg_name in rule.get_suggestions():
            option = suggestions_dict[sugg_name].option
            action = suggestions_dict[sugg_name].action
            # A Suggestion in the rules spec must have the 'option' and
            # 'action' fields defined, always call perform_checks() method
            # after parsing the rules file using RulesSpec
            assert(option)
            assert(action)
            required_options.append(option)
            rule_suggestions.append(suggestions_dict[sugg_name])
        current_config = options.get_options(required_options)
        # Create the updated configuration from the rule's suggestions
        updated_config = {}
        for sugg in rule_suggestions:
            # case: when the option is not present in the current configuration
            if sugg.option not in current_config:
                try:
                    # No old value exists, so a suggested value must be set.
                    new_value = ConfigOptimizer.apply_action_on_value(
                        None, sugg.action, sugg.suggested_values
                    )
                    if sugg.option not in updated_config:
                        updated_config[sugg.option] = {}
                    if DatabaseOptions.is_misc_option(sugg.option):
                        # this suggestion is on an option that is not yet
                        # supported by the Rocksdb OPTIONS file and so it is
                        # not prefixed by a section type.
                        updated_config[sugg.option][NO_COL_FAMILY] = new_value
                    else:
                        for col_fam in rule.get_trigger_column_families():
                            updated_config[sugg.option][col_fam] = new_value
                except AssertionError:
                    print(
                        'WARNING(ConfigOptimizer): provide suggested_values ' +
                        'for ' + sugg.option
                    )
                continue
            # case: when the option is present in the current configuration
            if NO_COL_FAMILY in current_config[sugg.option]:
                # DB-wide option: update the single (column-family-less) value.
                old_value = current_config[sugg.option][NO_COL_FAMILY]
                try:
                    new_value = ConfigOptimizer.apply_action_on_value(
                        old_value, sugg.action, sugg.suggested_values
                    )
                    if sugg.option not in updated_config:
                        updated_config[sugg.option] = {}
                    updated_config[sugg.option][NO_COL_FAMILY] = new_value
                except AssertionError:
                    print(
                        'WARNING(ConfigOptimizer): provide suggested_values ' +
                        'for ' + sugg.option
                    )
            else:
                # Per-column-family option: update each triggering family.
                for col_fam in rule.get_trigger_column_families():
                    old_value = None
                    if col_fam in current_config[sugg.option]:
                        old_value = current_config[sugg.option][col_fam]
                    try:
                        new_value = ConfigOptimizer.apply_action_on_value(
                            old_value, sugg.action, sugg.suggested_values
                        )
                        if sugg.option not in updated_config:
                            updated_config[sugg.option] = {}
                        updated_config[sugg.option][col_fam] = new_value
                    except AssertionError:
                        print(
                            'WARNING(ConfigOptimizer): provide ' +
                            'suggested_values for ' + sugg.option
                        )
        return current_config, updated_config
@staticmethod
def pick_rule_to_apply(rules, last_rule_name, rules_tried, backtrack):
if not rules:
print('\nNo more rules triggered!')
return None
# if the last rule provided an improvement in the database performance,
# and it was triggered again (i.e. it is present in 'rules'), then pick
# the same rule for this iteration too.
if last_rule_name and not backtrack:
for rule in rules:
if rule.name == last_rule_name:
return rule
# there was no previous rule OR the previous rule did not improve db
# performance OR it was not triggered for this iteration,
# then pick another rule that has not been tried yet
for rule in rules:
if rule.name not in rules_tried:
return rule
print('\nAll rules have been exhausted')
return None
    @staticmethod
    def apply_suggestions(
            triggered_rules,
            current_rule_name,
            rules_tried,
            backtrack,
            curr_options,
            suggestions_dict
    ):
        """Pick one triggered rule and compute the updated configuration.

        Returns a 4-tuple (rule, rules_tried, current_config,
        updated_config); all four entries are None when no applicable rule
        remains. When the picked rule produces no actual config change,
        recurses with no preferred rule to try the next untried one.
        """
        # choose the rule for this iteration; may repeat the previous rule
        # if it improved performance and was re-triggered
        curr_rule = ConfigOptimizer.pick_rule_to_apply(
            triggered_rules, current_rule_name, rules_tried, backtrack
        )
        if not curr_rule:
            return tuple([None]*4)
        # if a rule has been picked for improving db_config, update rules_tried
        rules_tried.add(curr_rule.name)
        # get updated config based on the picked rule
        curr_conf, updated_conf = ConfigOptimizer.improve_db_config(
            curr_options, curr_rule, suggestions_dict
        )
        conf_diff = DatabaseOptions.get_options_diff(curr_conf, updated_conf)
        if not conf_diff:  # the current and updated configs are the same
            # the rule was a no-op: recurse to pick another untried rule
            # (passing None so no rule is preferred on the next pick)
            curr_rule, rules_tried, curr_conf, updated_conf = (
                ConfigOptimizer.apply_suggestions(
                    triggered_rules,
                    None,
                    rules_tried,
                    backtrack,
                    curr_options,
                    suggestions_dict
                )
            )
        print('returning from apply_suggestions')
        return (curr_rule, rules_tried, curr_conf, updated_conf)
# TODO(poojam23): check if this method is required or can we directly set
# the config equal to the curr_config
@staticmethod
def get_backtrack_config(curr_config, updated_config):
diff = DatabaseOptions.get_options_diff(curr_config, updated_config)
bt_config = {}
for option in diff:
bt_config[option] = {}
for col_fam in diff[option]:
bt_config[option][col_fam] = diff[option][col_fam][0]
print(bt_config)
return bt_config
    def __init__(self, bench_runner, db_options, rule_parser, base_db):
        """Set up the optimizer with its collaborators.

        Args:
            bench_runner: runs benchmark experiments (run_experiment) and
                compares metrics (is_metric_better).
            db_options: the database options object being tuned; deep-copied
                by run() so the original is not mutated.
            rule_parser: loads rule specs and reports triggered rules.
            base_db: path to the baseline database used by experiments.
        """
        self.bench_runner = bench_runner
        self.db_options = db_options
        self.rule_parser = rule_parser
        self.base_db_path = base_db
    def run(self):
        """Run the optimization loop and return the final DatabaseOptions.

        Each iteration picks ONE triggered rule, applies all its
        suggestions, re-runs the benchmark, and either keeps the new
        configuration (metric improved) or backtracks to the previous one.
        The loop ends when no untried triggered rule remains.
        """
        # In every iteration of this method's optimization loop we pick ONE
        # RULE from all the triggered rules and apply all its suggestions to
        # the appropriate options.
        # bootstrapping the optimizer
        print('Bootstrapping optimizer:')
        # work on a copy so the caller-supplied options are left untouched
        options = copy.deepcopy(self.db_options)
        old_data_sources, old_metric = (
            self.bench_runner.run_experiment(options, self.base_db_path)
        )
        print('Initial metric: ' + str(old_metric))
        self.rule_parser.load_rules_from_spec()
        self.rule_parser.perform_section_checks()
        triggered_rules = self.rule_parser.get_triggered_rules(
            old_data_sources, options.get_column_families()
        )
        print('\nTriggered:')
        self.rule_parser.print_rules(triggered_rules)
        backtrack = False
        rules_tried = set()
        curr_rule, rules_tried, curr_conf, updated_conf = (
            ConfigOptimizer.apply_suggestions(
                triggered_rules,
                None,
                rules_tried,
                backtrack,
                options,
                self.rule_parser.get_suggestions_dict()
            )
        )
        # the optimizer loop
        while curr_rule:
            print('\nRule picked for next iteration:')
            print(curr_rule.name)
            print('\ncurrent config:')
            print(curr_conf)
            print('updated config:')
            print(updated_conf)
            options.update_options(updated_conf)
            # run bench_runner with updated config
            new_data_sources, new_metric = (
                self.bench_runner.run_experiment(options, self.base_db_path)
            )
            print('\nnew metric: ' + str(new_metric))
            # backtrack when the new configuration did not improve the metric
            backtrack = not self.bench_runner.is_metric_better(
                new_metric, old_metric
            )
            # update triggered_rules, metric, data_sources, if required
            if backtrack:
                # revert changes to options config
                print('\nBacktracking to previous configuration')
                backtrack_conf = ConfigOptimizer.get_backtrack_config(
                    curr_conf, updated_conf
                )
                options.update_options(backtrack_conf)
            else:
                # run advisor on new data sources
                self.rule_parser.load_rules_from_spec()  # reboot the advisor
                self.rule_parser.perform_section_checks()
                triggered_rules = self.rule_parser.get_triggered_rules(
                    new_data_sources, options.get_column_families()
                )
                print('\nTriggered:')
                self.rule_parser.print_rules(triggered_rules)
                old_metric = new_metric
                old_data_sources = new_data_sources
                # config improved: every rule may be retried against it
                rules_tried = set()
            # pick rule to work on and set curr_rule to that
            curr_rule, rules_tried, curr_conf, updated_conf = (
                ConfigOptimizer.apply_suggestions(
                    triggered_rules,
                    curr_rule.name,
                    rules_tried,
                    backtrack,
                    options,
                    self.rule_parser.get_suggestions_dict()
                )
            )
        # return the final database options configuration
        return options
| 6,025 |
2,132 |
// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE
package org.bytedeco.tensorflow;
import org.bytedeco.tensorflow.Allocator;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.tensorflow.global.tensorflow.*;
// Local Tensor Handle: Handle to a Tensor present on the local host.
// NOTE(review): this class lives in a JavaCPP-generated file marked
// "DO NOT EDIT"; any change made here will be lost when the bindings are
// regenerated from the native headers.
@Namespace("tensorflow") @NoOffset @Properties(inherit = org.bytedeco.tensorflow.presets.tensorflow.class)
public class LocalTensorHandleData extends TensorHandleData {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public LocalTensorHandleData(Pointer p) { super(p); }

    /** Constructs the native peer around the given tensor. */
    public LocalTensorHandleData(@Const @ByRef Tensor t) { super((Pointer)null); allocate(t); }
    private native void allocate(@Const @ByRef Tensor t);

    // A local tensor handle should be able to satisfy all of these requests.
    // The overloads below differ only in the Java buffer/array flavor used to
    // receive the native out-parameters.
    public native @ByVal Status Tensor(@Cast("const tensorflow::Tensor**") PointerPointer t);
    public native @ByVal Status Tensor(@Const @ByPtrPtr Tensor t);
    public native @ByVal Status TensorValue(TensorValue t);
    public native @ByVal Status Shape(TensorShape shape);
    public native @ByVal Status NumDims(IntPointer num_dims);
    public native @ByVal Status NumDims(IntBuffer num_dims);
    public native @ByVal Status NumDims(int... num_dims);
    public native @ByVal Status Dim(int dim_index, @Cast("tensorflow::int64*") LongPointer dim);
    public native @ByVal Status Dim(int dim_index, @Cast("tensorflow::int64*") LongBuffer dim);
    public native @ByVal Status Dim(int dim_index, @Cast("tensorflow::int64*") long... dim);
    public native @ByVal Status NumElements(@Cast("tensorflow::int64*") LongPointer num_elements);
    public native @ByVal Status NumElements(@Cast("tensorflow::int64*") LongBuffer num_elements);
    public native @ByVal Status NumElements(@Cast("tensorflow::int64*") long... num_elements);
    public native @StdString BytePointer DebugString();
}
| 668 |
5,937 |
<gh_stars>1000+
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//-----------------------------------------------------------------------------
//
//
#include "precomp.hpp"
// Meters ----------------------------------------------------------------------
// Memory-meter registrations: MtDefine(tag, parent, description) accounts
// allocations tagged with `tag` under its `parent` meter, so CWatermarkStack
// usage rolls up into Stack, which rolls up into Mem.
// NOTE(review): MtDefine is a project macro brought in via precomp.hpp —
// confirm its exact semantics there.
MtDefine(Stack, Mem, "Stack");
MtDefine(CWatermarkStack, Stack, "CWatermarkStack");
| 114 |
1,223 |
<gh_stars>1000+
{
"template": "nodejs",
"ports": [3000],
"name": "messenger-bot",
"description": "A Node client for the Facebook Messenger Platform",
"scripts": {
"post-create": "echo 'running npm install - this might take awhile...' && npm install && sed -i 's/listen(3000)/listen(3000, \"0.0.0.0\")/g' example/echo.js",
"Start Messenger Echo Bot": "cd ~/code/messenger-bot && node example/echo.js"
}
}
| 152 |
1,236 |
// The MIT License (MIT)
// Copyright (c) 2016, Microsoft
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#pragma once
#include <array> // For arrays in FreeList.
#include <cstdint>
#include <iosfwd> // For debugging output.
#include "NativeJIT/AllocatorVector.h" // Embedded member.
#include "NativeJIT/CodeGen/JumpTable.h" // ExpressionTree embeds Label.
#include "NativeJIT/CodeGen/Register.h"
#include "NativeJIT/TypePredicates.h" // RegisterStorage used in typedef.
#include "Temporary/NonCopyable.h"
namespace Allocators
{
class IAllocator;
}
namespace NativeJIT
{
class ExecutionPreconditionTest;
class FunctionBuffer;
class NodeBase;
class RIPRelativeImmediate;
// A class which increases reference counter on construction and decreases
// it on destruction.
// This class is not thread safe.
class ReferenceCounter final
{
public:
// Default constructor performs no observable reference counting. Used
// in cases where an object of this class needs to be constructed before
// the counter object is known (f. ex. in constructors of classes which
// contain the ReferenceCounter as a member).
ReferenceCounter();
// Increases the reference count in the specified counter.
ReferenceCounter(unsigned& counter);
// Uses the counter in the other object as its counter and increases it.
ReferenceCounter(ReferenceCounter const & other);
// Decreases the current counter.
~ReferenceCounter();
// Decreases the current counter, replaces it with the one in the other
// object and increases that counter.
ReferenceCounter& operator=(ReferenceCounter const & other);
// Decreases the current counter and disassociates itself from it.
void Reset();
private:
// Pointer to the current counter.
unsigned* m_counter;
// Methods to add and remove a reference to the counter.
void AddReference();
void RemoveReference();
};
enum class StorageClass {Direct, Indirect, Immediate};
    // Owns the state needed to compile an expression tree into machine code:
    // the node/parameter lists, the integer and floating-point register free
    // lists, stack temporaries, and the multi-pass code generation driver.
    class ExpressionTree : public NonCopyable
    {
    private:
        class Data;

    public:
        template <typename T> class Storage;

        // Returns the function return register for the specified type.
        template <typename T>
        static typename Storage<T>::DirectRegister GetResultRegister();

        ExpressionTree(Allocators::IAllocator& allocator, FunctionBuffer& code);

        // Accessors for the allocator and code generator supplied at
        // construction time.
        Allocators::IAllocator& GetAllocator() const;
        FunctionBuffer& GetCodeGenerator() const;

        // Enable/disable printing of compilation diagnostics to a stream.
        void EnableDiagnostics(std::ostream& out);
        void DisableDiagnostics();

        // In-place constructs an object using the class allocator. The object's
        // lifetime cannot be longer than that of the ExpressionTree.
        template <typename T, typename... ConstructorArgs>
        T&
        PlacementConstruct(ConstructorArgs&&... constructorArgs);

        //
        // Tree construction
        //

        // Registers a node with the tree; returns the node's id.
        unsigned AddNode(NodeBase& node);

        // DESIGN NOTE: This might be better if ParameterNode<T> (and get position from it)
        // to ensure that other nodes can't be passed to AddParameter. To make
        // that possible, a circular include dependency between ExpressionTree.h
        // and Node.h (through ParameterNode.h) needs to be broken. Then forward
        // declaring ParameterNode<T> and using #include further below would work.
        void AddParameter(NodeBase& parameter, unsigned position);

        void AddRIPRelative(RIPRelativeImmediate& node);

        // Records that the tree makes a function call with the given number of
        // parameters (see m_maxFunctionCallParameters).
        void ReportFunctionCallNode(unsigned parameterCount);

        // Runs the compilation passes (Pass0-Pass3) over the registered nodes.
        void Compile();

        //
        // Storage allocation.
        //

        template <typename T>
        Storage<T> Direct();

        template <typename T>
        Storage<T> Direct(typename Storage<T>::DirectRegister r);

        template <typename T>
        Storage<T> RIPRelative(int32_t offset);

        // Returns indirect storage relative to the base pointer for a variable
        // of type T. It is guaranteed that it's legal to access the whole quadword
        // at the target address.
        template <typename T>
        Storage<T> Temporary();

        template <typename T>
        Storage<T> Immediate(T value);

        // Releases an allocated integer (false) or floating-point (true)
        // register back to its free list.
        template <unsigned SIZE>
        void ReleaseRegister(Register<SIZE, false> r);

        template <unsigned SIZE>
        void ReleaseRegister(Register<SIZE, true> r);

        // Given an offset off BaseRegister, checks whether the register
        // belongs to a valid offset describing an already allocated slot and,
        // if so, releases the corresponding temporary slot. Some valid offsets
        // off the base register that don't refer to temporaries include offsets
        // referring to compiled function's parameters.
        void ReleaseIfTemporary(int32_t offset);

        // Returns whether a register is pinned.
        template <unsigned SIZE, bool ISFLOAT>
        bool IsPinned(Register<SIZE, ISFLOAT> reg);

        // Bit masks of the currently allocated integer (RXX) and floating
        // point (XMM) registers.
        unsigned GetRXXUsedMask() const;
        unsigned GetXMMUsedMask() const;

        Label GetStartOfEpilogue() const;

    protected:
        bool IsDiagnosticsStreamAvailable() const;

        // Returns the diagnostic stream. Throws if it is not available.
        std::ostream& GetDiagnosticsStream() const;

        // Adds a precondition for executing the expression. See the
        // m_preconditionTests variable for more information.
        void AddExecutionPreconditionTest(ExecutionPreconditionTest& test);

        void const * GetUntypedEntryPoint() const;

    private:
        // Keeps track of used and free registers and the Data* associated with
        // each register.
        //
        // The Data* is kept so that it's easily possible to create another
        // Storage around an allocated register when necessary (f. ex. for
        // spilling. Thus, there's a required 1-1 mapping between a general
        // purpose register and the Data* that refers to it (either as Direct
        // or Indirect).
        //
        // Note that it's neither necessary nor possible to hold information
        // about a singular Data* for *Indirect* references to shared base
        // registers such as RSP and RIP since multiple Data* refer to them by
        // definition and since they don't need to be spilled. The Data* for
        // Direct reference to shared base registers is kept, though.
        template <unsigned REGISTER_COUNT, bool ISFLOAT>
        class FreeList
        {
        public:
            // The bit-mask signifying that all valid registers have been allocated.
            static const unsigned c_fullUsedMask = (1ul << REGISTER_COUNT) - 1;

            FreeList(Allocators::IAllocator& allocator);

            // Returns the number of unallocated registers.
            unsigned GetFreeCount() const;

            bool IsAvailable(unsigned id) const;

            unsigned Allocate();
            void Allocate(unsigned id);

            // Returns a pin for a register. Pinned register cannot be spilled.
            // IMPORTANT: Register pinning should be done in a very limited
            // scope. Otherwise, in a larger scope (f. ex. before a CodeGen()
            // call) there is a risk of pinning some of the registers used for
            // parameter passing, which would cause the compilation of a function
            // call to fail.
            ReferenceCounter GetPin(unsigned id);

            // Returns whether a register is pinned.
            bool IsPinned(unsigned id) const;

            void Release(unsigned id);

            // The methods to set and retrieve the Data* owned by the respective
            // register. The InitializeData() and UpdateData() methods differ
            // only in sanity checks: the former requires that no Data* was
            // previously set for the register whereas the latter requires that
            // a Data* already existed.
            void InitializeData(unsigned id, Data* data);
            void UpdateData(unsigned id, Data* data);
            Data* GetData(unsigned id) const;

            // Returns the bit-mask for used and allocated registers.
            unsigned GetUsedMask() const;
            unsigned GetFreeMask() const;

            // Returns a register mask specifying which registers were touched
            // at any point of time, regardless of whether they were later
            // released or not.
            unsigned GetLifetimeUsedMask() const;

            // Returns the ID of an allocated register that is not pinned and
            // can be spilled. Throws if there are no such registers available.
            unsigned GetAllocatedSpillable() const;

        private:
            // Helper methods to perform sanity check on arguments and data contents.
            void AssertValidID(unsigned id) const;
            void AssertValidData(unsigned id) const;
            void AssertValidData(unsigned id, Data* data) const;

            // The mask tracking registers which are currently allocated.
            unsigned m_usedMask;

            // The mask tracking registers which were touched at any point of
            // time, regardless of whether they were later released or not.
            unsigned m_lifetimeUsedMask;

            const unsigned m_volatileRegisterMask;
            const unsigned m_nonVolatileRegisterMask;

            // See the class description for more details.
            std::array<Data*, REGISTER_COUNT> m_data;

            // An array of currently allocated IDs, oldest at the beginning.
            // DESIGN NOTE: Deque and list better satisfy this variable's usage
            // pattern in general (elements always added at the back, mostly pulled
            // out from the front). However, given the small number of elements
            // and the simplicity of the vector, it is likely to perform better.
            AllocatorVector<uint8_t> m_allocatedRegisters;

            // Number of active references to a pinned register. The register
            // cannot be spilled while it's pinned.
            std::array<unsigned, REGISTER_COUNT> m_pinCount;
        };

        bool IsBasePointer(PointerRegister r) const;
        PointerRegister GetBasePointer() const;

        // Returns whether the register is one of the reserved/shared base
        // registers (instruction, stack or base pointer).
        template <unsigned SIZE>
        bool IsAnySharedBaseRegister(Register<SIZE, false> r) const;

        template <unsigned SIZE>
        bool IsAnySharedBaseRegister(Register<SIZE, true> r) const;

        // Converts a valid temporary slot index into an offset off base register.
        int32_t TemporarySlotToOffset(unsigned temporarySlot);

        // If the temporary offset off base register describes a valid allocated
        // temporary slot, returns true and fills in the temporary slot out
        // parameter. Returns false otherwise.
        bool TemporaryOffsetToSlot(int32_t temporaryOffset, unsigned& temporarySlot);

        // The sequential compilation passes executed by Compile().
        void Pass0();
        void Pass1();
        void Pass2();
        void Pass3();

        // Prints the tree's state to the diagnostics stream.
        void Print() const;

        // The following template and the alias template provide a way to access
        // the free list for a register and a C++ type respectively.
        template <bool ISFLOAT>
        class FreeListForRegister;

        template <typename T>
        using FreeListForType = class FreeListForRegister<RegisterStorage<T>::c_isFloat>;

        // The allocator and STL-compatible wrapper around it.
        Allocators::IAllocator& m_allocator;
        Allocators::StlAllocator<void*> m_stlAllocator;

        // Target buffer that receives the generated code.
        FunctionBuffer & m_code;

        // Stream used to print diagnostics or nullptr if disabled.
        std::ostream* m_diagnosticsStream;

        // Nodes in evaluation order, the parameter nodes, and the
        // RIP-relative immediates registered with the tree.
        AllocatorVector<NodeBase*> m_topologicalSort;
        AllocatorVector<NodeBase*> m_parameters;
        AllocatorVector<RIPRelativeImmediate*> m_ripRelatives;

        // Preconditions for evaluating the whole expression. The preconditions
        // are evaluated right after the parameters and will cause the function
        // to return early if any of them is not met.
        AllocatorVector<ExecutionPreconditionTest*> m_preconditionTests;

        // Free lists for the integer (RXX) and floating-point (XMM) registers.
        FreeList<RegisterBase::c_maxIntegerRegisterID + 1, false> m_rxxFreeList;
        FreeList<RegisterBase::c_maxFloatRegisterID + 1, true> m_xmmFreeList;

        // Storages and pins holding the reserved registers for the duration
        // of the compilation.
        AllocatorVector<Storage<void*>> m_reservedRxxRegisterStorages;
        AllocatorVector<Storage<double>> m_reservedXmmRegisterStorages;
        AllocatorVector<ReferenceCounter> m_reservedRegistersPins;

        // Count and base-register offsets of the allocated stack temporaries.
        unsigned m_temporaryCount;
        AllocatorVector<int32_t> m_temporaries;

        // Maximum number of parameters used in function calls done by the tree.
        // Negative value signifies no function calls made.
        int m_maxFunctionCallParameters;

        PointerRegister m_basePointer;
        Label m_startOfEpilogue;
    };
    // Reference-counted description of where a value lives: a register
    // (Direct), memory at register+offset (Indirect), or an immediate.
    // Shared by all Storage<T> objects referring to the same location.
    class ExpressionTree::Data : public NonCopyable
    {
    public:
        // Creates a Data referring to the given register.
        template <unsigned SIZE, bool ISFLOAT>
        Data(ExpressionTree& tree, Register<SIZE, ISFLOAT> r);

        // Creates a Data referring to memory at [r + offset].
        // DESIGN NOTE: It would be better if indirect memory knew the size of the memory it
        // points to, otherwise access violation can happen if larger, non-owned
        // memory area is dereferenced.
        // Also applies to ConvertDirectToIndirect and Storage::Direct(Register).
        Data(ExpressionTree& tree, PointerRegister r, int32_t offset);

        // Creates a Data holding an immediate value.
        // Note: attempt to create an immediate storage will static_assert for
        // invalid immediates.
        template <typename T>
        Data(ExpressionTree& tree, T value);

        ExpressionTree& GetTree() const;

        StorageClass GetStorageClass() const;

        unsigned GetRegisterId() const;
        int32_t GetOffset() const;

        // Note: attempt to GetImmediate() will static_assert for invalid immediates.
        template <typename T>
        T GetImmediate() const;

        // Conversions between the Direct and Indirect storage classes.
        void ConvertDirectToIndirect(int32_t offset);
        void ConvertIndirectToDirect();

        // Reference counting interface used by Storage<T>.
        unsigned GetRefCount() const;
        unsigned Decrement();
        void Increment();

        // Swaps the targets between two Data objects keeping the reference
        // count unchanged and notifies the free list of the register change.
        // Used when all clients of both data objects need to have the contents
        // exchanged.
        void SwapContents(Data* other);

    private:
        // WARNING: This class is designed to be allocated by an arena allocator,
        // so its destructor will never be called. Therefore, it should hold no
        // resources other than memory from the arena allocator.
        ~Data();

        // The type of the register change that the free list gets notified about.
        enum class RegisterChangeType { Initialize, Update };

        // Returns true if data's storage class is direct/immediate and if the
        // register it refers to is one of the shared registers.
        bool IsSharedBaseRegister() const;

        // Notifies the free list that register ID of this data object has
        // been modified. Calls the templated version of the same method to
        // perform the actual work.
        void NotifyDataRegisterChange(RegisterChangeType type);

        // See above.
        template <bool ISFLOAT>
        void NotifyDataRegisterChange(RegisterChangeType type);

        ExpressionTree& m_tree;

        // How the data is stored.
        StorageClass m_storageClass;

        // Which register.
        bool m_isFloat;
        unsigned m_registerId;
        int32_t m_offset;

        // Holds the immediate value for Data whose storage class is Immediate.
        size_t m_immediate;

        // Who is using it.
        unsigned m_refCount;
    };
    // Storage should be public because it is a return type for Node.
    // Storage should be private so that its constructor can take an m_data.
    // A typed, copyable handle to a Data location; copies share the
    // underlying Data via its reference count (see IsSoleDataOwner()).
    template <typename T>
    class ExpressionTree::Storage
    {
    public:
        // All Storages need to be treated as the same class, regardless of
        // the template parameter.
        template <typename U> friend class Storage;

        typedef typename RegisterStorage<T>::RegisterType DirectRegister;
        typedef PointerRegister BaseRegister;
        typedef typename DirectRegister::FullRegister FullRegister;

        // Types of swaps. The Single swap affects only the two storages directly
        // involved by swapping their Data*. The AllReferences swap modifies the
        // underlying Data objects thus swapping the data for the first storage
        // as well as the storages that share the same Data* with the second
        // storage as well as the storages that share its Data*.
        enum class SwapType { Single, AllReferences };

        // Creates a null storage (see IsNull()).
        Storage();

        template <typename U>
        explicit Storage(const Storage<U>& other);

        Storage(Storage const & other);

        // Takes ownership of a base storage, adds the offset to it and
        // dereferences it to produce a Storage<T>. Equivalent to
        // *static_cast<T*>(base + byteOffset).
        Storage(Storage<void*>&& base, int32_t byteOffset);

        // Creates another storage referencing a register. The register must
        // already be allocated. For shared base registers, the Storage will
        // reference the direct Data version which cannot be converted to
        // indirect nor spilled since it's pinned.
        static Storage<T> ForAdditionalReferenceToRegister(
            ExpressionTree& tree,
            DirectRegister reg);

        // Allocates an empty register and creates a direct storage referencing it.
        // Can only be called when there are available registers.
        static Storage<T> ForAnyFreeRegister(ExpressionTree& tree);

        // Allocates a specific empty register and creates a direct storage
        // referencing it. Can only be called when the specified register is free.
        static Storage<T> ForFreeRegister(ExpressionTree& tree,
                                          DirectRegister reg);

        // Creates an indirect storage against a shared base register.
        static Storage<T> ForSharedBaseRegister(ExpressionTree& tree,
                                                BaseRegister base,
                                                int32_t offset);

        // Creates an immediate storage with the specified value.
        static Storage<T> ForImmediate(ExpressionTree& tree, T value);

        // Loads the address of an immediate storage into a newly created direct
        // storage and resets the original storage. If possible, reuses the
        // register from the source storage.
        // Note: do not call this method with a last reference to a Temporary as
        // the Temporary may be released after the source storage is reset and
        // the created storage would point to invalid the memory.
        template <typename U>
        static Storage<T> AddressOfIndirect(Storage<U>&& indirect);

        Storage& operator=(Storage const & other);

        ~Storage();

        // Drops this storage's reference to its Data, leaving it null.
        void Reset();
        bool IsNull() const;

        // Returns whether this Storage shares ownership over its Data*.
        // Note that for direct storages, IsSoleDataOwner() result of true
        // implies that the storage also exclusively owns the register. For
        // indirect storages, this implication stands only for non-shared base
        // registers.
        bool IsSoleDataOwner() const;

        StorageClass GetStorageClass() const;

        // Accessors valid for the matching storage class (direct register,
        // or indirect base register plus offset).
        DirectRegister GetDirectRegister() const;
        BaseRegister GetBaseRegister() const;
        int32_t GetOffset() const;

        // Note: The method must be removed for invalid immediates rather than
        // static_assert triggered because for some of them (arrays) the method
        // has invalid declaration otherwise.
        template <typename U = T,
                  typename ENABLED = typename std::enable_if<ImmediateCategoryOf<U>::value
                                     == ImmediateCategory::InlineImmediate>::type>
        U GetImmediate() const;

        // Converts the storage to StorageClass::Direct and returns its
        // register; forModification indicates the caller intends to modify
        // the value.
        DirectRegister ConvertToDirect(bool forModification);

        // Swaps the contents of the two storages. See SwapType definition for
        // more details.
        template <typename U>
        void Swap(Storage<U>& other, SwapType type);

        // If the storage is not the sole data owner, copies the data to stack
        // and switches the ownership of other owners to it.
        // Does nothing if the storage was already the sole data owner.
        // Requires that the storage is direct and that it does not refer to
        // one of the shared base registers.
        void TakeSoleOwnershipOfDirect();

        // Returns a pin for the storage's register. While the pin is held,
        // the register cannot be spilled. Can only be called if Storage is
        // either direct or if it's indirect and refers to non-shared base
        // registers.
        ReferenceCounter GetPin();

        // Prints the information about the storage's type, value, base register
        // and offset to the output stream.
        void Print(std::ostream& out) const;

        // Returns whether or not two storages point to the same Data object.
        // (as opposed to having the same contents).
        template <typename U>
        bool operator==(Storage<U> const & other) const;

    private:
        // Types used to select the correct flavor of immediate methods. This is
        // necessary because GetStorageClass() is a runtime rather than a compile
        // time property. Even though it is not possible to create a
        // StorageClass::Immediate storage for an invalid immediate, the code
        // that branches depending on various types of storage needs to always
        // compile.
        struct ValidImmediateStorage {};
        struct InvalidImmediateStorage {};
        typedef typename std::conditional<CanBeInImmediateStorage<T>::value,
                                          ValidImmediateStorage,
                                          InvalidImmediateStorage>::type
            ImmediateFlavor;

        Storage(ExpressionTree::Data* data);

        void SetData(ExpressionTree::Data* data);
        void SetData(Storage& other);

        // Tag-dispatched implementations selected via ImmediateFlavor.
        void ConvertImmediateToDirect(bool forModification, ValidImmediateStorage);
        void ConvertImmediateToDirect(bool forModification, InvalidImmediateStorage);

        void PrintImmediate(std::ostream& out, ValidImmediateStorage) const;
        void PrintImmediate(std::ostream& out, InvalidImmediateStorage) const;

        ExpressionTree::Data* m_data;
    };
    // Convenience alias so clients can write Storage<T> instead of
    // ExpressionTree::Storage<T>.
    template <typename T>
    using Storage = typename ExpressionTree::Storage<T>;
}
| 8,594 |
765 |
<gh_stars>100-1000
/*
* Copyright (c) 2020 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ARCH_ARM_HTM_HH__
#define __ARCH_ARM_HTM_HH__
/**
* @file
*
* ISA-specific types for hardware transactional memory.
*/
#include "arch/arm/regs/int.hh"
#include "arch/arm/regs/vec.hh"
#include "arch/generic/htm.hh"
#include "base/types.hh"
namespace gem5
{
namespace ArmISA
{
/**
 * Arm-specific checkpoint of architectural state for hardware transactional
 * memory: save() captures a thread's state when a transaction starts and
 * restore() rolls the thread back to it on transaction failure, reporting
 * the failure cause.
 */
class HTMCheckpoint : public BaseHTMCheckpoint
{
  public:
    HTMCheckpoint()
      : BaseHTMCheckpoint()
    {}

    /** Maximum supported transaction nesting depth. */
    const static int MAX_HTM_DEPTH = 255;

    /** Clear the checkpointed state. */
    void reset() override;
    /** Capture the architectural state of @p tc into this checkpoint. */
    void save(ThreadContext *tc) override;
    /** Restore the saved state into @p tc; @p cause is the failure reason. */
    void restore(ThreadContext *tc, HtmFailureFaultCause cause) override;

    /** Record the destination register of the TSTART instruction. */
    void destinationRegister(RegIndex dest) { rt = dest; }
    /** Record the reason code supplied to TCANCEL. */
    void cancelReason(uint16_t reason) { tcreason = reason; }

  private:
    uint8_t rt; // TSTART destination register
    Addr nPc; // Fallback instruction address
    std::array<RegVal, NUM_ARCH_INTREGS> x; // General purpose registers
    std::array<VecRegContainer, NumVecRegs> z; // Vector registers
    std::array<VecPredRegContainer, NumVecRegs> p; // Predicate registers
    Addr sp; // Stack Pointer at current EL
    uint16_t tcreason; // TCANCEL reason
    uint32_t fpcr; // Floating-point Control Register
    uint32_t fpsr; // Floating-point Status Register
    uint32_t iccPmrEl1; // Interrupt Controller Interrupt Priority Mask
    uint8_t nzcv; // Condition flags
    uint8_t daif; // PSTATE.DAIF interrupt mask bits
    PCState pcstateckpt; // Checkpointed PC state
};
} // namespace ArmISA
} // namespace gem5
#endif
| 1,055 |
1,227 |
/*
* Copyright 2012 <NAME>
* Copyright 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package javax.microedition.media;
/**
 * A {@code Player} controls the rendering of time-based media, following the
 * JSR-135 (Mobile Media API) contract: it moves through the life-cycle states
 * below via {@link #realize()}, {@link #prefetch()}, {@link #start()} and
 * {@link #close()}, and exposes media-time positioning, looping and listener
 * registration.
 */
public interface Player extends Controllable {

    /** State: the player has released its resources and cannot be used. */
    int CLOSED = 0;
    /** State: the player has not yet acquired the information it needs. */
    int UNREALIZED = 100;
    /** State: the player has acquired the information needed for the media. */
    int REALIZED = 200;
    /** State: the player has acquired the resources needed to start. */
    int PREFETCHED = 300;
    /** State: the player is rendering media. */
    int STARTED = 400;

    /** Returned when a time value (media time or duration) is unknown. */
    long TIME_UNKNOWN = -1;

    /** Moves the player from UNREALIZED to REALIZED. */
    void realize() throws MediaException;

    /** Acquires the resources needed to start (REALIZED to PREFETCHED). */
    void prefetch() throws MediaException;

    /** Starts rendering as soon as possible (enters STARTED). */
    void start() throws MediaException;

    /** Stops rendering; the current media time is retained. */
    void stop() throws MediaException;

    /** Releases scarce/exclusive resources without closing the player. */
    void deallocate();

    /** Closes the player and releases all of its resources. */
    void close();

    /**
     * Sets the playback position.
     *
     * @param now the requested media time
     * @return the actual media time that was set
     */
    long setMediaTime(long now) throws MediaException;

    /** Returns the current media time, or {@link #TIME_UNKNOWN}. */
    long getMediaTime();

    /** Returns the {@link TimeBase} this player is using. */
    TimeBase getTimeBase();

    /** Sets the {@link TimeBase} this player should use. */
    void setTimeBase(TimeBase master) throws MediaException;

    /** Returns the media duration, or {@link #TIME_UNKNOWN}. */
    long getDuration();

    /** Sets how many times the media loops (see JSR-135 for semantics). */
    void setLoopCount(int count);

    /** Returns the current life-cycle state (one of the constants above). */
    int getState();

    /** Registers a listener for player events. */
    void addPlayerListener(PlayerListener playerListener);

    /** Removes a previously registered listener. */
    void removePlayerListener(PlayerListener playerListener);

    /** Returns the content type (e.g. MIME type) of the media. */
    String getContentType();
}
| 387 |
432 |
<reponame>mfkiwl/riscv_vhdl-64bit-fault-tolerant
# Scenario test: boots the simulated STM32L4xx target, navigates the on-device
# menu to the Tetris demo, plays a few moves while capturing screenshots, and
# generates Doxygen-based reproduction documentation.
TEST_NAME = "Tests N1: demo game"
TEST_DESCRIPTION = \
    "This test do the following steps:\n"\
    " - Load elf-image into simulated target.\n"\
    " - Start simulation.\n"\
    " - Use button to select Tetris menu.\n"\
    " - Start and play Tetris game.\n"\
    " - Generate test reproduction documentation.\n"

import sys
# local helper modules (rpc, doxytracer) live in ./modules
sys.path.append(".\\modules")
import subprocess
import rpc
import doxytracer

# Launch the simulator binary with the STM32L4xx GUI target configuration,
# then attach to it over RPC.
subprocess.Popen("..\\win32build\\Release\\appdbg64g.exe -c ..\\..\\targets\\stm32l4xx_gui.json")
sim = rpc.Simulator()
sim.connect()

# Prepare documentation generator
doxy = doxytracer.DoxyTracer(sim, "STM32L4xx platform description")
sim.setDoxyTracer(doxy)
doxy.addPage(TEST_NAME)
doxy.addParagraph(TEST_DESCRIPTION)

doxy.addSection("Device Initial State")
doxy.addParagraph(\
    "General settings before the test started:\n"\
    " -# <b>Power On:</b> {0}\n"\
    .format(sim.isON()))

# Scripted user interaction: each clickButton() is recorded (with its comment)
# in the generated reproduction document.
doxy.addSection("User Actions")
sim.clickButton("BTN_P7", comment="to power-on device.")
sim.go_msec(5100)
sim.clickButton("BTN_0", comment="to select menu Keyboard test")
sim.go_msec(500)
sim.saveScreenShot()
sim.clickButton("BTN_0", comment="to select menu Tetris")
sim.go_msec(200)
sim.saveScreenShot()
sim.clickButton("BTN_P2", comment="to start Tetris demo game")
sim.go_msec(500)
sim.saveScreenShot()
sim.clickButton("BTN_5", comment="to rotate figure")
sim.go_msec(200)
sim.clickButton("BTN_4", comment="to shift left")
sim.clickButton("BTN_4", comment="to shift left again")
sim.go_msec(200)
sim.saveScreenShot()
sim.clickButton("BTN_0", comment="to drop figure down")
sim.go_msec(200)
sim.saveScreenShot()
sim.go()
sim.halt()

doxy.addSection("Test statistics")
# NOTE(review): both coverage figures below come from the same
# sim.cmd("coverage") call — confirm whether separate commands exist for
# test-only vs. overall coverage.
doxy.addParagraph(\
    "General test information:\n"\
    " -# <b>Duration:</b> {0} sec\n"\
    " -# <b>Test Code Coverage</b>: {1} %\n"\
    " -# <b>Overall Code Coverage</b>: {2} %\n"\
    .format(sim.simTimeSec(), sim.cmd("coverage"), sim.cmd("coverage")))

# Generate documentation in current folder
doxy.generate("./")
sim.exit()
sim.disconnect()

# Run Doxygen over the generated pages and build the PDF via LaTeX.
subprocess.Popen("cd generated & doxygen.exe ..\\demo\\Doxyfile & cd latex & make.bat & refman.pdf", shell=True)
6,497 |
<reponame>jedyang/cachecloud
package com.sohu.cache.constant;
/**
* Redis-Migrate-Tool常量
* @author leifu
* @Date 2016-6-10
* @Time 上午9:23:30
*/
public enum RedisMigrateToolConstant {

    Stats("Stats"),
    Keyspace("Group"),
    Clients("Clients"),
    Memory("Memory"),
    Server("Server");

    /** Section name as reported by redis-migrate-tool. */
    private String value;

    RedisMigrateToolConstant(String value) {
        this.value = value;
    }

    @Override
    public String toString() {
        return this.value;
    }

    /**
     * Resolves a section name to its enum constant.
     *
     * @param input section name to look up
     * @return the matching constant, or {@code null} when none matches
     */
    public static RedisMigrateToolConstant value(String input) {
        for (RedisMigrateToolConstant candidate : values()) {
            if (candidate.value.equals(input)) {
                return candidate;
            }
        }
        return null;
    }

    /** @return the section name backing this constant. */
    public String getValue() {
        return value;
    }
}
| 428 |
1,576 |
<reponame>paulojraposo/MarkdownReader
/**
* Types for extending block parsing
*/
package org.commonmark.parser.block;
| 38 |
854 |
<filename>Python3/373.py
__________________________________________________________________________________________________
sample 40 ms submission
import heapq
class Solution:
    """LeetCode 373 — k pairs with the smallest sums, one heap entry per row."""

    def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:
        """Return the k pairs (nums1[i], nums2[j]) with the smallest sums.

        Maintains one heap entry per index of nums1; pointers[i] tracks the
        next index of nums2 to pair with nums1[i].
        """
        if not nums1 or not nums2:
            return []
        rows, cols = len(nums1), len(nums2)
        pointers = [0] * rows
        heap = [[nums1[i] + nums2[0], i] for i in range(rows)]
        result = []
        while heap and len(result) < k:
            _, row = heapq.heappop(heap)
            result.append([nums1[row], nums2[pointers[row]]])
            pointers[row] += 1
            # Re-arm this row with its next candidate, if any remain.
            if pointers[row] < cols:
                heapq.heappush(heap, [nums1[row] + nums2[pointers[row]], row])
        return result
__________________________________________________________________________________________________
sample 13304 kb submission
import heapq
class Solution:
    """LeetCode 373 — lazy frontier expansion starting from (0, 0)."""

    def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:
        """Return the k smallest-sum pairs as (nums1[i], nums2[j]) tuples.

        Pops the current minimum cell and pushes its right neighbour; only
        column-0 cells also seed the next row, so every cell enters the heap
        at most once.
        """
        if not nums1 or not nums2:
            return []
        frontier = [(nums1[0] + nums2[0], 0, 0)]
        pairs = []
        while frontier and len(pairs) < k:
            _, i, j = heapq.heappop(frontier)
            pairs.append((nums1[i], nums2[j]))
            if j == 0 and i + 1 < len(nums1):
                heapq.heappush(frontier, (nums1[i + 1] + nums2[j], i + 1, j))
            if j + 1 < len(nums2):
                heapq.heappush(frontier, (nums1[i] + nums2[j + 1], i, j + 1))
        return pairs
__________________________________________________________________________________________________
| 835 |
455 |
#pragma once
#include "../defines.h"
#include "../Context.h"
#include "Object.h"
#include "BaseCamera.h"
namespace star
{
	// Free-flying camera: extends BaseCamera with configurable move speed,
	// optional zooming, and a static mode that freezes its Update logic.
	class FreeCamera : public BaseCamera
	{
	public:
		FreeCamera(void);
		virtual ~FreeCamera(void);

		// When static, the camera ignores movement input in Update().
		void SetStatic(bool isStatic);
		// Enables/disables zooming and tunes its speed.
		void SetZoomEnabled(bool canZoom);
		void SetZoomSpeed(float32 speed);
		bool IsZoomEnabled() const;
		// Translation speed used while moving the camera.
		void SetMoveSpeed(float32 speed);

	protected:
		// Per-frame update; applies input-driven movement/zoom (see .cpp).
		virtual void Update(const Context& context);

	private:
		// Accumulated rotation around the X (pitch) and Y (yaw) axes.
		float32 m_TotalPitch, m_TotalYaw;
		float32 m_MoveSpeed, m_RotationSpeed;
		float32 m_Zoom, m_ZoomSpeed;
		// m_bisStatic: frozen camera; m_bZoom: zooming enabled.
		bool m_bisStatic, m_bZoom;

		// Copy and move are intentionally disabled (declared, not defined).
		FreeCamera(const FreeCamera &);
		FreeCamera(FreeCamera &&);
		FreeCamera & operator=(const FreeCamera &);
		FreeCamera & operator=(FreeCamera&&);
	};
}
| 320 |
3,227 |
#include <CGAL/Simple_cartesian.h>
#include <CGAL/Surface_mesh.h>
#include <CGAL/Timer.h>
// Simplification function
#include <CGAL/Surface_mesh_simplification/edge_collapse.h>
#include <CGAL/Surface_mesh_simplification/Edge_collapse_visitor_base.h>
#include <CGAL/Surface_mesh_simplification/Policies/Edge_collapse/Count_stop_predicate.h>
#include <CGAL/Surface_mesh_simplification/Policies/Edge_collapse/LindstromTurk_cost.h>
#include <CGAL/Surface_mesh_simplification/Policies/Edge_collapse/LindstromTurk_placement.h>
#include <CGAL/Surface_mesh_simplification/Policies/Edge_collapse/Bounded_normal_change_filter.h>
#include <CGAL/Surface_mesh_simplification/Policies/Edge_collapse/Polyhedral_envelope_filter.h>
//bbox
#include <CGAL/Polygon_mesh_processing/bbox.h>
#include <iostream>
namespace SMS = CGAL::Surface_mesh_simplification;
typedef CGAL::Simple_cartesian<double> Kernel;
typedef Kernel::Point_3 Point_3;
typedef CGAL::Surface_mesh<Point_3> Surface;
typedef SMS::LindstromTurk_cost<Surface> Cost;
typedef SMS::LindstromTurk_placement<Surface> Placement;
typedef SMS::Polyhedral_envelope_filter<Kernel,SMS::Bounded_normal_change_filter<> > Filter;
// Counters aggregated by My_visitor over one edge_collapse() run.
struct Stats
{
  std::size_t collected = 0;              // edges gathered in the collecting phase
  std::size_t processed = 0;              // edges selected during processing
  std::size_t collapsed = 0;              // edges actually collapsed
  std::size_t non_collapsable = 0;        // rejected by the link condition (would break manifoldness)
  std::size_t cost_uncomputable = 0;      // no collapse cost could be computed
  std::size_t placement_uncomputable = 0; // no placement point could be computed
};
// Visitor whose hooks are invoked by SMS::edge_collapse() at each stage;
// it only tallies events into the shared Stats instance.
struct My_visitor : SMS::Edge_collapse_visitor_base<Surface>
{
  My_visitor(Stats* s) : stats(s) {}

  // Called during the collecting phase for each edge collected.
  void OnCollected(const Profile&, const boost::optional<double>&)
  {
    ++(stats->collected);
    // '\r' rewrites the same console line as a progress indicator.
    // Use std::flush, not std::endl: endl would append a newline on every
    // collected edge (one line per edge) and defeat the carriage return.
    std::cerr << "\rEdges collected: " << stats->collected << std::flush;
  }

  // Called during the processing phase for each edge selected.
  // If cost is absent the edge won't be collapsed.
  void OnSelected(const Profile&,
                  boost::optional<double> cost,
                  std::size_t /* initial */,
                  std::size_t /* current */)
  {
    ++(stats->processed);
    if(!cost)
      ++(stats->cost_uncomputable);
  }

  // Called during the processing phase for each edge being collapsed.
  // If placement is absent the edge is left uncollapsed.
  void OnCollapsing(const Profile&,
                    boost::optional<Point> placement)
  {
    if(!placement)
      ++(stats->placement_uncomputable);
  }

  // Called for each edge which failed the so called link-condition,
  // that is, which cannot be collapsed because doing so would
  // turn the surface mesh into a non-manifold.
  void OnNonCollapsable(const Profile&)
  {
    ++(stats->non_collapsable);
  }

  // Called after each edge has been collapsed.
  void OnCollapsed(const Profile&, vertex_descriptor)
  {
    ++(stats->collapsed);
  }

  Stats* stats; // not owned; must outlive the simplification run
};
// Runs the same simplification three times with polyhedral envelopes of
// increasing tolerance (0.5%, 1%, 2% of the bbox diagonal) and reports how
// much each envelope allows the mesh to shrink.
int main(int argc, char** argv)
{
  Surface input_mesh;
  std::ifstream is(argc > 1 ? argv[1] : "data/helmet.off");
  is >> input_mesh;

  SMS::Count_stop_predicate<Surface> stop(0); // go as far as you can while in the envelope

  Stats stats;
  My_visitor vis(&stats);

  std::cout << "Input has " << num_vertices(input_mesh) << " vertices and " << num_edges(input_mesh) << " edges" << std::endl;

  // The envelope tolerance is expressed as a fraction of the bounding-box
  // diagonal so it scales with the model.
  CGAL::Iso_cuboid_3<Kernel> bbox(CGAL::Polygon_mesh_processing::bbox(input_mesh));
  Point_3 cmin = (bbox.min)();
  Point_3 cmax = (bbox.max)();
  const double diag = CGAL::approximate_sqrt(CGAL::squared_distance(cmin, cmax));

  Surface mesh_cpy = input_mesh; // need a copy to keep the AABB tree valid
  Surface small_envelope_mesh = input_mesh;
  Surface big_envelope_mesh = input_mesh;
  Surface huge_envelope_mesh = input_mesh;

  CGAL::Timer t;
  t.start();
  // Pass 1: tightest envelope (0.5% of the diagonal).
  {
    std::cout << "eps = " << 0.005*diag << std::endl;
    Placement placement;
    Filter filter(0.005*diag);
    SMS::edge_collapse(small_envelope_mesh, stop, CGAL::parameters::get_cost(Cost()).filter(filter).get_placement(placement));
    std::cout << "Output has " << vertices(small_envelope_mesh).size() << " vertices and " << edges(small_envelope_mesh).size() << " edges" << std::endl;
    std::cout << t.time() << "sec\n";
    t.reset();
  }
  // Pass 2: medium envelope (1%); the only pass that attaches the
  // statistics visitor.
  {
    std::cout << "eps = " << 0.01*diag << std::endl;
    Placement placement;
    Filter filter(0.01*diag);
    SMS::edge_collapse(big_envelope_mesh, stop, CGAL::parameters::get_cost(Cost()).filter(filter).visitor(vis).get_placement(placement));
    std::cout << "Output has " << vertices(big_envelope_mesh).size() << " vertices and " << edges(big_envelope_mesh).size() << " edges" << std::endl;
    std::cout << t.time() << "sec\n";
    t.reset();
  }
  // Pass 3: loosest envelope (2%).
  {
    std::cout << "eps = " << 0.02*diag << std::endl;
    Placement placement;
    Filter filter(0.02*diag);
    SMS::edge_collapse(huge_envelope_mesh, stop, CGAL::parameters::get_cost(Cost()).filter(filter).get_placement(placement));
    std::cout << "Output has " << vertices(huge_envelope_mesh).size() << " vertices and " << edges(huge_envelope_mesh).size() << " edges" << std::endl;
    std::cout << t.time() << "sec\n";
  }

  // Stats were only collected by pass 2 (the one with the visitor).
  std::cout << "\nEdges collected: " << stats.collected
            << "\nEdges proccessed: " << stats.processed
            << "\nEdges collapsed: " << stats.collapsed
            << std::endl
            << "\nEdges not collapsed due to topological constraints: " << stats.non_collapsable
            << "\nEdge not collapsed due to cost computation constraints: " << stats.cost_uncomputable
            << "\nEdge not collapsed due to placement computation constraints: " << stats.placement_uncomputable
            << std::endl;

  // Sanity: a looser envelope must allow at least as much simplification.
  assert(vertices(input_mesh).size() > vertices(small_envelope_mesh).size());
  assert(vertices(small_envelope_mesh).size() > vertices(big_envelope_mesh).size());
  assert(vertices(big_envelope_mesh).size() > vertices(huge_envelope_mesh).size());

  return EXIT_SUCCESS;
}
| 2,395 |
2,553 |
# Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for bed_reader CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# Workaround: if the 'google' namespace package was imported before
# 'google.protobuf', evict it so the protobuf submodule can be imported
# cleanly by the imports below.
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
  del sys.modules['google']
from absl.testing import absltest
from absl.testing import parameterized
from third_party.nucleus.io import clif_postproc
from third_party.nucleus.io.python import bed_reader
from third_party.nucleus.protos import bed_pb2
from third_party.nucleus.testing import test_utils
class BedReaderTest(parameterized.TestCase):
  """Tests for the CLIF-wrapped BedReader (plain and bgzipped BED files)."""

  def setUp(self):
    # Checked-in test fixtures: a 12-field BED file and its gzipped twin.
    self.bed = test_utils.genomics_core_testdata('test_regions.bed')
    self.zipped_bed = test_utils.genomics_core_testdata('test_regions.bed.gz')
    self.options = bed_pb2.BedReaderOptions()
    # Expected first record of both fixture files.
    self.first = bed_pb2.BedRecord(
        reference_name='chr1',
        start=10,
        end=20,
        name='first',
        score=100,
        strand=bed_pb2.BedRecord.FORWARD_STRAND,
        thick_start=12,
        thick_end=18,
        item_rgb='255,124,1',
        block_count=3,
        block_sizes='2,6,2',
        block_starts='10,12,18')

  def test_bed_iterate(self):
    """Iterates both the plain and the gzipped file; contents must match."""
    with bed_reader.BedReader.from_file(self.bed, self.options) as reader:
      self.assertEqual(reader.header.num_fields, 12)
      iterable = reader.iterate()
      self.assertIsInstance(iterable, clif_postproc.WrappedCppIterable)
      actual = list(iterable)
      self.assertLen(actual, 2)
      self.assertEqual(actual[0], self.first)
    # Same checks against the bgzipped copy of the file.
    zreader = bed_reader.BedReader.from_file(self.zipped_bed,
                                             bed_pb2.BedReaderOptions())
    self.assertEqual(zreader.header.num_fields, 12)
    with zreader:
      ziterable = zreader.iterate()
      self.assertIsInstance(ziterable, clif_postproc.WrappedCppIterable)
      zactual = list(ziterable)
      self.assertLen(zactual, 2)
      self.assertEqual(zactual[0], self.first)

  def test_from_file_raises_with_missing_bed(self):
    """Opening a nonexistent path must raise ValueError."""
    # NOTE(review): assertRaisesRegexp is the deprecated alias of
    # assertRaisesRegex; consider migrating when py2 support is dropped.
    with self.assertRaisesRegexp(ValueError, 'Could not open missing.bed'):
      bed_reader.BedReader.from_file('missing.bed', self.options)

  def test_ops_on_closed_reader_raise(self):
    """Using a reader after its context manager exits must raise."""
    reader = bed_reader.BedReader.from_file(self.bed, self.options)
    with reader:
      pass
    # At this point the reader is closed.
    with self.assertRaisesRegexp(ValueError, 'Cannot Iterate a closed'):
      reader.iterate()

  @parameterized.parameters('malformed.bed', 'malformed2.bed')
  def test_bed_iterate_raises_on_malformed_record(self, filename):
    """A malformed record raises lazily, only once iteration reaches it."""
    malformed = test_utils.genomics_core_testdata(filename)
    reader = bed_reader.BedReader.from_file(malformed, self.options)
    iterable = iter(reader.iterate())
    # The first record of each malformed fixture is still well-formed.
    self.assertIsNotNone(next(iterable))
    with self.assertRaises(ValueError):
      list(iterable)
if __name__ == '__main__':
absltest.main()
| 1,595 |
8,569 |
#include <libevdev/libevdev.h>
#include "sway/config.h"
#include "sway/commands.h"
#include "sway/input/cursor.h"
// Handler for `input <identifier> scroll_button <button|disable>`:
// configures which button triggers on-button-down scrolling for the
// currently configured input device.
struct cmd_results *input_cmd_scroll_button(int argc, char **argv) {
	struct cmd_results *err = checkarg(argc, "scroll_button", EXPECTED_AT_LEAST, 1);
	if (err) {
		return err;
	}
	struct input_config *ic = config->handler_context.input_config;
	if (!ic) {
		return cmd_results_new(CMD_FAILURE, "No input device defined.");
	}

	// "disable" maps to button 0, which turns the feature off.
	if (strcmp(argv[0], "disable") == 0) {
		ic->scroll_button = 0;
		return cmd_results_new(CMD_SUCCESS, NULL);
	}

	char *msg = NULL;
	uint32_t btn = get_mouse_button(argv[0], &msg);
	if (msg) {
		// get_mouse_button allocated the message; free it after copying
		// it into the result.
		err = cmd_results_new(CMD_INVALID, msg);
		free(msg);
		return err;
	}
	if (btn == SWAY_SCROLL_UP || btn == SWAY_SCROLL_DOWN
			|| btn == SWAY_SCROLL_LEFT || btn == SWAY_SCROLL_RIGHT) {
		return cmd_results_new(CMD_INVALID,
			"X11 axis buttons are not supported for scroll_button");
	}
	if (!btn) {
		return cmd_results_new(CMD_INVALID, "Unknown button %s", argv[0]);
	}
	ic->scroll_button = btn;

	return cmd_results_new(CMD_SUCCESS, NULL);
}
| 457 |
512 |
<gh_stars>100-1000
from __future__ import absolute_import
import os
import shutil
import signal
import sys
import metaparticle_pkg.option as option
import metaparticle_pkg.builder as builder
import metaparticle_pkg.runner as runner
def is_in_docker_container():
    """Return True when this process appears to run inside a container.

    The METAPARTICLE_IN_CONTAINER environment variable overrides detection
    ('true'/'1' -> True, 'false'/'0' -> False). Otherwise the last line of
    /proc/1/cgroup is inspected for a docker or kubernetes (kubepods) path.
    Returns False when the file is missing, unreadable, or empty.
    """
    mp_in_container = os.getenv('METAPARTICLE_IN_CONTAINER', None)
    if mp_in_container in ['true', '1']:
        return True
    elif mp_in_container in ['false', '0']:
        return False

    try:
        with open('/proc/1/cgroup', 'r+t') as f:
            lines = f.read().splitlines()
            # Guard against an empty cgroup file: lines[-1] would raise
            # IndexError; treat "no entries" as "not containerized".
            if not lines:
                return False
            last_line = lines[-1]
            return 'docker' in last_line or 'kubepods' in last_line
    except IOError:
        return False
def write_dockerfile(package, exec_file):
    """Write a Dockerfile for `package` into the current directory.

    If the package supplies its own dockerfile path, that file is copied
    verbatim; otherwise a python-alpine Dockerfile is generated that runs
    `exec_file` and adds the package's additional files.
    """
    custom = getattr(package, 'dockerfile', None)
    if custom is not None:
        shutil.copy(custom, 'Dockerfile')
        return

    extra_copies = "\n".join(add_file.render() for add_file in package.additionalFiles)
    template = """FROM python:{version}-alpine
COPY ./ /app/
{copy_files}
RUN pip install --no-cache -r /app/requirements.txt
CMD python -u /app/{exec_file}
"""
    with open('Dockerfile', 'w+t') as out:
        out.write(template.format(version=package.py_version,
                                  copy_files=extra_copies,
                                  exec_file=exec_file))
class Containerize(object):
    """Decorator that builds the wrapped entry point into a container image
    and runs it via the configured runtime executor.

    When the decorated function is invoked inside a container it simply
    runs; on the host it triggers build/publish/run instead.
    """

    def __init__(self, runtime={}, package={}):
        # NOTE: the mutable defaults are only read (never mutated), so the
        # shared-default pitfall does not apply here.
        self.runtime = option.load(option.RuntimeOptions, runtime)
        self.package = option.load(option.PackageOptions, package)
        self.image = "{repo}/{name}:latest".format(
            repo=self.package.repository,
            name=self.package.name
        )
        self.builder = builder.select(self.package.builder)
        self.runner = runner.select(self.runtime.executor)

    def __call__(self, func):
        def wrapped(*args, **kwargs):
            # Inside the container: just execute the user code.
            if is_in_docker_container():
                return func(*args, **kwargs)

            # On the host: derive the script path to bake into the image.
            script = sys.argv[0]
            slash = script.find('/')
            if slash != -1:
                # NOTE(review): keeps everything from the first '/' onward —
                # presumably stripping a relative prefix; confirm intent.
                script = script[slash:]

            write_dockerfile(self.package, script)
            self.builder.build(self.image)
            if self.package.publish:
                self.builder.publish(self.image)

            def handle_sigint(signum, frame):
                # Ctrl-C on the host cancels the remote/containerized run.
                self.runner.cancel(self.package.name)
                sys.exit(0)

            signal.signal(signal.SIGINT, handle_sigint)
            self.runner.run(self.image, self.package.name, self.runtime)
            return self.runner.logs(self.package.name)
        return wrapped
class PackageFile(object):
    """A file to copy into the image, with an optional chmod mode."""

    def __init__(self, src, dest, mode=None):
        self.src = src    # host-side source path
        self.dest = dest  # destination path inside the image
        self.mode = mode  # e.g. '0755'; None leaves permissions untouched

    def render(self):
        """Return the Dockerfile line(s): COPY, plus RUN chmod when mode is set."""
        lines = "COPY %s %s" % (self.src, self.dest)
        if self.mode:
            lines += "\nRUN chmod -R %s %s" % (self.mode, self.dest)
        return lines
| 1,405 |
4,879 |
#include "routing/road_index.hpp"
#include "routing/routing_exceptions.hpp"
namespace routing
{
void RoadIndex::Import(std::vector<Joint> const & joints)
{
for (Joint::Id jointId = 0; jointId < joints.size(); ++jointId)
{
Joint const & joint = joints[jointId];
for (uint32_t i = 0; i < joint.GetSize(); ++i)
{
RoadPoint const & entry = joint.GetEntry(i);
RoadJointIds & roadJoints = m_roads[entry.GetFeatureId()];
roadJoints.AddJoint(entry.GetPointId(), jointId);
}
}
}
} // namespace routing
| 217 |
777 |
<filename>content/child/service_worker/service_worker_provider_context.h
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_CHILD_SERVICE_WORKER_SERVICE_WORKER_PROVIDER_CONTEXT_H_
#define CONTENT_CHILD_SERVICE_WORKER_SERVICE_WORKER_PROVIDER_CONTEXT_H_
#include <memory>
#include <set>
#include <vector>
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner_helpers.h"
#include "content/common/content_export.h"
#include "content/common/service_worker/service_worker_types.h"
namespace base {
class SingleThreadTaskRunner;
}
namespace content {
class ServiceWorkerHandleReference;
class ServiceWorkerRegistrationHandleReference;
struct ServiceWorkerProviderContextDeleter;
class ThreadSafeSender;
// An instance of this class holds information related to Document/Worker.
// Created and destructed on the main thread. Unless otherwise noted, all
// methods are called on the main thread. The lifetime of this class is equals
// to the corresponding ServiceWorkerNetworkProvider.
//
// The role of this class varies for controllees and controllers:
// - For controllees, this is used for keeping the associated registration and
// the controller alive to create controllee's ServiceWorkerContainer. The
// references to them are kept until OnDisassociateRegistration() is called
// or OnSetControllerServiceWorker() is called with an invalid worker info.
// - For controllers, this is used for keeping the associated registration and
// its versions alive to create controller's ServiceWorkerGlobalScope. The
// references to them are kept until OnDisassociateRegistration() is called.
//
// These operations are actually done in delegate classes owned by this class:
// ControlleeDelegate and ControllerDelegate.
class CONTENT_EXPORT ServiceWorkerProviderContext
    : public base::RefCountedThreadSafe<ServiceWorkerProviderContext,
                                        ServiceWorkerProviderContextDeleter> {
 public:
  ServiceWorkerProviderContext(int provider_id,
                               ServiceWorkerProviderType provider_type,
                               ThreadSafeSender* thread_safe_sender);

  // Called from ServiceWorkerDispatcher.
  // Takes ownership of the handle references, keeping the registration and
  // its versions alive until OnDisassociateRegistration().
  void OnAssociateRegistration(
      std::unique_ptr<ServiceWorkerRegistrationHandleReference> registration,
      std::unique_ptr<ServiceWorkerHandleReference> installing,
      std::unique_ptr<ServiceWorkerHandleReference> waiting,
      std::unique_ptr<ServiceWorkerHandleReference> active);
  // Drops the references acquired by OnAssociateRegistration().
  void OnDisassociateRegistration();
  // An invalid/empty |controller| clears the current controller reference.
  void OnSetControllerServiceWorker(
      std::unique_ptr<ServiceWorkerHandleReference> controller);

  // Called on the worker thread. Used for initializing
  // ServiceWorkerGlobalScope.
  void GetAssociatedRegistration(ServiceWorkerRegistrationObjectInfo* info,
                                 ServiceWorkerVersionAttributes* attrs);

  // May be called on the main or worker thread.
  bool HasAssociatedRegistration();

  int provider_id() const { return provider_id_; }

  // Returns the controller reference; null when there is no controller.
  ServiceWorkerHandleReference* controller();

 private:
  friend class base::DeleteHelper<ServiceWorkerProviderContext>;
  friend class base::RefCountedThreadSafe<ServiceWorkerProviderContext,
                                          ServiceWorkerProviderContextDeleter>;
  friend struct ServiceWorkerProviderContextDeleter;

  // Behavior differs between controllees and controllers (see class
  // comment); the concrete delegate is chosen at construction.
  class Delegate;
  class ControlleeDelegate;
  class ControllerDelegate;

  ~ServiceWorkerProviderContext();
  // Ensures destruction happens on the main thread regardless of which
  // thread releases the last reference.
  void DestructOnMainThread() const;

  const int provider_id_;
  scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner_;
  scoped_refptr<ThreadSafeSender> thread_safe_sender_;
  std::unique_ptr<Delegate> delegate_;

  DISALLOW_COPY_AND_ASSIGN(ServiceWorkerProviderContext);
};

// Custom deleter used by RefCountedThreadSafe: routes the final release
// through DestructOnMainThread() (see above).
struct ServiceWorkerProviderContextDeleter {
  static void Destruct(const ServiceWorkerProviderContext* context) {
    context->DestructOnMainThread();
  }
};
} // namespace content
#endif // CONTENT_CHILD_SERVICE_WORKER_SERVICE_WORKER_PROVIDER_CONTEXT_H_
| 1,281 |
420 |
<filename>example/example.c
#include <stdlib.h>
#include <stdio.h>
#include <wlc/wlc.h>
#include <chck/math/math.h>
#include <linux/input.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
// Global compositor state: at most one interactive move/resize ("action")
// can be in flight at a time.
static struct {
   struct {
      wlc_handle view;       // view being moved/resized; 0 when idle
      struct wlc_point grab; // last pointer position, for motion deltas
      uint32_t edges;        // WLC_RESIZE_EDGE_* mask; 0 means "move"
   } action;
} compositor;
// Claims the single interactive-action slot for `view`, remembering the
// grab point. Fails (returns false) while another action is active.
static bool
start_interactive_action(wlc_handle view, const struct wlc_point *origin)
{
   if (compositor.action.view != 0)
      return false;

   compositor.action.grab = *origin;
   compositor.action.view = view;
   wlc_view_bring_to_front(view);
   return true;
}
// Begins an interactive move (edges stay 0, so pointer_motion translates).
static void
start_interactive_move(wlc_handle view, const struct wlc_point *origin)
{
   start_interactive_action(view, origin);
}

// Begins an interactive resize. When the caller passes edges == 0, the
// edges are inferred from which quadrant of the view the pointer is in.
static void
start_interactive_resize(wlc_handle view, uint32_t edges, const struct wlc_point *origin)
{
   const struct wlc_geometry *g;
   if (!(g = wlc_view_get_geometry(view)) || !start_interactive_action(view, origin))
      return;

   // Midpoint of the view; pointer position relative to it picks the edges.
   const int32_t halfw = g->origin.x + g->size.w / 2;
   const int32_t halfh = g->origin.y + g->size.h / 2;

   if (!(compositor.action.edges = edges)) {
      compositor.action.edges = (origin->x < halfw ? WLC_RESIZE_EDGE_LEFT : (origin->x > halfw ? WLC_RESIZE_EDGE_RIGHT : 0)) |
                                (origin->y < halfh ? WLC_RESIZE_EDGE_TOP : (origin->y > halfh ? WLC_RESIZE_EDGE_BOTTOM : 0));
   }

   wlc_view_set_state(view, WLC_BIT_RESIZING, true);
}

// Ends the current action (if any) and clears all action state.
static void
stop_interactive_action(void)
{
   if (!compositor.action.view)
      return;

   wlc_view_set_state(compositor.action.view, WLC_BIT_RESIZING, false);
   memset(&compositor.action, 0, sizeof(compositor.action));
}
// Returns the view `offset` steps (wrapping) below the top of `output`'s
// view stack, or 0 when the output has no views.
static wlc_handle
get_topmost(wlc_handle output, size_t offset)
{
   size_t count;
   const wlc_handle *stack = wlc_output_get_views(output, &count);

   if (count == 0)
      return 0;

   return stack[(count - 1 + offset) % count];
}
// Tiles all non-positioner views on `output` into two columns and places
// positioner (anchored popup) views at their requested geometry.
static void
relayout(wlc_handle output)
{
   // very simple layout function
   // you probably don't want to layout certain type of windows in wm
   const struct wlc_size *r;
   if (!(r = wlc_output_get_virtual_resolution(output)))
      return;
   size_t memb;
   const wlc_handle *views = wlc_output_get_views(output, &memb);
   // Count only views that participate in tiling (no anchor rect).
   size_t positioned = 0;
   for (size_t i = 0; i < memb; ++i)
      if (wlc_view_positioner_get_anchor_rect(views[i]) == NULL)
         positioned ++;
   // Two columns, ceil(positioned / 2) rows; ew/eh absorb the rounding
   // remainder so the grid exactly fills the output.
   bool toggle = false;
   uint32_t y = 0;
   const uint32_t n = chck_maxu32((1 + positioned) / 2, 1);
   const uint32_t w = r->w / 2, h = r->h / n;
   const uint32_t ew = r->w - w * 2, eh = r->h - h * n;
   size_t j = 0;
   for (size_t i = 0; i < memb; ++i) {
      const struct wlc_geometry* anchor_rect = wlc_view_positioner_get_anchor_rect(views[i]);
      if (anchor_rect == NULL) {
         // Tiled view: alternate left/right column; the last odd view
         // spans the full width, the first row absorbs the height slack.
         const struct wlc_geometry g = {
            .origin = {
               .x = (toggle ? w + ew : 0),
               .y = y
            },
            .size = {
               .w = (!toggle && j == positioned - 1 ? r->w : (toggle ? w : w + ew)),
               .h = (j < 2 ? h + eh : h)
            }
         };
         wlc_view_set_geometry(views[i], 0, &g);
         // Advance to the next row only after filling the right column.
         y = y + (!(toggle = !toggle) ? g.size.h : 0);
         j ++;
      } else {
         // Positioner view: honor the requested size, falling back to the
         // current geometry when no valid size was requested.
         struct wlc_size size_req = *wlc_view_positioner_get_size(views[i]);
         if ((size_req.w <= 0) || (size_req.h <= 0)) {
            const struct wlc_geometry* current = wlc_view_get_geometry(views[i]);
            size_req = current->size;
         }
         struct wlc_geometry g = {
            .origin = anchor_rect->origin,
            .size = size_req
         };
         // Anchor rects are relative to the parent; translate to output
         // coordinates.
         wlc_handle parent = wlc_view_get_parent(views[i]);
         if (parent) {
            const struct wlc_geometry* parent_geometry = wlc_view_get_geometry(parent);
            g.origin.x += parent_geometry->origin.x;
            g.origin.y += parent_geometry->origin.y;
         }
         wlc_view_set_geometry(views[i], 0, &g);
      }
   }
}
// wlc callback: output resolution changed -> retile.
static void
output_resolution(wlc_handle output, const struct wlc_size *from, const struct wlc_size *to)
{
   (void)from, (void)to;
   relayout(output);
}

// wlc callback: new view -> show it on its output's mask, raise, focus,
// and retile. Returning true accepts the view.
static bool
view_created(wlc_handle view)
{
   wlc_view_set_mask(view, wlc_output_get_mask(wlc_view_get_output(view)));
   wlc_view_bring_to_front(view);
   wlc_view_focus(view);
   relayout(wlc_view_get_output(view));
   return true;
}

// wlc callback: view gone -> focus the new topmost view and retile.
static void
view_destroyed(wlc_handle view)
{
   wlc_view_focus(get_topmost(wlc_view_get_output(view), 0));
   relayout(wlc_view_get_output(view));
}

// wlc callback: mirror focus changes into the view's ACTIVATED state.
static void
view_focus(wlc_handle view, bool focus)
{
   wlc_view_set_state(view, WLC_BIT_ACTIVATED, focus);
}

// wlc callback: client-initiated move request.
static void
view_request_move(wlc_handle view, const struct wlc_point *origin)
{
   start_interactive_move(view, origin);
}

// wlc callback: client-initiated resize request.
static void
view_request_resize(wlc_handle view, uint32_t edges, const struct wlc_point *origin)
{
   start_interactive_resize(view, edges, origin);
}

// wlc callback: geometry requests are deliberately ignored — the tiling
// layout owns all view geometry.
static void
view_request_geometry(wlc_handle view, const struct wlc_geometry *g)
{
   (void)view, (void)g;
   // stub intentionally to ignore geometry requests.
}
// Event-loop callback: reads one chunk of clipboard data from `fd`, prints
// it, then tears down the event source. `data` points at the static
// source pointer registered in keyboard_key() so it can be cleared here.
static int
cb_selection_data(int fd, uint32_t mask, void *data)
{
   ((void) data);
   struct wlc_event_source **sourceptr = (struct wlc_event_source**) data;
   if (!sourceptr || !(*sourceptr)) {
      printf("error: selection cb, no src pointer\n");
      return 0;
   }
   if (mask & WLC_EVENT_ERROR) {
      printf("selection data fd error\n");
      goto cleanup;
   }
   if (mask & WLC_EVENT_READABLE) {
      // 511 bytes max so the buffer can always be NUL-terminated; longer
      // selections are truncated (single read by design).
      char buf[512];
      int ret = read(fd, buf, 511);
      if (ret < 0) {
         printf("reading from selection fd failed: %s\n", strerror(errno));
         goto cleanup;
      }
      buf[ret] = '\0';
      printf("Received clipboard data: %s\n", buf);
   }
cleanup:
   // One-shot: remove the source, close the fd and clear the registration
   // slot so keyboard_key() can arm a fresh one.
   wlc_event_source_remove(*sourceptr);
   close(fd);
   *sourceptr = NULL;
   return 0;
}
// wlc callback: global keyboard handler. Returning true consumes the key.
// Bindings (all Ctrl-modified): q close view, Down cycle stack, Esc quit,
// Return spawn terminal, 1-9 output scale, ',' paste clipboard to stdout.
static bool
keyboard_key(wlc_handle view, uint32_t time, const struct wlc_modifiers *modifiers, uint32_t key, enum wlc_key_state state)
{
   (void)time, (void)key;
   const uint32_t sym = wlc_keyboard_get_keysym_for_key(key, NULL);
   // Per-view bindings first (only when a view is focused).
   if (view) {
      if (modifiers->mods & WLC_BIT_MOD_CTRL && sym == XKB_KEY_q) {
         if (state == WLC_KEY_STATE_PRESSED) {
            wlc_view_close(view);
         }
         return true;
      } else if (modifiers->mods & WLC_BIT_MOD_CTRL && sym == XKB_KEY_Down) {
         // Rotate the stack: current view to back, focus the new top.
         if (state == WLC_KEY_STATE_PRESSED) {
            wlc_view_send_to_back(view);
            wlc_view_focus(get_topmost(wlc_view_get_output(view), 0));
         }
         return true;
      }
   }
   // Global bindings.
   if (modifiers->mods & WLC_BIT_MOD_CTRL && sym == XKB_KEY_Escape) {
      if (state == WLC_KEY_STATE_PRESSED) {
         wlc_terminate();
      }
      return true;
   } else if (modifiers->mods & WLC_BIT_MOD_CTRL && sym == XKB_KEY_Return) {
      if (state == WLC_KEY_STATE_PRESSED) {
         // $TERMINAL overrides the default terminal emulator.
         char *terminal = (getenv("TERMINAL") ? getenv("TERMINAL") : "weston-terminal");
         wlc_exec(terminal, (char *const[]){ terminal, NULL });
      }
      return true;
   } else if (modifiers->mods & WLC_BIT_MOD_CTRL && sym >= XKB_KEY_1 && sym <= XKB_KEY_9) {
      // Ctrl+N sets an integer scale factor of N on every output.
      if (state == WLC_KEY_STATE_PRESSED) {
         size_t memb;
         const wlc_handle *outputs = wlc_get_outputs(&memb);
         const uint32_t scale = (sym - XKB_KEY_1) + 1;
         for (size_t i = 0; i < memb; ++i)
            wlc_output_set_resolution(outputs[i], wlc_output_get_resolution(outputs[i]), scale);
         printf("scale: %u\n", scale);
      }
      return true;
   } else if (modifiers->mods & WLC_BIT_MOD_CTRL && sym == XKB_KEY_comma && state == WLC_KEY_STATE_PRESSED) {
      // Clipboard dump: find a plain-text mime type, pipe its data into
      // the event loop, and print it from cb_selection_data().
      size_t size;
      const char **types = wlc_get_selection_types(&size);
      for (size_t i = 0; i < size; ++i) {
         if (strcmp(types[i], "text/plain;charset=utf-8") == 0 || strcmp(types[i], "text/plain") == 0) {
            int pipes[2];
            if (pipe(pipes) == -1) {
               printf("pipe failed: %s\n", strerror(errno));
               break;
            }
            fcntl(pipes[0], F_SETFD, O_CLOEXEC | O_NONBLOCK);
            fcntl(pipes[1], F_SETFD, O_CLOEXEC | O_NONBLOCK);
            if (!wlc_get_selection_data(types[i], pipes[1])) {
               close(pipes[0]);
               close(pipes[1]);
               printf("error: get selection data failed for valid selection\n");
               break;
            }
            // Only one pending selection read at a time: drop any previous
            // source before registering the new read end.
            static struct wlc_event_source *src = NULL;
            static int recv_fd = -1;
            if (src) {
               wlc_event_source_remove(src);
               close(recv_fd);
            }
            src = wlc_event_loop_add_fd(pipes[0], WLC_EVENT_READABLE | WLC_EVENT_ERROR | WLC_EVENT_HANGUP, cb_selection_data, &src);
            recv_fd = pipes[0];
            break;
         }
      }
      // The types array is owned by the caller per this usage.
      free(types);
   }
   return false;
}
// wlc callback: button press focuses the clicked view; Ctrl+left starts a
// move, Ctrl+right starts a resize. Release ends any active action.
// Returning true (while an action is active) swallows the event.
static bool
pointer_button(wlc_handle view, uint32_t time, const struct wlc_modifiers *modifiers, uint32_t button, enum wlc_button_state state, const struct wlc_point *position)
{
   (void)button, (void)time, (void)modifiers;
   if (state == WLC_BUTTON_STATE_PRESSED) {
      wlc_view_focus(view);
      if (view) {
         if (modifiers->mods & WLC_BIT_MOD_CTRL && button == BTN_LEFT)
            start_interactive_move(view, position);
         if (modifiers->mods & WLC_BIT_MOD_CTRL && button == BTN_RIGHT)
            start_interactive_resize(view, 0, position);
      }
   } else {
      stop_interactive_action();
   }
   return (compositor.action.view ? true : false);
}
// wlc callback: applies pointer deltas to the active move/resize action
// (if any), then repositions the hardware cursor.
static bool
pointer_motion(wlc_handle handle, uint32_t time, double x, double y)
{
   (void)handle, (void)time;
   if (compositor.action.view) {
      // Delta since the last grab point.
      const int32_t dx = x - compositor.action.grab.x;
      const int32_t dy = y - compositor.action.grab.y;
      struct wlc_geometry g = *wlc_view_get_geometry(compositor.action.view);
      if (compositor.action.edges) {
         // Resize: grow/shrink along the grabbed edges, clamped to `min`.
         const struct wlc_size min = { 80, 40 };
         struct wlc_geometry n = g;
         if (compositor.action.edges & WLC_RESIZE_EDGE_LEFT) {
            n.size.w -= dx;
            n.origin.x += dx;
         } else if (compositor.action.edges & WLC_RESIZE_EDGE_RIGHT) {
            n.size.w += dx;
         }
         if (compositor.action.edges & WLC_RESIZE_EDGE_TOP) {
            n.size.h -= dy;
            n.origin.y += dy;
         } else if (compositor.action.edges & WLC_RESIZE_EDGE_BOTTOM) {
            n.size.h += dy;
         }
         // Apply each axis only when it stays above the minimum size.
         if (n.size.w >= min.w) {
            g.origin.x = n.origin.x;
            g.size.w = n.size.w;
         }
         if (n.size.h >= min.h) {
            g.origin.y = n.origin.y;
            g.size.h = n.size.h;
         }
         wlc_view_set_geometry(compositor.action.view, compositor.action.edges, &g);
      } else {
         // Move: translate the whole view by the delta.
         g.origin.x += dx;
         g.origin.y += dy;
         wlc_view_set_geometry(compositor.action.view, 0, &g);
      }
      compositor.action.grab.x = x;
      compositor.action.grab.y = y;
   }
   // In order to give the compositor control of the pointer placement it needs
   // to be explicitly set after receiving the motion event:
   wlc_pointer_set_position_v2(x, y);
   return (compositor.action.view ? true : false);
}
// Selection data source: writes the string registered via
// wlc_set_selection() (passed in `data`) to the requesting client's fd.
// NOTE(review): the write() return value is ignored — a short write would
// silently truncate the paste.
static void
cb_data_source(void *data, const char *type, int fd)
{
   ((void) type);
   const char *str = data;
   write(fd, str, strlen(str));
   close(fd);
}

// wlc log handler: forward all compositor log lines to stdout.
static void
cb_log(enum wlc_log_type type, const char *str)
{
   (void)type;
   printf("%s\n", str);
}
// Entry point: register all callbacks before wlc_init(), advertise a
// plain-text selection, then hand control to the wlc event loop.
int
main(void)
{
   wlc_log_set_handler(cb_log);
   // Callbacks must be set before wlc_init().
   wlc_set_output_resolution_cb(output_resolution);
   wlc_set_view_created_cb(view_created);
   wlc_set_view_destroyed_cb(view_destroyed);
   wlc_set_view_focus_cb(view_focus);
   wlc_set_view_request_move_cb(view_request_move);
   wlc_set_view_request_resize_cb(view_request_resize);
   wlc_set_view_request_geometry_cb(view_request_geometry);
   wlc_set_keyboard_key_cb(keyboard_key);
   wlc_set_pointer_button_cb(pointer_button);
   wlc_set_pointer_motion_cb_v2(pointer_motion);
   if (!wlc_init())
      return EXIT_FAILURE;
   // Offer the string "wlc" as the initial clipboard contents.
   const char *type = "text/plain;charset=utf-8";
   wlc_set_selection("wlc", &type, 1, &cb_data_source);
   // Blocks until wlc_terminate() is called (Ctrl+Esc binding).
   wlc_run();
   return EXIT_SUCCESS;
}
| 5,761 |
14,668 |
<reponame>zealoussnow/chromium<filename>chrome/browser/download/android/java/src/org/chromium/chrome/browser/download/dialogs/DownloadDialogUtilsUnitTest.java
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.download.dialogs;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.annotation.Config;
import org.chromium.chrome.browser.download.DirectoryOption;
import org.chromium.testing.local.LocalRobolectricTestRunner;
import java.util.ArrayList;
/**
* Unit test for {@link DownloadDialogUtils}.
*/
@RunWith(LocalRobolectricTestRunner.class)
@Config(manifest = Config.NONE)
public class DownloadDialogUtilsUnitTest {
    // Fixture options — presumably (path, available bytes, total bytes, type);
    // confirm against the DirectoryOption constructor.
    private final DirectoryOption mInternalSmallOption = new DirectoryOption(
            "/internal", 301, 1000, DirectoryOption.DownloadLocationDirectoryType.DEFAULT);
    private final DirectoryOption mInternalLargeOption = new DirectoryOption(
            "/internal", 600, 1000, DirectoryOption.DownloadLocationDirectoryType.DEFAULT);
    private final DirectoryOption mExternalSmallOption = new DirectoryOption(
            "/sd_card", 100, 2000, DirectoryOption.DownloadLocationDirectoryType.ADDITIONAL);
    private final DirectoryOption mExternalLargeOption = new DirectoryOption(
            "/sd_card", 1000, 2000, DirectoryOption.DownloadLocationDirectoryType.ADDITIONAL);
    private final String mDefaultLocation = "/internal";

    @Test
    public void testShouldSuggestDownloadLocation_DefaultEnoughSpace() {
        ArrayList<DirectoryOption> dirs = new ArrayList<>();
        dirs.add(mInternalLargeOption);
        dirs.add(mExternalLargeOption);
        assertFalse(DownloadDialogUtils.shouldSuggestDownloadLocation(dirs, mDefaultLocation, 300));
    }

    @Test
    public void testShouldSuggestDownloadLocation_UnknownBytes() {
        // A download of unknown size (0 bytes) never triggers a suggestion.
        ArrayList<DirectoryOption> dirs = new ArrayList<>();
        dirs.add(mInternalLargeOption);
        dirs.add(mExternalLargeOption);
        assertFalse(DownloadDialogUtils.shouldSuggestDownloadLocation(dirs, mDefaultLocation, 0));
    }

    @Test
    public void testShouldSuggestDownloadLocation_SuggestExternal() {
        // Internal too small, external large enough: suggest the SD card.
        ArrayList<DirectoryOption> dirs = new ArrayList<>();
        dirs.add(mInternalSmallOption);
        dirs.add(mExternalLargeOption);
        assertTrue(DownloadDialogUtils.shouldSuggestDownloadLocation(dirs, mDefaultLocation, 300));
    }

    // Renamed from testShouldSuggestDownloadLocation_BothNotEnoughSPace to fix
    // the capitalization typo ("SPace" -> "Space").
    @Test
    public void testShouldSuggestDownloadLocation_BothNotEnoughSpace() {
        ArrayList<DirectoryOption> dirs = new ArrayList<>();
        dirs.add(mInternalSmallOption);
        dirs.add(mExternalSmallOption);
        assertFalse(DownloadDialogUtils.shouldSuggestDownloadLocation(dirs, mDefaultLocation, 300));
    }

    @Test
    public void testShouldSuggestDownloadLocation_NoAvailableStorage() {
        ArrayList<DirectoryOption> dirs = new ArrayList<>();
        assertFalse(DownloadDialogUtils.shouldSuggestDownloadLocation(dirs, mDefaultLocation, 300));
    }

    @Test
    public void testShouldSuggestDownloadLocation_NoExternalStorage() {
        ArrayList<DirectoryOption> dirs = new ArrayList<>();
        dirs.add(mInternalLargeOption);
        assertFalse(DownloadDialogUtils.shouldSuggestDownloadLocation(dirs, mDefaultLocation, 300));
    }
}
| 1,132 |
369 |
<filename>module-os/LockGuard.hpp
// Copyright (c) 2017-2021, Mudita <NAME>.o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#pragma once
#include <mutex.hpp>
/// RAII scope guard for cpp_freertos::MutexStandard: the constructor and
/// destructor (declared here, defined elsewhere) acquire and release the
/// mutex. NOTE(review): savedInterruptStatus suggests interrupt state is
/// saved/restored around the critical section — confirm against the
/// implementation.
class LockGuard
{
  public:
    /// Acquires the given mutex for the lifetime of this object.
    explicit LockGuard(cpp_freertos::MutexStandard& mutex);
    /// Releases the mutex.
    ~LockGuard();
    // Non-copyable and non-movable: lock ownership is tied to one scope.
    LockGuard(const LockGuard &) = delete;
    LockGuard(LockGuard &&) = delete;
    LockGuard &operator=(const LockGuard &) = delete;
    LockGuard &operator=(LockGuard &&) = delete;
  private:
    // FreeRTOS interrupt status captured while the lock is held.
    BaseType_t savedInterruptStatus;
    cpp_freertos::MutexStandard &mutex;
};
| 222 |
419 |
#pragma once
#include <memory>
namespace Hypodermic
{

    class IRegistration;
    class IResolutionContainer;

    /// Immutable pairing of a component registration with the resolution
    /// container responsible for resolving it. Holds a reference to the
    /// container, so the container must outlive this context.
    class RegistrationContext
    {
    public:
        RegistrationContext(IResolutionContainer& resolutionContainer, const std::shared_ptr< IRegistration >& registration)
            : m_resolutionContainer(resolutionContainer)
            , m_registration(registration)
        {
        }

        /// The registration being resolved.
        const std::shared_ptr< IRegistration >& registration() const
        {
            return m_registration;
        }

        /// The container performing the resolution.
        IResolutionContainer& resolutionContainer() const
        {
            return m_resolutionContainer;
        }

    private:
        IResolutionContainer& m_resolutionContainer;
        std::shared_ptr< IRegistration > m_registration;
    };

} // namespace Hypodermic
| 326 |
728 |
<gh_stars>100-1000
/**
 * Node-level transaction handling for JSON resources.
 */
package org.sirix.access.trx.node.json;
| 42 |
405 |
<reponame>joowani/dtags<filename>dtags/commands/tags.py<gh_stars>100-1000
import json
import sys
from pathlib import Path
from typing import List, Optional, Set, Tuple
from dtags import style
from dtags.commons import (
dtags_command,
get_argparser,
normalize_tags,
prompt_user,
reverse_map,
)
from dtags.files import get_new_config, load_config_file, save_config_file
USAGE = "tags [-j] [-r] [-y] [-c] [-p] [-t TAG [TAG ...]]"
DESCRIPTION = f"""
Manage directory tags.
examples:
# show all tags
{style.command("tags")}
# show tags in JSON format with -j/--json
{style.command("tags --json")}
# show reverse mapping with -r/--reverse
{style.command("tags --reverse")}
# filter specific tags with -t
{style.command("tags -t foo bar baz")}
# clean invalid directories with -c/--clean
{style.command("tags --clean")}
# purge all tags with -p/--purge
{style.command("tags --purge")}
# skip confirmation prompts with -y/--yes
{style.command("tags --clean --yes")}
"""
@dtags_command
def execute(args: Optional[List[str]] = None) -> None:
    """Entry point for the ``tags`` command: parse arguments and dispatch.

    :param args: Argument list to parse; defaults to ``sys.argv[1:]``.
    """
    parser = get_argparser(prog="tags", desc=DESCRIPTION, usage=USAGE)
    # -c/--clean, -p/--purge and -t are mutually exclusive operations.
    arg_group = parser.add_mutually_exclusive_group()
    parser.add_argument(
        "-j",
        "--json",
        action="store_true",
        dest="json",
        help="show tags in JSON format",
    )
    parser.add_argument(
        "-r",
        "--reverse",
        action="store_true",
        dest="reverse",
        help="show tag to directories relationship",
    )
    parser.add_argument(
        "-y",
        "--yes",
        action="store_true",
        dest="yes",
        help="assume yes to prompts",
    )
    arg_group.add_argument(
        "-c",
        "--clean",
        action="store_true",
        dest="clean",
        help="clean invalid directories",
    )
    arg_group.add_argument(
        "-p",
        "--purge",
        action="store_true",
        dest="purge",
        help="purge all tags",
    )
    arg_group.add_argument(
        "-t",
        metavar="TAG",
        nargs="+",
        dest="tags",
        help="tag names to filter",
    )
    parsed_args = parser.parse_args(sys.argv[1:] if args is None else args)
    # argparse groups cannot express "-r/-j conflict with -c/-p", so those
    # invalid combinations are rejected manually before dispatching.
    if parsed_args.reverse and parsed_args.clean:
        parser.error("argument -r/--reverse: not allowed with argument -c/--clean")
    elif parsed_args.reverse and parsed_args.purge:
        parser.error("argument -r/--reverse: not allowed with argument -p/--purge")
    elif parsed_args.json and parsed_args.clean:
        parser.error("argument -j/--json: not allowed with argument -c/--clean")
    elif parsed_args.json and parsed_args.purge:
        parser.error("argument -j/--json: not allowed with argument -p/--purge")
    elif parsed_args.clean:
        clean_tags(skip_prompts=parsed_args.yes)
    elif parsed_args.purge:
        purge_tags(skip_prompts=parsed_args.yes)
    else:
        show_tags(
            filters=parsed_args.tags,
            in_json=parsed_args.json,
            in_reverse=parsed_args.reverse,
        )
def show_tags(
    filters: Optional[List[str]] = None,
    in_json: bool = False,
    in_reverse: bool = False,
) -> None:
    """Display directory tags.

    :param filters: Only show these (normalized) tag names when given.
    :param in_json: Emit machine-readable JSON instead of styled text.
    :param in_reverse: Show tag -> directories instead of directory -> tags.
    """
    tag_config = load_config_file()["tags"]
    tag_filters = normalize_tags(filters) if filters is not None else None

    if in_json:
        if in_reverse:
            payload = {
                tag: sorted(path.as_posix() for path in paths)
                for tag, paths in reverse_map(tag_config).items()
                if not tag_filters or tag in tag_filters
            }
        else:
            payload = {
                path.as_posix(): sorted(tags)
                for path, tags in tag_config.items()
                if not tag_filters or tags.intersection(tag_filters)
            }
        print(json.dumps(payload, indent=2, sort_keys=True))
    elif in_reverse:
        tag_to_paths = reverse_map(tag_config)
        for tag in sorted(tag_to_paths):
            if not tag_filters or tag in tag_filters:
                print(style.tag(tag))
                for path in sorted(tag_to_paths[tag]):
                    print("  " + style.path(path))
    else:
        for path, tags in tag_config.items():
            if not tag_filters or tags.intersection(tag_filters):
                print(style.mapping(path, tags))
def clean_tags(skip_prompts: bool = True) -> None:
    """Remove tag entries whose directories no longer exist on disk.

    :param skip_prompts: When True, apply the change without confirmation.
    """
    config = load_config_file()
    tag_config = config["tags"]
    stale = [(path, tags) for path, tags in tag_config.items() if not path.is_dir()]
    if not stale:
        print("Nothing to clean")
        return
    for path, tags in stale:
        print(style.diff(path, del_tags=tags))
        del tag_config[path]
    if skip_prompts or prompt_user():
        save_config_file(config)
        print("Tags cleaned successfully")
def purge_tags(skip_prompts: bool = True) -> None:
    """Delete every tag mapping from the configuration.

    :param skip_prompts: When True, apply the change without confirmation.
    """
    tag_config = load_config_file()["tags"]
    if not tag_config:
        print("Nothing to purge")
        return
    for path, tags in tag_config.items():
        print(style.diff(path, del_tags=tags))
    if skip_prompts or prompt_user():
        save_config_file(get_new_config())
        print("Tags purged successfully")
| 2,392 |
405 |
package weixin.guanjia.core.entity.message.customer;
/**
 * Image payload for a WeChat customer-service message; carries only the
 * uploaded media asset's identifier. (Accessor names keep the underscore to
 * match the JSON field {@code media_id} expected by callers.)
 */
public class Image {
    private String media_id;

    public String getMedia_id() {
        return media_id;
    }

    public void setMedia_id(String mediaId) {
        this.media_id = mediaId;
    }
}
| 85 |
2,494 |
<reponame>jxcore/jxcore<gh_stars>1000+
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <v8.h>
namespace v8 {
// NOT IMPLEMENTED
// Stubbed-out profiler surface: kept so embedder code referencing the v8
// profiling API still compiles; the real entry points remain commented out.
class V8_EXPORT CpuProfiler {
 public:
  // void SetSamplingInterval(int us);
  // void StartProfiling(Handle<String> title, bool record_samples = false);
  // CpuProfile* StopProfiling(Handle<String> title);
  void SetIdle(bool is_idle) {}  // accepted and ignored by this stub
};
// NOT IMPLEMENTED
// Heap-profiler stub: registering a wrapper-info provider is a no-op here.
class V8_EXPORT HeapProfiler {
 public:
  // Factory invoked to describe native wrappers of the given class id.
  typedef RetainedObjectInfo *(*WrapperInfoCallback)(
      uint16_t class_id, Handle<Value> wrapper);
  void SetWrapperClassInfoProvider(
      uint16_t class_id, WrapperInfoCallback callback) {}
};
// NOT IMPLEMENTED
// Interface describing embedder-retained native objects; subclasses must
// supply identity (IsEquivalent/GetHash) and a label. The remaining virtuals
// have defaults meaning "no extra metadata provided".
class V8_EXPORT RetainedObjectInfo {
 public:
  virtual void Dispose() = 0;
  virtual bool IsEquivalent(RetainedObjectInfo *other) = 0;
  virtual intptr_t GetHash() = 0;
  virtual const char *GetLabel() = 0;
  virtual const char *GetGroupLabel() { return nullptr; }
  virtual intptr_t GetElementCount() { return 0; }
  virtual intptr_t GetSizeInBytes() { return 0; }
};
} // namespace v8
| 815 |
2,406 |
<filename>model-optimizer/unit_tests/extensions/front/mxnet/ssd_pattern_flatten_softmax_activation_test.py<gh_stars>1000+
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from extensions.front.mxnet.ssd_pattern_flatten_softmax_activation import SsdPatternFlattenSoftmaxActivation
from mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
class TestSsdPatternFlattenSoftmaxActivation(unittest.TestCase):
    def test_pattern_remove_transpose(self):
        # Minimal graph containing the pattern the transform targets:
        # SoftMax feeding _contrib_MultiBoxDetection (plus identity anchors).
        graph = build_graph({'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'},
                             'node_2': {'type': 'Identity', 'kind': 'op'},
                             'node_3': {'type': 'Identity', 'kind': 'op'},
                             'node_softmax_activation': {'type': 'SoftMax', 'kind': 'op', 'op': 'SoftMax'},
                             'node_multi_box_detection': {'type': '_contrib_MultiBoxDetection', 'kind': 'op',
                                                          'op': '_contrib_MultiBoxDetection'},
                             'node_4': {'type': 'Identity', 'kind': 'op'},
                             },
                            [('node_1', 'node_softmax_activation'),
                             ('node_2', 'node_multi_box_detection'),
                             ('node_softmax_activation', 'node_multi_box_detection'),
                             ('node_3', 'node_multi_box_detection'),
                             ('node_multi_box_detection', 'node_4'), ],
                            )
        pattern = SsdPatternFlattenSoftmaxActivation()
        pattern.find_and_replace_pattern(graph)
        # Assumes the transform appends the inserted node last in the node
        # iteration order; afterwards the direct SoftMax ->
        # MultiBoxDetection edge must be gone.
        flatten_name = list(graph.nodes())[-1]
        self.assertTrue(graph.has_node(flatten_name))
        self.assertFalse(graph.has_edge(Node(graph, 'node_softmax_activation').id, Node(graph, 'node_multi_box_detection').id))
| 963 |
5,133 |
<filename>processor/src/test/java/org/mapstruct/ap/testutil/compilation/annotation/ProcessorOptions.java
/*
* Copyright MapStruct Authors.
*
* Licensed under the Apache License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package org.mapstruct.ap.testutil.compilation.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Container annotation aggregating multiple {@link ProcessorOption}
 * annotations on a test class or an individual test method.
 *
 * @author <NAME>
 */
@Retention( RetentionPolicy.RUNTIME )
@Target( { ElementType.TYPE, ElementType.METHOD } )
public @interface ProcessorOptions {
    ProcessorOption[] value();
}
| 217 |
5,169 |
<reponame>Gantios/Specs<filename>Specs/c/c/e/MDInfo/0.1.0/MDInfo.podspec.json
{
"name": "MDInfo",
"version": "0.1.0",
"summary": "Deal with the localized name, short name and initials of days and months easily.",
"homepage": "https://github.com/elio-developer/MDInfo",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/elio-developer/MDInfo.git",
"tag": "0.1.0"
},
"platforms": {
"ios": "8.0"
},
"source_files": "MDInfo/Classes/**/*",
"pushed_with_swift_version": "3.0"
}
| 265 |
971 |
package com.ucar.datalink.biz.service.impl;
import com.alibaba.fastjson.JSONObject;
import com.ucar.datalink.biz.dal.InterceptorDAO;
import com.ucar.datalink.biz.dal.MediaDAO;
import com.ucar.datalink.biz.service.InterceptorService;
import com.ucar.datalink.common.errors.ValidationException;
import com.ucar.datalink.domain.interceptor.InterceptorInfo;
import com.ucar.datalink.domain.media.MediaMappingInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.stream.Collectors;
/**
* Created by user on 2017/3/22.
*/
@Service
public class InterceptorServiceImpl implements InterceptorService {

    @Autowired
    private InterceptorDAO interceptorDAO;

    @Autowired
    private MediaDAO mediaDAO;

    /** Returns all configured interceptors. */
    @Override
    public List<InterceptorInfo> getList() {
        return interceptorDAO.getList();
    }

    /** Looks up a single interceptor by its primary key. */
    @Override
    public InterceptorInfo getInterceptorById(Long id) {
        return interceptorDAO.findInterceptorById(id);
    }

    /**
     * Persists a new interceptor.
     *
     * @return true if at least one row was inserted
     */
    @Override
    public Boolean insert(InterceptorInfo interceptorInfo) {
        return interceptorDAO.insert(interceptorInfo) > 0;
    }

    /**
     * Updates an existing interceptor.
     *
     * @return true if at least one row was updated
     */
    @Override
    public Boolean update(InterceptorInfo interceptorInfo) {
        return interceptorDAO.update(interceptorInfo) > 0;
    }

    /**
     * Deletes an interceptor after verifying it is no longer referenced.
     *
     * @return true if at least one row was deleted
     * @throws ValidationException if any media mapping still uses it
     */
    @Override
    public Boolean delete(Long id) {
        checkAvailable(id);
        return interceptorDAO.delete(id) > 0;
    }

    /** Rejects deletion while any media mapping still references the interceptor. */
    private void checkAvailable(Long interceptorId) {
        List<MediaMappingInfo> mediaMappingInfos = mediaDAO.findMediaMappingsByInterceptorId(interceptorId);
        if (mediaMappingInfos.size() > 0) {
            throw new ValidationException(String.format("该拦截器正在应用于id为%s的映射,不能执行删除操作!",
                JSONObject.toJSONString(mediaMappingInfos.stream().map(m -> m.getId()).distinct().collect(Collectors.toList()))));
        }
    }
}
| 894 |
348 |
{"nom":"Largeasse","circ":"3ème circonscription","dpt":"Deux-Sèvres","inscrits":555,"abs":311,"votants":244,"blancs":12,"nuls":6,"exp":226,"res":[{"nuance":"REM","nom":"<NAME>","voix":123},{"nuance":"LR","nom":"<NAME>","voix":103}]}
| 92 |
5,378 |
<reponame>st--/jupytext<filename>tests/notebooks/mirror/ipynb_to_script_vim_folding_markers/jupyter.py<gh_stars>1000+
# ---
# jupyter:
# jupytext:
# cell_markers: '{{{,}}}'
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter notebook
#
# This notebook is a simple jupyter notebook. It only has markdown and code cells. And it does not contain consecutive markdown cells. We start with an addition:
a = 1
b = 2
a + b
# Now we return a few tuples
a, b
a, b, a+b
# And this is already the end of the notebook
| 228 |
555 |
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
from typing import List
import logging
logger = logging.getLogger(__name__)
class AutoModel:
    """Seq2seq translation backend built on HuggingFace ``transformers``.

    Wraps an ``AutoModelForSeq2SeqLM`` plus its tokenizer and exposes the
    EasyNMT translation interface. A ``model_name``/``tokenizer_name`` of
    ``"."`` means "load from ``easynmt_path``" (used when restoring a
    previously saved model).
    """

    def __init__(self, model_name: str, tokenizer_name: str = None, easynmt_path: str = None, lang_map=None, tokenizer_args=None):
        self.lang_map = {} if lang_map is None else lang_map
        self.tokenizer_args = {} if tokenizer_args is None else tokenizer_args
        # The tokenizer defaults to the model name *before* "." resolution,
        # so a saved model ("." / ".") restores both from easynmt_path.
        resolved_tokenizer = model_name if tokenizer_name is None else tokenizer_name
        resolved_model = model_name
        if resolved_model == ".":
            resolved_model = easynmt_path
        if resolved_tokenizer == ".":
            resolved_tokenizer = easynmt_path
        self.model = AutoModelForSeq2SeqLM.from_pretrained(resolved_model)
        self.tokenizer = AutoTokenizer.from_pretrained(resolved_tokenizer, **self.tokenizer_args)

    def translate_sentences(self, sentences: List[str], source_lang: str, target_lang: str, device: str, beam_size: int = 5, **kwargs):
        """Translate a batch of sentences with beam search.

        Language codes are remapped through ``self.lang_map`` first; for
        multilingual tokenizers the target language is forced via
        ``forced_bos_token_id``.
        """
        self.model.to(device)
        source_lang = self.lang_map.get(source_lang, source_lang)
        target_lang = self.lang_map.get(target_lang, target_lang)
        self.tokenizer.src_lang = source_lang
        encoded = self.tokenizer(sentences, truncation=True, padding=True, return_tensors="pt")
        encoded = {name: tensor.to(device) for name, tensor in encoded.items()}
        with torch.no_grad():
            if hasattr(self.tokenizer, 'lang_code_to_id'):
                kwargs['forced_bos_token_id'] = self.tokenizer.lang_code_to_id[target_lang]
            generated = self.model.generate(**encoded, num_beams=beam_size, **kwargs)
        return [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in generated]

    def save(self, output_path):
        """Persist model and tokenizer; returns the kwargs to reload them."""
        self.model.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)
        return {
            "model_name": ".",
            "tokenizer_name": ".",
            "lang_map": self.lang_map,
            "tokenizer_args": self.tokenizer_args,
        }
| 967 |
407 |
package com.alibaba.smart.framework.engine.test.cases;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.alibaba.smart.framework.engine.common.util.CollectionUtil;
import com.alibaba.smart.framework.engine.common.util.MapUtil;
import com.alibaba.smart.framework.engine.configuration.DelegationExecutor;
import com.alibaba.smart.framework.engine.configuration.ListenerExecutor;
import com.alibaba.smart.framework.engine.constant.ExtensionElementsConstant;
import com.alibaba.smart.framework.engine.context.ExecutionContext;
import com.alibaba.smart.framework.engine.listener.ListenerAggregation;
import com.alibaba.smart.framework.engine.model.assembly.Activity;
import com.alibaba.smart.framework.engine.model.assembly.ExtensionElementContainer;
import com.alibaba.smart.framework.engine.model.assembly.ExtensionElements;
import com.alibaba.smart.framework.engine.model.assembly.ProcessDefinition;
import com.alibaba.smart.framework.engine.model.instance.InstanceStatus;
import com.alibaba.smart.framework.engine.model.instance.ProcessInstance;
import com.alibaba.smart.framework.engine.pvm.event.PvmEventConstant;
import com.alibaba.smart.framework.engine.test.delegation.OrchestrationAdapter;
import com.alibaba.smart.framework.engine.test.delegation.OrchestrationAdapterImplement;
import com.alibaba.smart.framework.engine.util.ClassUtil;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertEquals;
public class DelegationAndListenerExecutorExtensionTest extends CommonTestCode {
    private static final Logger LOGGER = LoggerFactory.getLogger(DelegationAndListenerExecutorExtensionTest.class);
    @Override
    public void setUp() {
        super.setUp();
        // Custom delegation executor: an activity declares its implementation
        // class in a "class" property; it is instantiated (or fetched from the
        // ClassUtil cache) and invoked directly.
        super.processEngineConfiguration.setDelegationExecutor(new DelegationExecutor() {
            @Override
            public void execute(ExecutionContext context, Activity activity) {
                Map<String, String> properties = activity.getProperties();
                if(MapUtil.isNotEmpty(properties)){
                    String className = properties.get("class");
                    if(null != className){
                        OrchestrationAdapter adapter = (OrchestrationAdapter) ClassUtil.createOrGetInstance(className);
                        adapter.execute(null,null);
                    }else {
                        LOGGER.info("No behavior found:"+activity.getId());
                    }
                }
            }
        });
        // Custom listener executor: execution listeners registered in the
        // model's extension elements are looked up by event name and invoked.
        super.processEngineConfiguration.setListenerExecutor(new ListenerExecutor() {
            @Override
            public void execute(PvmEventConstant event, ExtensionElementContainer extensionElementContaine,
                                ExecutionContext context) {
                String eventName = event.name();
                ExtensionElements extensionElements = extensionElementContaine.getExtensionElements();
                if(null != extensionElements){
                    ListenerAggregation extension = (ListenerAggregation)extensionElements.getDecorationMap().get(
                        ExtensionElementsConstant.EXECUTION_LISTENER);
                    if(null != extension){
                        List<String> listenerClassNameList = extension.getEventListenerMap().get(eventName);
                        if(CollectionUtil.isNotEmpty(listenerClassNameList)){
                            for (String listenerClassName : listenerClassNameList) {
                                OrchestrationAdapter adapter = (OrchestrationAdapter) ClassUtil.createOrGetInstance(listenerClassName);
                                adapter.execute(null,null);
                            }
                        }
                    }
                }
            }
        });
    }
    @Test
    public void test() {
        // Reset the adapter's invocation counter so the final assertion only
        // counts executions triggered by this test run.
        OrchestrationAdapterImplement.resetCounter();
        ProcessDefinition processDefinition = repositoryCommandService
            .deploy("DelegationAndListenerExecutorExtensionTest.xml").getFirstProcessDefinition();
        assertEquals(5, processDefinition.getBaseElementList().size());
        //4.启动流程实例
        Map<String, Object> request = new HashMap<String, Object>();
        ProcessInstance processInstance = processCommandService.start(
            processDefinition.getId(), processDefinition.getVersion(), request
        );
        Assert.assertNotNull(processInstance);
        Assert.assertTrue(processInstance.getStatus().equals(InstanceStatus.completed));
        // Expect two adapter invocations: one from the delegation executor and
        // one from the listener executor — confirm against the test XML model.
        Assert.assertEquals(2L, OrchestrationAdapterImplement.counter.get());
    }
}
| 1,842 |
1,350 |
<reponame>Shashi-rk/azure-sdk-for-java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.avs.models;
import com.azure.core.util.ExpandableStringEnum;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.util.Collection;
/** Defines values for DatastoreProvisioningState. */
// NOTE: AutoRest-generated type (see the file header); regenerate from the
// API specification rather than editing by hand.
public final class DatastoreProvisioningState extends ExpandableStringEnum<DatastoreProvisioningState> {
    /** Static value Succeeded for DatastoreProvisioningState. */
    public static final DatastoreProvisioningState SUCCEEDED = fromString("Succeeded");
    /** Static value Failed for DatastoreProvisioningState. */
    public static final DatastoreProvisioningState FAILED = fromString("Failed");
    /** Static value Cancelled for DatastoreProvisioningState. */
    public static final DatastoreProvisioningState CANCELLED = fromString("Cancelled");
    /** Static value Pending for DatastoreProvisioningState. */
    public static final DatastoreProvisioningState PENDING = fromString("Pending");
    /** Static value Creating for DatastoreProvisioningState. */
    public static final DatastoreProvisioningState CREATING = fromString("Creating");
    /** Static value Updating for DatastoreProvisioningState. */
    public static final DatastoreProvisioningState UPDATING = fromString("Updating");
    /** Static value Deleting for DatastoreProvisioningState. */
    public static final DatastoreProvisioningState DELETING = fromString("Deleting");
    /**
     * Creates or finds a DatastoreProvisioningState from its string representation.
     *
     * @param name a name to look for.
     * @return the corresponding DatastoreProvisioningState.
     */
    @JsonCreator
    public static DatastoreProvisioningState fromString(String name) {
        return fromString(name, DatastoreProvisioningState.class);
    }

    /** @return known DatastoreProvisioningState values. */
    public static Collection<DatastoreProvisioningState> values() {
        return values(DatastoreProvisioningState.class);
    }
}
| 661 |
1,816 |
<filename>open-vm-tools/services/plugins/serviceDiscovery/serviceDiscovery.c
/*********************************************************
* Copyright (C) 2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation version 2.1 and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser GNU General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*********************************************************/
/*
* serviceDiscovery.c --
*
* Captures the information about services inside the guest
* and writes it to Namespace DB.
*/
#include <string.h>
#include "serviceDiscoveryInt.h"
#include "vmware.h"
#include "conf.h"
#include "guestApp.h"
#include "dynbuf.h"
#include "util.h"
#include "vmcheck.h"
#include "vmware/guestrpc/serviceDiscovery.h"
#include "vmware/tools/threadPool.h"
#include "vmware/tools/utils.h"
#include "vmware/tools/guestrpc.h"
#if !defined(__APPLE__)
#include "vm_version.h"
#include "embed_version.h"
#include "vmtoolsd_version.h"
VM_EMBED_VERSION(VMTOOLSD_VERSION_STRING);
#endif
#define NSDB_PRIV_GET_VALUES_CMD "namespace-priv-get-values"
#define NSDB_PRIV_SET_KEYS_CMD "namespace-priv-set-keys"
#if defined (_WIN32)
#define SCRIPT_EXTN ".bat"
/*
* Scripts used by plugin in Windows guests to capture information about
* running services.
*/
#define SERVICE_DISCOVERY_SCRIPT_PERFORMANCE_METRICS \
"get-performance-metrics" SCRIPT_EXTN
#define SERVICE_DISCOVERY_WIN_SCRIPT_RELATIONSHIP "get-parent-child-rels" \
SCRIPT_EXTN
#define SERVICE_DISCOVERY_WIN_SCRIPT_NET "net-share" SCRIPT_EXTN
#define SERVICE_DISCOVERY_WIN_SCRIPT_IIS_PORTS "get-iis-ports-info" SCRIPT_EXTN
#else
#define SCRIPT_EXTN ".sh"
/*
* Scripts used by plugin in Linux guests to capture information about
* running services.
*/
#define SERVICE_DISCOVERY_SCRIPT_PERFORMANCE_METRICS \
"get-listening-process-perf-metrics" SCRIPT_EXTN
#endif
/*
* Scripts used by plugin in both Windows and Linux guests to capture
* information about running services.
*/
#define SERVICE_DISCOVERY_SCRIPT_PROCESSES "get-listening-process-info" \
SCRIPT_EXTN
#define SERVICE_DISCOVERY_SCRIPT_CONNECTIONS "get-connection-info" SCRIPT_EXTN
#define SERVICE_DISCOVERY_SCRIPT_VERSIONS "get-versions" SCRIPT_EXTN
/*
* Default value for CONFNAME_SERVICE_DISCOVERY_DISABLED setting in
* tools configuration file.
*/
#define SERVICE_DISCOVERY_CONF_DEFAULT_DISABLED_VALUE FALSE
/*
* Polling interval of service discovery plugin in milliseconds
*/
#define SERVICE_DISCOVERY_POLL_INTERVAL 300000
/*
* Time shift for comparision of time read from the signal and
* current system time in milliseconds.
*/
#define SERVICE_DISCOVERY_WRITE_DELTA 60000
/*
* Time to wait in milliseconds before RPC operation
*/
#define SERVICE_DISCOVERY_RPC_WAIT_TIME 100
/*
* Maximum number of keys that can be deleted by one operation
*/
#define SERVICE_DISCOVERY_DELETE_CHUNK_SIZE 25
/* Pairs a Namespace DB key with the guest script that produces its value. */
typedef struct {
   gchar *keyName;
   gchar *val;
} KeyNameValue;
/*
 * Mapping from Namespace DB keys to the platform-specific discovery scripts
 * (see the SCRIPT_EXTN definitions above) that gather each kind of data.
 */
static KeyNameValue gKeyScripts[] = {
   { SERVICE_DISCOVERY_KEY_PROCESSES, SERVICE_DISCOVERY_SCRIPT_PROCESSES },
   { SERVICE_DISCOVERY_KEY_CONNECTIONS,
     SERVICE_DISCOVERY_SCRIPT_CONNECTIONS },
   { SERVICE_DISCOVERY_KEY_PERFORMANCE_METRICS,
     SERVICE_DISCOVERY_SCRIPT_PERFORMANCE_METRICS },
   { SERVICE_DISCOVERY_KEY_VERSIONS, SERVICE_DISCOVERY_SCRIPT_VERSIONS },
#if defined(_WIN32)
   { SERVICE_DISCOVERY_WIN_KEY_RELATIONSHIP,
     SERVICE_DISCOVERY_WIN_SCRIPT_RELATIONSHIP },
   { SERVICE_DISCOVERY_WIN_KEY_IIS_PORTS,
     SERVICE_DISCOVERY_WIN_SCRIPT_IIS_PORTS },
   { SERVICE_DISCOVERY_WIN_KEY_NET, SERVICE_DISCOVERY_WIN_SCRIPT_NET },
#endif
};
/* Timer source driving the periodic discovery run; NULL when not armed. */
static GSource *gServiceDiscoveryTimeoutSource = NULL;
/* Guest time (ms) of the last Namespace DB write; 0 before the first write. */
static gint64 gLastWriteTime = 0;
/* Presumably resolved full paths of the discovery scripts — confirm with
 * the code that populates it (outside this excerpt). */
static GArray *gFullPaths = NULL;
/* TRUE while a discovery task is submitted to the thread pool. */
static volatile Bool gTaskSubmitted = FALSE;
/*
*****************************************************************************
* GetGuestTimeInMillis --
*
* Get system current time in millis.
*
* @retval time in millis.
*
*****************************************************************************
*/
/* Returns the guest's current wall-clock time in milliseconds. */
static gint64
GetGuestTimeInMillis(void)
{
   const gint64 nowMicros = g_get_real_time();
   return nowMicros / 1000; /* microseconds -> milliseconds */
}
/*
*****************************************************************************
* SendRpcMessage --
*
* Sends message over RPC channel.
*
* @param[in] ctx Application context.
* @param[in] msg Message to send
* @param[in] msgLen Length of the message to send
* @param[out] result Rpc operation result, freed by callers
* @param[out] resultLen Length of Rpc operation result
*
* @retval TRUE RPC message send succeeded.
* @retval FALSE RPC message send failed.
*
*****************************************************************************
*/
static Bool
SendRpcMessage(ToolsAppCtx *ctx,
               char const *msg,
               size_t msgLen,
               char **result,
               size_t *resultLen)
{
   Bool status;
   RpcChannelType rpcChannelType = RpcChannel_GetType(ctx->rpc);
   g_debug("%s: Current RPC channel type: %d\n", __FUNCTION__, rpcChannelType);
   if (rpcChannelType == RPCCHANNEL_TYPE_PRIV_VSOCK) {
      /* Privileged vsocket channel is already up: send directly. */
      status = RpcChannel_Send(ctx->rpc, msg, msgLen, result, resultLen);
   } else {
      /*
       * After the vmsvc RPC channel falls back to backdoor, it could not
       * send through privileged guest RPC any more.
       */
      g_usleep(SERVICE_DISCOVERY_RPC_WAIT_TIME * 1000);
      status = RpcChannel_SendOneRawPriv(msg, msgLen, result, resultLen);
      /*
       * RpcChannel_SendOneRawPriv returns RPCCHANNEL_SEND_PERMISSION_DENIED
       * if the privileged vsocket can not be established.
       */
      if (!status && result != NULL &&
          strcmp(*result, RPCCHANNEL_SEND_PERMISSION_DENIED) == 0) {
         /* Single retry only; persistent failures are reported to callers. */
         g_debug("%s: Retrying RPC send", __FUNCTION__);
         free(*result);
         g_usleep(SERVICE_DISCOVERY_RPC_WAIT_TIME * 1000);
         status = RpcChannel_SendOneRawPriv(msg, msgLen, result, resultLen);
      }
   }
   return status;
}
/*
*****************************************************************************
* WriteData --
*
* Sends key-value update request to the Namespace DB.
*
* @param[in] ctx Application context.
* @param[in] key Key sent to the Namespace DB
* @param[in] value Service data sent to the Namespace DB
* @param[in] len Service data len
*
* @retval TRUE Namespace DB write over RPC succeeded.
* @retval FALSE Namespace DB write over RPC failed.
*
*****************************************************************************
*/
Bool
WriteData(ToolsAppCtx *ctx,
          const char *key,
          const char *data,
          const size_t len)
{
   Bool status = FALSE;
   DynBuf buf;
   gchar *timeStamp = NULL;
   /* Values are written as "<lastWriteTimeMs>,<payload>"; the timestamp is
    * only needed when there is a payload to write. */
   if (data != NULL) {
      timeStamp = g_strdup_printf("%" G_GINT64_FORMAT, gLastWriteTime);
   }
   DynBuf_Init(&buf);
   /*
    * Format is:
    *
    * namespace-set-keys <namespace>\0<numOps>\0<op>\0<key>\0<value>\0<oldVal>
    *
    * We have just a single op, and want to always set the value, clobbering
    * anything already there.
    */
   if (!DynBuf_Append(&buf, NSDB_PRIV_SET_KEYS_CMD,
                      strlen(NSDB_PRIV_SET_KEYS_CMD)) ||
       !DynBuf_Append(&buf, " ", 1) ||
       !DynBuf_AppendString(&buf, SERVICE_DISCOVERY_NAMESPACE_DB_NAME) ||
       !DynBuf_AppendString(&buf, "1") || // numOps
       !DynBuf_AppendString(&buf, "0") || // op 0 == setAlways
       !DynBuf_AppendString(&buf, key)) {
      g_warning("%s: Could not construct buffer header\n", __FUNCTION__);
      goto out;
   }
   if (data != NULL) {
      if (!DynBuf_Append(&buf, timeStamp, strlen(timeStamp)) ||
          !DynBuf_Append(&buf, ",", 1) ||
          !DynBuf_Append(&buf, data, len) ||
          !DynBuf_Append(&buf, "", 1)) {
         g_warning("%s: Could not construct write buffer\n", __FUNCTION__);
         goto out;
      }
   } else {
      /* NULL data: write an empty value for the key. */
      if (!DynBuf_Append(&buf, "", 1)) {
         g_warning("%s: Could not construct delete buffer\n", __FUNCTION__);
         goto out;
      }
   }
   /* Trailing NUL is the empty <oldVal> field required by the protocol. */
   if (!DynBuf_Append(&buf, "", 1)) {
      g_warning("%s: Could not construct buffer footer\n", __FUNCTION__);
      goto out;
   } else {
      char *result = NULL;
      size_t resultLen;
      status = SendRpcMessage(ctx, DynBuf_Get(&buf), DynBuf_GetSize(&buf),
                              &result, &resultLen);
      if (!status) {
         g_warning("%s: Failed to update %s, result: %s resultLen: %" FMTSZ
                   "u\n", __FUNCTION__, key, (result != NULL) ?
                   result : "(null)", resultLen);
      }
      if (result != NULL) {
         free(result);
      }
   }
out:
   DynBuf_Destroy(&buf);
   g_free(timeStamp);
   return status;
}
/*
*****************************************************************************
* ReadData --
*
* Reads value from Namespace DB by given key.
*
* @param[in] ctx Application context.
* @param[in] key Key sent to the Namespace DB
* @param[out] resultData Data fetched from Namespace DB, freed by callers
* @param[out] resultDataLen Length of data fetched from Namespace DB
*
* @retval TRUE Namespace DB read over RPC succeeded.
* @retval FALSE Namespace DB read over RPC failed.
*
*****************************************************************************
*/
static Bool
ReadData(ToolsAppCtx *ctx,        // IN: application context
         const char *key,        // IN: key to look up in the Namespace DB
         char **resultData,      // OUT: fetched value, freed by the caller
         size_t *resultDataLen)  // OUT: length of the fetched value
{
   Bool ok = FALSE;
   DynBuf request;

   ASSERT(key);

   /* Initialize outputs so callers see a defined state on any failure. */
   *resultData = NULL;
   *resultDataLen = 0;

   DynBuf_Init(&request);

   /*
    * Request format:
    *
    *    namespace-get-values <namespace>\0<key>\0...
    */
   if (!DynBuf_Append(&request, NSDB_PRIV_GET_VALUES_CMD,
                      strlen(NSDB_PRIV_GET_VALUES_CMD)) ||
       !DynBuf_Append(&request, " ", 1) ||
       !DynBuf_AppendString(&request, SERVICE_DISCOVERY_NAMESPACE_DB_NAME) ||
       !DynBuf_AppendString(&request, key)) {
      g_warning("%s: Could not construct request buffer\n", __FUNCTION__);
      goto done;
   }

   ok = SendRpcMessage(ctx, DynBuf_Get(&request), DynBuf_GetSize(&request),
                       resultData, resultDataLen);
   if (!ok) {
      g_debug("%s: Read over RPC failed, result: %s, resultDataLen: %" FMTSZ
              "u\n", __FUNCTION__, (*resultData != NULL) ?
              *resultData : "(null)", *resultDataLen);
   }

done:
   DynBuf_Destroy(&request);
   return ok;
}
/*
*****************************************************************************
* DeleteData --
*
* Deletes keys/values from Namespace DB.
*
* @param[in] ctx Application context.
* @param[in] keys Keys of entries to be deleted from the Namespace DB
*
* @retval TRUE Namespace DB delete over RPC succeeded.
* @retval FALSE Namespace DB delete over RPC failed or command buffer has not
* been constructed correctly.
*
*****************************************************************************
*/
static Bool
DeleteData(ToolsAppCtx *ctx,        // IN: application context
           const GPtrArray* keys)   // IN: keys of entries to delete
{
   Bool status = FALSE;
   DynBuf buf;
   int i;
   gchar *numKeys = g_strdup_printf("%d", keys->len);

   DynBuf_Init(&buf);

   /*
    * Format is:
    *
    * namespace-set-keys <namespace>\0<numOps>\0<op>\0<key>\0<value>\0<oldVal>
    *
    */
   if (!DynBuf_Append(&buf, NSDB_PRIV_SET_KEYS_CMD,
                      strlen(NSDB_PRIV_SET_KEYS_CMD)) ||
       !DynBuf_Append(&buf, " ", 1) ||
       !DynBuf_AppendString(&buf, SERVICE_DISCOVERY_NAMESPACE_DB_NAME) ||
       !DynBuf_AppendString(&buf, numKeys)) { // numOps
      g_warning("%s: Could not construct buffer header\n", __FUNCTION__);
      goto out;
   }

   /*
    * One op per key: op "0" (setAlways, per WriteData) with empty <value>
    * and <oldVal> fields, which presumably removes the entry.
    */
   for (i = 0; i < keys->len; ++i) {
      const char *key = (const char *) g_ptr_array_index(keys, i);

      g_debug("%s: Adding key %s to buffer\n", __FUNCTION__, key);
      if (!DynBuf_AppendString(&buf, "0") ||
          !DynBuf_AppendString(&buf, key) ||
          !DynBuf_Append(&buf, "", 1) ||
          !DynBuf_Append(&buf, "", 1)) {
         g_warning("%s: Could not construct delete buffer\n", __FUNCTION__);
         goto out;
      }
   }

   if (!DynBuf_Append(&buf, "", 1)) {
      g_warning("%s: Could not construct buffer footer\n", __FUNCTION__);
      goto out;
   } else {
      char *result = NULL;
      size_t resultLen;

      status = SendRpcMessage(ctx, DynBuf_Get(&buf), DynBuf_GetSize(&buf),
                              &result, &resultLen);
      if (!status) {
         g_warning("%s: Failed to delete keys, result: %s resultLen: %" FMTSZ
                   "u\n", __FUNCTION__, (result != NULL) ? result : "(null)",
                   resultLen);
      }
      free(result);
   }

out:
   DynBuf_Destroy(&buf);
   g_free(numKeys);
   return status;
}
/*
*****************************************************************************
* DeleteDataAndFree --
*
* Deletes the specified keys in Namespace DB and frees memory
* for every key.
*
* @param[in] ctx Application context.
* @param[in/out] keys Keys to be deleted.
*
*****************************************************************************
*/
static void
DeleteDataAndFree(ToolsAppCtx *ctx,   // IN: application context
                  GPtrArray *keys)    // IN/OUT: keys to delete; emptied on return
{
   int idx;

   if (!DeleteData(ctx, keys)) {
      g_warning("%s: Failed to delete data\n", __FUNCTION__);
   }

   /* Release the key strings and reset the array for reuse by the caller. */
   for (idx = 0; idx < keys->len; ++idx) {
      g_free((gchar *) g_ptr_array_index(keys, idx));
   }
   g_ptr_array_set_size(keys, 0);
}
/*
*****************************************************************************
* CleanupNamespaceDB --
*
* Deletes all the chunks written to the Namespace DB in previous cycle.
*
* @param[in] ctx Application context.
*
*****************************************************************************
*/
static void
CleanupNamespaceDB(ToolsAppCtx *ctx) {
   int i;
   GPtrArray *keys = g_ptr_array_new();

   g_debug("%s: Performing cleanup of previous data\n", __FUNCTION__);
   ASSERT(gFullPaths);

   for (i = 0; i < gFullPaths->len; i++) {
      char *value = NULL;
      size_t len = 0;
      KeyNameValue tmp = g_array_index(gFullPaths, KeyNameValue, i);

      /*
       * Read count of chunks, ignore timestamp, iterate over chunks
       * and remove them.
       */
      if (ReadData(ctx, tmp.keyName, &value, &len) && len > 1) {
         char *token = NULL;

         g_debug("%s: Read %s from Namespace DB\n", __FUNCTION__, value);
         /* Queue the head key itself; deletes are batched in chunks. */
         g_ptr_array_add(keys, g_strdup(tmp.keyName));
         if (keys->len >= SERVICE_DISCOVERY_DELETE_CHUNK_SIZE) {
            DeleteDataAndFree(ctx, keys);
         }
         /* Value is "<timestamp>,<chunkCount>"; first strtok skips the ts. */
         if (NULL == strtok(value, ",")) {
            g_warning("%s: Malformed data for %s in Namespace DB",
                      __FUNCTION__, tmp.keyName);
            if (value) {
               free(value);
               value = NULL;
            }
            continue;
         }
         token = strtok(NULL, ",");
         if (token != NULL) {
            int count = (int) g_ascii_strtoll(token, NULL, 10);
            int j;

            /* Chunk keys are named "<key>-1" .. "<key>-<count>". */
            for (j = 0; j < count; j++) {
               gchar *msg = g_strdup_printf("%s-%d", tmp.keyName, j + 1);
               g_ptr_array_add(keys, msg);
               if (keys->len >= SERVICE_DISCOVERY_DELETE_CHUNK_SIZE) {
                  DeleteDataAndFree(ctx, keys);
               }
            }
         } else {
            g_warning("%s: Chunk count has invalid value %s", __FUNCTION__,
                      value);
         }
      } else {
         g_warning("%s: Key %s not found in Namespace DB\n", __FUNCTION__,
                   tmp.keyName);
      }
      if (value) {
         free(value);
         value = NULL;
      }
   }

   /* Flush any queued keys that did not fill a complete delete batch. */
   if (keys->len >= 1) {
      DeleteDataAndFree(ctx, keys);
   }
   g_ptr_array_free(keys, TRUE);
}
/*
*****************************************************************************
* ServiceDiscoveryTask --
*
* Task to gather discovered services' data and write to Namespace DB.
*
* @param[in] ctx Application context.
* @param[in] data Data pointer, not used.
*
*****************************************************************************
*/
static void
ServiceDiscoveryTask(ToolsAppCtx *ctx,   // IN: application context
                     void *data)         // IN: unused
{
   Bool status = FALSE;
   int i;
   gint64 previousWriteTime = gLastWriteTime;

   /* Guards against overlapping cycles; cleared on every exit path below. */
   gTaskSubmitted = TRUE;

   /*
    * We are going to write to Namespace DB, update glastWriteTime
    */
   gLastWriteTime = GetGuestTimeInMillis();

   /*
    * Reset "ready" flag to stop readers until all data is written
    */
   status = WriteData(ctx, SERVICE_DISCOVERY_KEY_READY, "FALSE", 5);
   if (!status) {
      /* Roll back the timestamp so the next cycle is not pushed out. */
      gLastWriteTime = previousWriteTime;
      g_warning("%s: Failed to reset %s flag", __FUNCTION__,
                SERVICE_DISCOVERY_KEY_READY);
      goto out;
   }

   /*
    * Remove chunks written to DB in the previous iteration
    */
   CleanupNamespaceDB(ctx);

   /* Run each discovery script and publish its output under its key. */
   for (i = 0; i < gFullPaths->len; i++) {
      KeyNameValue tmp = g_array_index(gFullPaths, KeyNameValue, i);

      if (!PublishScriptOutputToNamespaceDB(ctx, tmp.keyName, tmp.val)) {
         g_debug("%s: PublishScriptOutputToNamespaceDB failed for script %s\n",
                 __FUNCTION__, tmp.val);
      }
   }

   /*
    * Update ready flag
    */
   status = WriteData(ctx, SERVICE_DISCOVERY_KEY_READY, "TRUE", 4);
   if (!status) {
      g_warning("%s: Failed to update ready flag", __FUNCTION__);
   }

out:
   gTaskSubmitted = FALSE;
}
/*
*****************************************************************************
* checkForWrite --
*
* Performs needed checks to decide if Data should be written to Namespace DB
* or not.
*
* First check - checks if interval related information, stored in Namespace DB
* under key "signal" and in format of "interval,timestamp" is outdated or not.
*
* Second check - checks if time greater than interval read from Namespace DB
* has elapsed since the last write operation.
*
* @param[in] ctx The application context.
*
* @retval TRUE Execute scripts and write service data to Namespace DB.
 * @retval FALSE Omit this cycle without any script running.
*
*****************************************************************************
*/
static Bool
checkForWrite(ToolsAppCtx *ctx)   // IN: application context
{
   char *signal = NULL;
   size_t signalLen = 0;
   Bool result = FALSE;

   /*
    * Read signal from Namespace DB
    */
   if (!ReadData(ctx, SERVICE_DISCOVERY_KEY_SIGNAL, &signal, &signalLen)) {
      g_debug("%s: Failed to read necessary information from Namespace DB\n",
              __FUNCTION__);
   } else {
      if ((signal != NULL) && (strcmp(signal, "")) && signalLen > 0) {
         char *token1;
         char *token2;

         g_debug("%s: signal = %s last write time = %" G_GINT64_FORMAT "\n",
                 __FUNCTION__, signal, gLastWriteTime);
         /*
          * parse signal, it should be in "interval,timestamp" format
          */
         token1 = strtok(signal, ",");
         token2 = strtok(NULL, ",");
         if (token1 != NULL && token2 != NULL) {
            gint64 currentTime = GetGuestTimeInMillis();
            int clientInterval = (int) g_ascii_strtoll(token1, NULL, 10);
            gint64 clientTimestamp = g_ascii_strtoll(token2, NULL, 10);

            if (clientInterval == 0 || clientTimestamp == 0) {
               g_warning("%s: Wrong value of interval and timestamp",
                         __FUNCTION__);
            } else if ((currentTime - clientTimestamp) < (5 * clientInterval)) {
               /* Signal is fresh; write only if a full interval elapsed. */
               if ((currentTime - gLastWriteTime +
                    SERVICE_DISCOVERY_WRITE_DELTA) >= clientInterval) {
                  result = TRUE;
               }
            } else {
               /*
                * Signal is outdated, reset the last write time
                */
               gLastWriteTime = 0;
            }
            g_debug("%s: result=%s client interval = %d "
                    "client timestamp =% " G_GINT64_FORMAT
                    " system time = %" G_GINT64_FORMAT
                    " previous write time = %" G_GINT64_FORMAT "\n",
                    __FUNCTION__, result ? "true" : "false", clientInterval,
                    clientTimestamp, currentTime, gLastWriteTime);
         } else {
            g_warning("%s: Wrong value of signal", __FUNCTION__);
         }
      } else {
         g_warning("%s: signal was NULL or empty", __FUNCTION__);
      }
   }

   if (signal) {
      free(signal);
   }
   return result;
}
/*
*****************************************************************************
* ServiceDiscoveryThread --
*
* Creates a new thread that collects all the desired application related
* information and updates the Namespace DB.
*
* @param[in] data The application context.
*
* @return TRUE to indicate that the timer should be rescheduled.
*
*****************************************************************************
*/
static Bool
ServiceDiscoveryThread(gpointer data)   // IN: the application context
{
   ToolsAppCtx *ctx = data;

   /*
    * The gTaskSubmitted test comes first: while a task is in flight this
    * cycle is skipped without calling checkForWrite(), so the last write
    * time is never reset by a concurrent cycle.
    */
   if (!gTaskSubmitted && checkForWrite(ctx)) {
      g_debug("%s: Submitting task to write\n", __FUNCTION__);
      if (!ToolsCorePool_SubmitTask(ctx, ServiceDiscoveryTask, NULL, NULL)) {
         g_warning("%s: failed to start information gather thread\n",
                   __FUNCTION__);
      }
   } else {
      g_debug("%s: Data should not be written taskSubmitted = %s\n",
              __FUNCTION__, gTaskSubmitted ? "True" : "False");
   }

   /* Always reschedule the timer. */
   return TRUE;
}
/*
*****************************************************************************
* TweakDiscoveryLoop --
*
* @brief Start service discovery poll loop.
*
* @param[in] ctx The app context.
*
*****************************************************************************
*/
static void
TweakDiscoveryLoop(ToolsAppCtx *ctx)   // IN: the app context
{
   /* Idempotent: do nothing if the poll loop is already running. */
   if (gServiceDiscoveryTimeoutSource != NULL) {
      return;
   }

   gServiceDiscoveryTimeoutSource =
      g_timeout_source_new(SERVICE_DISCOVERY_POLL_INTERVAL);
   VMTOOLSAPP_ATTACH_SOURCE(ctx, gServiceDiscoveryTimeoutSource,
                            ServiceDiscoveryThread, ctx, NULL);
   /* The attached main loop now owns the reference. */
   g_source_unref(gServiceDiscoveryTimeoutSource);
}
/*
******************************************************************************
* ServiceDiscoveryServerConfReload --
*
* @brief Reconfigures the poll loop interval upon config file reload.
*
* @param[in] src Unused.
* @param[in] ctx The application context.
* @param[in] data Unused.
*
******************************************************************************
*/
static void
ServiceDiscoveryServerConfReload(gpointer src,       // IN: unused
                                 ToolsAppCtx *ctx,   // IN: application context
                                 gpointer data)      // IN: unused
{
   gboolean isDisabled;

   isDisabled =
      VMTools_ConfigGetBoolean(ctx->config,
                               CONFGROUPNAME_SERVICEDISCOVERY,
                               CONFNAME_SERVICEDISCOVERY_DISABLED,
                               SERVICE_DISCOVERY_CONF_DEFAULT_DISABLED_VALUE);

   if (isDisabled) {
      /* Tear down the loop only if it is currently running. */
      if (gServiceDiscoveryTimeoutSource != NULL) {
         gLastWriteTime = 0;
         g_source_destroy(gServiceDiscoveryTimeoutSource);
         gServiceDiscoveryTimeoutSource = NULL;
         g_info("%s: Service discovery loop disabled\n", __FUNCTION__);
      }
   } else {
      g_info("%s: Service discovery loop started\n", __FUNCTION__);
      TweakDiscoveryLoop(ctx);
   }
}
/*
*****************************************************************************
* ServiceDiscoveryServerShutdown --
*
* Cleanup internal data on shutdown.
*
* @param[in] src The source object.
* @param[in] ctx Unused.
* @param[in] data Unused.
*
*****************************************************************************
*/
static void
ServiceDiscoveryServerShutdown(gpointer src,       // IN: the source object
                               ToolsAppCtx *ctx,   // IN: unused
                               gpointer data)      // IN: unused
{
   if (gServiceDiscoveryTimeoutSource != NULL) {
      g_source_destroy(gServiceDiscoveryTimeoutSource);
      gServiceDiscoveryTimeoutSource = NULL;
   }

   if (gFullPaths != NULL) {
      int idx = 0;
      guint count = gFullPaths->len;

      /* Free both strings owned by every KeyNameValue entry. */
      for (idx = 0; idx < count; ++idx) {
         g_free(g_array_index(gFullPaths, KeyNameValue, idx).keyName);
         g_free(g_array_index(gFullPaths, KeyNameValue, idx).val);
      }
      g_array_free(gFullPaths, TRUE);
   }
}
/*
*****************************************************************************
* ConstructScriptPaths --
*
* Construct final paths of the scripts that will be used for execution.
*
*****************************************************************************
*/
static void
ConstructScriptPaths(void)
{
   int i;
   gchar *scriptInstallDir;
#if !defined(OPEN_VM_TOOLS)
   gchar *toolsInstallDir;
#endif

   /* Built only once; subsequent calls are no-ops. */
   if (gFullPaths != NULL) {
      return;
   }
   gFullPaths = g_array_sized_new(FALSE, TRUE, sizeof(KeyNameValue),
                                  ARRAYSIZE(gKeyScripts));
#if defined(OPEN_VM_TOOLS)
   scriptInstallDir = Util_SafeStrdup(VMTOOLS_SERVICE_DISCOVERY_SCRIPTS);
#else
   /* Commercial Tools: scripts live under <install>/serviceDiscovery/scripts. */
   toolsInstallDir = GuestApp_GetInstallPath();
   scriptInstallDir = g_strdup_printf("%s%s%s%s%s", toolsInstallDir, DIRSEPS,
                                      "serviceDiscovery", DIRSEPS, "scripts");
   g_free(toolsInstallDir);
#endif
   for (i = 0; i < ARRAYSIZE(gKeyScripts); ++i) {
      KeyNameValue tmp;

      tmp.keyName = g_strdup_printf("%s", gKeyScripts[i].keyName);
#if defined(_WIN32)
      /* Quote the path so spaces in the install dir survive execution. */
      tmp.val = g_strdup_printf("\"%s%s%s\"", scriptInstallDir,
                                DIRSEPS, gKeyScripts[i].val);
#else
      tmp.val = g_strdup_printf("%s%s%s", scriptInstallDir, DIRSEPS,
                                gKeyScripts[i].val);
#endif
      g_array_insert_val(gFullPaths, i, tmp);
   }
   g_free(scriptInstallDir);
}
/*
*****************************************************************************
* ToolsOnLoad --
*
* Plugin entry point. Initializes internal plugin state.
*
* @param[in] ctx The app context.
*
* @return The registration data.
*
*****************************************************************************
*/
TOOLS_MODULE_EXPORT ToolsPluginData *
ToolsOnLoad(ToolsAppCtx *ctx)   // IN: the app context
{
   static ToolsPluginData regData = {
      "serviceDiscovery",
      NULL,
      NULL
   };

   uint32 vmxVersion = 0;
   uint32 vmxType = VMX_TYPE_UNSET;

   /*
    * Return NULL to disable the plugin if not running in a VMware VM.
    */
   if (!ctx->isVMware) {
      g_info("%s: Not running in a VMware VM.\n", __FUNCTION__);
      return NULL;
   }

   /*
    * Return NULL to disable the plugin if VM is not running on ESX host.
    */
   if (!VmCheck_GetVersion(&vmxVersion, &vmxType) ||
       vmxType != VMX_TYPE_SCALABLE_SERVER) {
      g_info("%s, VM is not running on ESX host.\n", __FUNCTION__);
      return NULL;
   }

   /*
    * Return NULL to disable the plugin if not running in vmsvc daemon.
    */
   if (!TOOLS_IS_MAIN_SERVICE(ctx)) {
      g_info("%s: Not running in vmsvc daemon: container name='%s'.\n",
             __FUNCTION__, ctx->name);
      return NULL;
   }

   if (ctx->rpc != NULL) {
      ToolsPluginSignalCb sigs[] = {
         { TOOLS_CORE_SIG_SHUTDOWN, ServiceDiscoveryServerShutdown, NULL },
         { TOOLS_CORE_SIG_CONF_RELOAD, ServiceDiscoveryServerConfReload, NULL }
      };
      ToolsAppReg regs[] = {
         { TOOLS_APP_SIGNALS,
           VMTools_WrapArray(sigs, sizeof *sigs, ARRAYSIZE(sigs))
         }
      };
      gboolean disabled;

      regData.regs = VMTools_WrapArray(regs,
                                       sizeof *regs,
                                       ARRAYSIZE(regs));

      /*
       * Append scripts absolute paths based on installation dirs.
       */
      ConstructScriptPaths();

      disabled =
         VMTools_ConfigGetBoolean(ctx->config,
                                  CONFGROUPNAME_SERVICEDISCOVERY,
                                  CONFNAME_SERVICEDISCOVERY_DISABLED,
                                  SERVICE_DISCOVERY_CONF_DEFAULT_DISABLED_VALUE);
      if (!disabled) {
         TweakDiscoveryLoop(ctx);
      }

      /* Fixed: the source was corrupted to "return ®Data;" (HTML-entity
       * mangling of the address-of operator); the intended statement
       * returns the plugin registration data. */
      return &regData;
   }

   return NULL;
}
| 11,872 |
1,127 |
<reponame>ryanloney/openvino-1
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "xml_net_builder.hpp"
#include "rnn_referee.hpp"
#include <cpp/ie_cnn_network.h>
#include <vector>
#include <string>
enum Mode {
    CELL, /**< single LSTMCell layer */
    SEQ, /**< single LSTMSeq layer */
    DYN_SEQ, /**< single LSTMSeq layer with a sequence-length input */
    TI, /**< TI (TensorIterator) layer with LSTM body */
    TI_CSTM /**< TI layer with LSTM plus negative at the body */
};
enum Direction {
    FWD, /**< Forward. With stride 1 */
    BWD, /**< Backward. With stride -1 */
    BDR /**< Bidirectional. With stride 1 and -1 */
};
/**
* Topology generator for some RNN specific cases
*/
class RNNGen {
public:
    /** Sequence topology */
    RNNGen(size_t batch, size_t seq, CellDesc cell, Mode mode, Direction dir, int axis);

    /** Fillers used to initialize the generated network's inputs. */
    const std::vector<Filler> fillers() const;
    /** Checkers that validate outputs against the reference implementation. */
    const std::vector<Checker> checkers() const;

    /** Builds and returns the generated network. */
    InferenceEngine::CNNNetwork net();

private:
    const size_t D = 10; // Data size
    const size_t S = 5;  // State size
    const size_t G = 4;  // Number of gate
    const size_t N;      // Batch
    const size_t T;      // Sequence
    const int axis;      // Axis of sequence

    const Mode mode;
    const CellDesc cell;
    const Direction dir;
    const bool neg;      // whether a negative layer is added (TI_CSTM) -- TODO confirm

    size_t state_num = 0;   // number of recurrent state tensors -- presumably; verify
    size_t wSzB = 0;        // weights size in bytes -- presumably; verify
    size_t bSzB = 0;        // biases size in bytes -- presumably; verify

    // Dimensions: sequence-length input, state, input data, output data.
    InferenceEngine::SizeVector seq_l_dim, st_dim, id_dim, od_dim;

    InferenceEngine::TBlob<uint8_t>::Ptr weights;
    InferenceEngine::Blob::Ptr w_blob, b_blob;
    std::shared_ptr<RNN_Referee> referee;

private:
    std::string model();                                    // builds the network IR XML
    void add_TI(CommonTestUtils::V2NetBuilder &builder);    // adds a TensorIterator topology
    void add_SEQ(CommonTestUtils::V2NetBuilder &builder);   // adds a sequence-layer topology
    void add_CELL(CommonTestUtils::V2NetBuilder &builder);  // adds a single-cell topology
    std::map<std::string, std::string> basic_cell_attr();   // attributes common to all cells
};
| 791 |
831 |
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.idea.apk.viewer;
import com.android.SdkConstants;
import org.apache.commons.compress.utils.IOUtils;
import org.jetbrains.annotations.NotNull;
import org.junit.Test;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class ProtoXmlPrettyPrinterTest {
  /** Invalid proto bytes must surface as an IOException, not a crash or garbage output. */
  @Test
  public void decodeInvalidFileThrows() {
    // Prepare: two bytes that do not form a valid proto XML document.
    byte[] contents = new byte[] {0x03, 0x00};
    ProtoXmlPrettyPrinter prettyPrinter = new ProtoXmlPrettyPrinterImpl();

    // Act
    try {
      @SuppressWarnings("unused")
      String ignored = prettyPrinter.prettyPrint(contents);
      fail("prettyPrint should fail to decode invalid Proto XML content");
    } catch(IOException expected) {
    }
  }

  /** Decodes the manifest of the bundled test .aab and checks the exact pretty-printed XML. */
  @Test
  public void decodeValidFileWorks() throws IOException {
    // Prepare: extract the proto-encoded AndroidManifest.xml from the test bundle resource.
    final String manifestXmlPath = "base/manifest/" + SdkConstants.FN_ANDROID_MANIFEST_XML;
    byte[] contents = readAppBundleFileEntry(manifestXmlPath);
    ProtoXmlPrettyPrinterImpl prettyPrinter = new ProtoXmlPrettyPrinterImpl();

    // Act
    String xml = prettyPrinter.prettyPrint(contents);

    // Assert (line endings normalized so the test passes on Windows)
    assertEquals("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" +
                 "<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n" +
                 " package=\"com.example.myapplication\"\n" +
                 " android:versionCode=\"1\"\n" +
                 " android:versionName=\"1.0\" >\n" +
                 "\n" +
                 " <uses-sdk\n" +
                 " android:minSdkVersion=\"21\"\n" +
                 " android:targetSdkVersion=\"27\" />\n" +
                 "\n" +
                 " <application\n" +
                 " android:allowBackup=\"true\"\n" +
                 " android:debuggable=\"true\"\n" +
                 " android:icon=\"@mipmap/ic_launcher\"\n" +
                 " android:label=\"@string/app_name\"\n" +
                 " android:roundIcon=\"@mipmap/ic_launcher_round\"\n" +
                 " android:supportsRtl=\"true\"\n" +
                 " android:theme=\"@style/AppTheme\" >\n" +
                 " <activity\n" +
                 " android:name=\"com.example.myapplication.MainActivity\"\n" +
                 " android:label=\"@string/app_name\"\n" +
                 " android:theme=\"@style/AppTheme.NoActionBar\" >\n" +
                 " <intent-filter>\n" +
                 " <action android:name=\"android.intent.action.MAIN\" />\n" +
                 "\n" +
                 " <category android:name=\"android.intent.category.LAUNCHER\" />\n" +
                 " </intent-filter>\n" +
                 " </activity>\n" +
                 " </application>\n" +
                 "\n" +
                 "</manifest>\n", xml.replace("\r\n", "\n"));
  }

  /**
   * Reads a single entry out of the test app bundle resource {@code /bundle.aab}.
   *
   * @param path entry path inside the bundle zip
   * @return the entry's bytes
   * @throws IOException if the entry is missing or the resource cannot be read
   */
  @NotNull
  private static byte[] readAppBundleFileEntry(@SuppressWarnings("SameParameterValue") @NotNull String path) throws IOException {
    byte[] contents = null;
    try (InputStream file = ProtoXmlPrettyPrinterTest.class.getResourceAsStream("/bundle.aab")) {
      try (ZipInputStream zip = new ZipInputStream(file)) {
        // Linear scan: app bundles are small enough that indexing is unnecessary.
        for (ZipEntry entry = zip.getNextEntry(); entry != null; entry = zip.getNextEntry()) {
          if (entry.getName().equals(path)) {
            contents = IOUtils.toByteArray(zip);
            break;
          }
          zip.closeEntry();
        }
      }
    }
    if (contents == null) {
      throw new IOException(String.format("Invalid app bundle file, entry \"%s\" not found", path));
    }
    return contents;
  }
}
| 2,029 |
569 |
<filename>java/src/com/google/template/soy/data/internal/AbstractSoyMap.java<gh_stars>100-1000
/*
* Copyright 2020 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.template.soy.data.internal;
import com.google.template.soy.data.LoggingAdvisingAppendable;
import com.google.template.soy.data.SoyAbstractValue;
import com.google.template.soy.data.SoyMap;
import com.google.template.soy.data.SoyValue;
import java.io.IOException;
import javax.annotation.ParametersAreNonnullByDefault;
/**
* Base implementation of a SoyMap. This provides implementations of the SoyValue methods in terms
* of the SoyMap methods.
*/
@ParametersAreNonnullByDefault
abstract class AbstractSoyMap extends SoyAbstractValue implements SoyMap {

  @Override
  public boolean coerceToBoolean() {
    // A map is always truthy, mirroring JavaScript object semantics.
    return true;
  }

  @Override
  public String coerceToString() {
    LoggingAdvisingAppendable buffer = LoggingAdvisingAppendable.buffering();
    try {
      render(buffer);
    } catch (IOException e) {
      // The buffering appendable performs no real I/O, so this cannot happen.
      throw new AssertionError(e); // impossible
    }
    return buffer.toString();
  }

  @Override
  public void render(LoggingAdvisingAppendable appendable) throws IOException {
    // Renders as "{key1: value1, key2: value2, ...}".
    appendable.append('{');
    String separator = "";
    for (SoyValue key : keys()) {
      SoyValue value = get(key);
      appendable.append(separator);
      key.render(appendable);
      appendable.append(": ");
      value.render(appendable);
      separator = ", ";
    }
    appendable.append('}');
  }

  @Override
  public boolean equals(Object other) {
    // Instance equality, to match Javascript behavior.
    return this == other;
  }

  @Override
  public int hashCode() {
    // Identity hash, consistent with the identity-based equals above.
    return System.identityHashCode(this);
  }

  @Override
  public String toString() {
    return coerceToString();
  }
}
| 773 |
794 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.mina.examples.udpecho;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import org.apache.mina.api.AbstractIoFilter;
import org.apache.mina.api.AbstractIoHandler;
import org.apache.mina.api.IdleStatus;
import org.apache.mina.api.IoSession;
import org.apache.mina.filter.logging.LoggingFilter;
import org.apache.mina.filterchain.ReadFilterChainController;
import org.apache.mina.filterchain.WriteFilterChainController;
import org.apache.mina.session.WriteRequest;
import org.apache.mina.transport.nio.NioUdpServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A UDP base echo server sending back every datagram received
*
* @author <a href="http://mina.apache.org">Apache MINA Project</a>
*/
public class NioUdpEchoServer extends NioUdpServer {
    static final Logger LOG = LoggerFactory.getLogger(NioUdpEchoServer.class);

    /** The server logger filter */
    public final class UdpEchoFilter extends AbstractIoFilter {
        @Override
        public void sessionOpened(final IoSession session) {
            LOG.info("session {} opened", session);
            super.sessionOpened(session);
        }

        @Override
        public void sessionIdle(IoSession session, IdleStatus status) {
            LOG.info("session {} idle", session);
            super.sessionIdle(session, status);
        }

        @Override
        public void sessionClosed(IoSession session) {
            LOG.info("session {} closed", session);
            super.sessionClosed(session);
        }

        @Override
        public void messageWriting(IoSession session, WriteRequest message, WriteFilterChainController controller) {
            // we just push the message in the chain
            super.messageWriting(session, message, controller);
        }

        @Override
        public void messageReceived(IoSession session, Object message, ReadFilterChainController controller) {
            // Echo the received datagram back to the sender unchanged.
            if (message instanceof ByteBuffer) {
                LOG.info("echoing");
                session.write(message);
            }
            super.messageReceived(session, message, controller);
        }

        @Override
        public void messageSent(IoSession session, Object message) {
            LOG.info("message {} sent", message);
            super.messageSent(session, message);
        }
    }

    public static void main(final String[] args) {
        LOG.info("starting echo server");
        final NioUdpServer server = new NioUdpEchoServer();

        // create the filter chain for this service
        server.setFilters(new LoggingFilter("LoggingFilter1"), ((NioUdpEchoServer) server).new UdpEchoFilter());
        server.setIoHandler(new AbstractIoHandler() {
            @Override
            public void sessionOpened(final IoSession session) {
                LOG.info("session opened {}", session);

                // Greet every new session with a welcome message.
                final String welcomeStr = "welcome\n";
                final ByteBuffer bf = ByteBuffer.allocate(welcomeStr.length());
                bf.put(welcomeStr.getBytes());
                bf.flip();
                session.write(bf);
            }
        });
        try {
            // Serve on UDP port 9999 for 25 seconds, then shut down.
            final SocketAddress address = new InetSocketAddress(9999);
            server.bind(address);
            LOG.debug("Running the server for 25 sec");
            Thread.sleep(25000);
            LOG.debug("Unbinding the UDP port");
            server.unbind();
        } catch (final InterruptedException e) {
            LOG.error("Interrupted exception", e);
        }
    }
}
| 1,679 |
496 |
/*
* Copyright (C) 2018 <NAME>
*
* Author: <NAME> <<EMAIL>>
*/
#include "lexbor/html/interfaces/table_section_element.h"
#include "lexbor/html/interfaces/document.h"
lxb_html_table_section_element_t *
lxb_html_table_section_element_interface_create(lxb_html_document_t *document)
{
    lxb_dom_node_t *node;
    lxb_html_table_section_element_t *element;

    /* Interface objects are allocated from the document's mraw pool. */
    element = lexbor_mraw_calloc(document->dom_document.mraw,
                                 sizeof(lxb_html_table_section_element_t));
    if (element == NULL) {
        return NULL;
    }

    node = lxb_dom_interface_node(element);

    node->owner_document = lxb_html_document_original_ref(document);
    node->type = LXB_DOM_NODE_TYPE_ELEMENT;

    return element;
}
lxb_html_table_section_element_t *
lxb_html_table_section_element_interface_destroy(lxb_html_table_section_element_t *table_section_element)
{
    lxb_dom_node_t *node = lxb_dom_interface_node(table_section_element);

    /* Frees the element from its owner document's mraw pool. */
    return lexbor_mraw_free(node->owner_document->mraw, table_section_element);
}
| 418 |
764 |
<filename>erc20/0x60Eb57d085C59932d5fAa6c6026268A4386927d0.json
{"symbol": "LOCG","address": "0x60Eb57d085C59932d5fAa6c6026268A4386927d0","overview":{"en": ""},"email": "","website": "https://locgame.io","state": "NORMAL","links": {"blog": "https://medium.com/locgame","twitter": "https://twitter.com/LOCgameio?s=20","telegram": "","github": ""}}
| 146 |
412 |
/* Struct declared with both a tag name and a typedef alias (MYSTRUCT). */
typedef struct tag_struct_name
{
    int x;     // integer member
    float y;   // floating-point member
} MYSTRUCT;
/*
 * Empty stub taking the same struct type spelled two ways: once via its
 * struct tag and once via the MYSTRUCT typedef.
 */
void fun(struct tag_struct_name tag_struct_param, MYSTRUCT mystruct_param)
{
}
| 56 |
726 |
"""
:class:`FoldingClassifier` and :class:`FoldingRegressor` provide an easy way
to run k-Folding cross-validation. Also it is a nice way to combine predictions of trained classifiers.
"""
from __future__ import division, print_function, absolute_import
import numpy
import pandas
from six.moves import zip
from sklearn import clone
from sklearn.cross_validation import KFold
from sklearn.utils import check_random_state
from . import utils
from .factory import train_estimator
from ..estimators.interface import Classifier, Regressor
from ..estimators.utils import check_inputs
__author__ = '<NAME>, <NAME>'
__all__ = ['FoldingClassifier', 'FoldingRegressor']
from .utils import get_classifier_probabilities, get_classifier_staged_proba, get_regressor_prediction, \
get_regressor_staged_predict
class FoldingBase(object):
    """
    This meta-{estimator} implements folding algorithm:
    * split training data into n equal parts;
    * train n {estimator}s, each one is trained using n-1 folds
    To get unbiased predictions for data, pass the **same** dataset (with same order of events)
    as in training to prediction methods,
    in which case each event is predicted with base {estimator} which didn't use that event during training.
    To use information from not one, but several estimators during predictions,
    provide appropriate voting function. Examples of voting function:
    >>> voting = lambda x: numpy.mean(x, axis=0)
    >>> voting = lambda x: numpy.median(x, axis=0)
    """
    # NOTE: the class docstring above is consumed at class-creation time by the
    # subclasses via FoldingBase.__doc__.format(estimator=...), so the
    # {estimator} placeholders must stay intact.
    def __init__(self,
                 base_estimator,
                 n_folds=2,
                 random_state=None,
                 features=None,
                 parallel_profile=None):
        """
        :param sklearn.BaseEstimator base_estimator: base classifier, which will be used for training
        :param int n_folds: count of folds
        :param features: features used in training
        :type features: None or list[str]
        :param parallel_profile: profile for IPython cluster, None to compute locally.
        :type parallel_profile: None or str
        :param random_state: random state for reproducibility
        :type random_state: None or int or RandomState
        """
        # one trained clone of base_estimator per fold, filled in by fit()
        self.estimators = []
        self.parallel_profile = parallel_profile
        self.n_folds = n_folds
        self.base_estimator = base_estimator
        self._folds_indices = None
        self.random_state = random_state
        # lazily-drawn integer seed shared by every _get_folds_column call, so
        # training and later predictions assign events to the same folds
        self._random_number = None
        # setting features directly
        self.features = features
    def _get_folds_column(self, length):
        """
        Return special column with indices of folds for all events.
        """
        # Draw the seed once and cache it: repeated calls must reproduce the
        # exact same fold assignment.
        if self._random_number is None:
            self._random_number = check_random_state(self.random_state).randint(0, 100000)
        folds_column = numpy.zeros(length)
        for fold_number, (_, folds_indices) in enumerate(
                KFold(length, self.n_folds, shuffle=True, random_state=self._random_number)):
            folds_column[folds_indices] = fold_number
        return folds_column
    def _prepare_data(self, X, y, sample_weight):
        # Subclasses validate inputs and record label/target metadata here.
        raise NotImplementedError('To be implemented in descendant')
    def fit(self, X, y, sample_weight=None):
        """
        Train the model, will train several base {estimator}s on overlapping
        subsets of training dataset.
        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param y: labels of events - array-like of shape [n_samples]
        :param sample_weight: weight of events,
               array-like of shape [n_samples] or None if all weights are equal
        """
        if hasattr(self.base_estimator, 'features'):
            assert self.base_estimator.features is None, \
                'Base estimator must have None features! Use features parameter in Folding instead'
        # remembered so prediction methods can detect "same dataset as training"
        self.train_length = len(X)
        X, y, sample_weight = self._prepare_data(X, y, sample_weight)
        folds_column = self._get_folds_column(len(X))
        for _ in range(self.n_folds):
            self.estimators.append(clone(self.base_estimator))
        if sample_weight is None:
            weights_iterator = [None] * self.n_folds
        else:
            weights_iterator = (sample_weight[folds_column != index] for index in range(self.n_folds))
        # Each estimator is trained on all events NOT in its own fold; training
        # may run locally or on an IPython cluster (parallel_profile).
        result = utils.map_on_cluster(self.parallel_profile, train_estimator,
                                      range(len(self.estimators)),
                                      self.estimators,
                                      (X.iloc[folds_column != index, :].copy() for index in range(self.n_folds)),
                                      (y[folds_column != index] for index in range(self.n_folds)),
                                      weights_iterator)
        for status, data in result:
            if status == 'success':
                name, classifier, spent_time = data
                self.estimators[name] = classifier
            else:
                print('Problem while training on the node, report:\n', data)
        return self
    def _folding_prediction(self, X, prediction_function, vote_function=None):
        """
        Supplementary function to predict (labels, probabilities, values)
        :param X: dataset to predict
        :param prediction_function: function(classifier, X) -> prediction
        :param vote_function: if using averaging over predictions of folds, this function shall be passed.
            For instance: lambda x: numpy.mean(x, axis=0), which means averaging result over all folds.
            Another useful option is lambda x: numpy.median(x, axis=0)
        """
        X = self._get_features(X)
        if vote_function is not None:
            print('KFold prediction with voting function')
            results = []
            for estimator in self.estimators:
                results.append(prediction_function(estimator, X))
            # results: [n_classifiers, n_samples, n_dimensions], reduction over 0th axis
            results = numpy.array(results)
            return vote_function(results)
        else:
            # Without voting: each event is predicted by the estimator whose
            # fold it belongs to (unbiased only when X is the training set in
            # the original order).
            if len(X) != self.train_length:
                print('KFold prediction using random classifier (length of data passed not equal to length of train)')
            else:
                print('KFold prediction using folds column')
            folds_column = self._get_folds_column(len(X))
            parts = []
            for fold in range(self.n_folds):
                parts.append(prediction_function(self.estimators[fold], X.iloc[folds_column == fold, :]))
            # scatter per-fold predictions back into the original event order
            result_shape = [len(X)] + list(numpy.shape(parts[0])[1:])
            results = numpy.zeros(shape=result_shape)
            folds_indices = [numpy.where(folds_column == fold)[0] for fold in range(self.n_folds)]
            for fold, part in enumerate(parts):
                results[folds_indices[fold]] = part
            return results
    def _staged_folding_prediction(self, X, prediction_function, vote_function=None):
        # Generator variant of _folding_prediction: yields one combined
        # prediction per training stage of the base estimator.
        X = self._get_features(X)
        if vote_function is not None:
            print('Using voting KFold prediction')
            iterators = [prediction_function(estimator, X) for estimator in self.estimators]
            for fold_prob in zip(*iterators):
                result = numpy.array(fold_prob)
                yield vote_function(result)
        else:
            if len(X) != self.train_length:
                print('KFold prediction using random classifier (length of data passed not equal to length of train)')
            else:
                print('KFold prediction using folds column')
            folds_column = self._get_folds_column(len(X))
            iterators = [prediction_function(self.estimators[fold], X.iloc[folds_column == fold, :])
                         for fold in range(self.n_folds)]
            folds_indices = [numpy.where(folds_column == fold)[0] for fold in range(self.n_folds)]
            for stage_results in zip(*iterators):
                # reassemble this stage's per-fold outputs into original order
                result_shape = [len(X)] + list(numpy.shape(stage_results[0])[1:])
                result = numpy.zeros(result_shape)
                for fold in range(self.n_folds):
                    result[folds_indices[fold]] = stage_results[fold]
                yield result
    def _get_feature_importances(self):
        """
        Get features importance
        :return: pandas.DataFrame with column effect and `index=features`
        """
        # sum importances over folds, then normalize by the maximum
        importances = numpy.sum([est.feature_importances_ for est in self.estimators], axis=0)
        # to get train_features, not features
        one_importances = self.estimators[0].get_feature_importances()
        return pandas.DataFrame({'effect': importances / numpy.max(importances)}, index=one_importances.index)
class FoldingRegressor(FoldingBase, Regressor):
    # inherit documentation
    __doc__ = FoldingBase.__doc__.format(estimator='regressor')
    def fit(self, X, y, sample_weight=None):
        return FoldingBase.fit(self, X, y, sample_weight=sample_weight)
    fit.__doc__ = FoldingBase.fit.__doc__.format(estimator='regressor')
    def _prepare_data(self, X, y, sample_weight):
        # Record target dimensionality so predictions can be reported as
        # [n_samples, n_outputs]; then validate inputs.
        X = self._get_features(X)
        y_shape = numpy.shape(y)
        self.n_outputs_ = 1 if len(y_shape) < 2 else y_shape[1]
        return check_inputs(X, y, sample_weight=sample_weight, allow_multiple_targets=True)
    def predict(self, X, vote_function=None):
        """
        Get predictions. To get unbiased predictions on training dataset, pass training data
        (with same order of events) and vote_function=None.
        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param vote_function: function to combine prediction of folds' estimators.
            If None then folding scheme is used. Parameters: numpy.ndarray [n_classifiers, n_samples]
        :type vote_function: None or function
        :rtype: numpy.array of shape [n_samples, n_outputs]
        """
        return self._folding_prediction(X, prediction_function=get_regressor_prediction,
                                        vote_function=vote_function)
    def staged_predict(self, X, vote_function=None):
        """
        Get predictions after each iteration of base estimator.
        To get unbiased predictions on training dataset, pass training data
        (with same order of events) and vote_function=None.
        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param vote_function: function to combine prediction of folds' estimators.
            If None then folding scheme is used. Parameters: numpy.ndarray [n_classifiers, n_samples]
        :type vote_function: None or function
        :rtype: sequence of numpy.array of shape [n_samples, n_outputs]
        """
        return self._staged_folding_prediction(X, prediction_function=get_regressor_staged_predict,
                                               vote_function=vote_function)
    def get_feature_importances(self):
        """
        Get features importance
        :rtype: pandas.DataFrame with column effect and `index=features`
        """
        return self._get_feature_importances()
    @property
    def feature_importances_(self):
        """Sklearn-way of returning feature importance.
        This returned as numpy.array, assuming that initially passed train_features=None """
        # `.ix` was deprecated in pandas 0.20 and later removed; `.loc`
        # performs the identical label-based row/column selection here.
        return self.get_feature_importances().loc[self.features, 'effect'].values
class FoldingClassifier(FoldingBase, Classifier):
    # inherit documentation
    __doc__ = FoldingBase.__doc__.format(estimator='classifier')
    def fit(self, X, y, sample_weight=None):
        return FoldingBase.fit(self, X, y, sample_weight=sample_weight)
    fit.__doc__ = FoldingBase.fit.__doc__.format(estimator='classifier')
    def _prepare_data(self, X, y, sample_weight):
        # Remember class labels (for mapping argmax indices back to classes in
        # predict) before validating the inputs.
        X = self._get_features(X)
        self._set_classes(y)
        return check_inputs(X, y, sample_weight=sample_weight, allow_multiple_targets=True)
    def predict(self, X, vote_function=None):
        """
        Predict labels. To get unbiased predictions on training dataset, pass training data
        (with same order of events) and vote_function=None.
        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param vote_function: function to combine prediction of folds' estimators.
            If None then folding scheme is used.
        :type vote_function: None or function
        :rtype: numpy.array of shape [n_samples]
        """
        return numpy.argmax(self.predict_proba(X, vote_function=vote_function), axis=1)
    def predict_proba(self, X, vote_function=None):
        """
        Predict probabilities. To get unbiased predictions on training dataset, pass training data
        (with same order of events) and vote_function=None.
        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param vote_function: function to combine prediction of folds' estimators.
            If None then folding scheme is used.
        :type vote_function: None or function
        :rtype: numpy.array of shape [n_samples, n_classes]
        """
        result = self._folding_prediction(X, prediction_function=get_classifier_probabilities,
                                          vote_function=vote_function)
        # renormalize each row to sum to 1 (voting can break normalization)
        return result / numpy.sum(result, axis=1, keepdims=True)
    def staged_predict_proba(self, X, vote_function=None):
        """
        Predict probabilities after each stage of base_estimator.
        To get unbiased predictions on training dataset, pass training data
        (with same order of events) and vote_function=None.
        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param vote_function: function to combine prediction of folds' estimators.
            If None then folding scheme is used.
        :type vote_function: None or function
        :rtype: sequence of numpy.arrays of shape [n_samples, n_classes]
        """
        for proba in self._staged_folding_prediction(X, prediction_function=get_classifier_staged_proba,
                                                     vote_function=vote_function):
            yield proba / numpy.sum(proba, axis=1, keepdims=True)
    def get_feature_importances(self):
        """
        Get features importance
        :rtype: pandas.DataFrame with column effect and `index=features`
        """
        return self._get_feature_importances()
    @property
    def feature_importances_(self):
        """Sklearn-way of returning feature importance.
        This returned as numpy.array, assuming that initially passed train_features=None """
        # `.ix` was deprecated in pandas 0.20 and later removed; `.loc`
        # performs the identical label-based row/column selection here.
        return self.get_feature_importances().loc[self.features, 'effect'].values
| 6,069 |
1,127 |
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
#include <memory>
#include "default_opset.hpp"
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph {
namespace onnx_import {
namespace utils {
/// \brief Factory class which generates sub-graphs for ONNX ArgMin, ArgMax ops.
class ArgMinMaxFactory {
public:
    /// \brief Caches the node's data input together with its attributes
    ///        (presumably keepdims / axis / select_last_index, matching the
    ///        members below -- defined in the .cpp, not visible here).
    explicit ArgMinMaxFactory(const Node& node);
    virtual ~ArgMinMaxFactory() = default;
    /// \brief Creates ArgMax ONNX operation.
    /// \return Sub-graph representing ArgMax op.
    std::shared_ptr<ngraph::Node> make_arg_max() const;
    /// \brief Creates ArgMin ONNX operation.
    /// \return Sub-graph representing ArgMin op.
    std::shared_ptr<ngraph::Node> make_arg_min() const;
private:
    /// \brief Shared implementation for both public methods: builds a
    ///        TopK-based sub-graph in the requested mode (MIN or MAX).
    std::shared_ptr<ngraph::Node> make_topk_subgraph(default_opset::TopK::Mode mode) const;
    const std::int64_t m_keep_dims;        // ONNX "keepdims" attribute -- TODO confirm in ctor
    Output<ngraph::Node> m_input_node;     // data input of the ONNX node
    std::int64_t m_axis;                   // reduction axis
    std::int64_t m_select_last_index;      // ONNX "select_last_index" attribute -- TODO confirm
};
} // namespace utils
} // namespace onnx_import
} // namespace ngraph
| 441 |
1,667 |
<gh_stars>1000+
/**
* Copyright 2017 <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <string.h>
#include <sys/ptrace.h>
#include "ptrace.h"
/* Fallback definitions for Zend engine constants when the PHP headers are not
 * available at build time; used below to classify special "functions"
 * (include/require/eval) from an opline's extended_value. */
#ifndef ZEND_EVAL
#define ZEND_EVAL (1<<0)
#define ZEND_INCLUDE (1<<1)
#define ZEND_INCLUDE_ONCE (1<<2)
#define ZEND_REQUIRE (1<<3)
#define ZEND_REQUIRE_ONCE (1<<4)
#endif
#ifndef ZEND_USER_CODE
/* A quick check (type == ZEND_USER_FUNCTION || type == ZEND_EVAL_CODE) */
#define ZEND_USER_CODE(type) ((type & 1) == 0)
#endif
/* Copy a NUL-padded string out of the traced process' memory, one machine
 * word at a time, into buf (at most size bytes including the terminator).
 * Returns the number of bytes copied, or -1 if ptrace fails.
 * NOTE: a remote word of all-ones also reads as -1; callers that need to
 * distinguish should clear and inspect errno (behavior kept as-is here). */
static int ptrace_fetch_str(pid_t pid, void *addr, char *buf, size_t size)
{
    char *p;
    int i, j;
    long val;
    if (size == 0) {
        return -1;
    }
    /* Always reserve one byte for the terminator: the original loop allowed
     * i to reach size, so buf[i] = '\0' below wrote one byte past the end of
     * the buffer when the remote string filled it completely. */
    for (i = 0; i + 1 < size; addr += sizeof(long)) {
        val = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
        if (val == -1) {
            return -1;
        }
        p = (char *) &val;
        for (j = 0; j < sizeof(long) && i + 1 < size; j++) {
            buf[i++] = *p++;
        }
    }
    buf[i] = '\0';
    return i;
}
/* Read one machine word from the traced process at addr.
 * Returns the word, or -1 on ptrace failure; note -1 is also a valid data
 * word, so callers needing certainty must check errno themselves. */
static inline long ptrace_fetch_long(pid_t pid, void *addr)
{
    /* some debug code here, just this... */
    return ptrace(PTRACE_PEEKDATA, pid, (void *) addr, NULL);
}
/* quick ptrace fetch */
/* These helpers assume a local variable `pid` is in scope at every call site;
 * they read the traced process' memory via the functions above. */
#define fetch_long(addr) ptrace_fetch_long(pid, (void *) addr)
#define fetch_int(addr) (int) (fetch_long(addr) & 0xFFFFFFFF)
#define fetch_ptr(addr) (void *) fetch_long(addr)
#define fetch_str(addr, buf, size) ptrace_fetch_str(pid, (void *) addr, buf, size)
/* Reads a zend_string: length at offset +16, character data at +24.
 * NOTE(review): these offsets are hard-coded rather than taken from the
 * preset table -- presumably they match all supported PHP7 builds; confirm. */
#define fetch_zstr(addr, buf) fetch_str(addr + 24, buf, fetch_long(addr + 16))
/* TODO store presets in local file */
/* Per-PHP-version offset tables (fields described by pt_ptrace_preset_t).
 * Each row starts with the minimum PHP version id it applies to, then the
 * has_zend_string flag, then raw member offsets used by the fetch code. */
static pt_ptrace_preset_t presets[] = {
    {70012, 1,/**/0, 40, 8, 48, 140, 144, 440,/**/480, 0, 56, 24, 32,/**/8, 16, 120, 8, 20, 24},
    {50619, 0,/**/0, 40, 8, 48, 140, 144, 440,/**/1120, 0, 48, 8, 32,/**/8, 16, 152, 8, 32, 40},
    {50533, 0,/**/0, 64, 8, 72, 156, 160, 448,/**/1120, 0, 48, 8, 32,/**/8, 16, 152, 8, 32, 40},
};
/* Select the offset preset for the given PHP version id and fill in the three
 * remote base addresses. Rows are ordered newest-first, so the first row whose
 * version is <= the target's is chosen. Returns NULL if no preset matches. */
pt_ptrace_preset_t *pt_ptrace_preset(int version, void *addr_sapi_module,
        void *addr_sapi_globals, void *addr_executor_globals)
{
    int i;
    /* Iterate over the ELEMENT count; the original used sizeof(presets),
     * which is the size in bytes and walked far past the end of the array. */
    for (i = 0; i < sizeof(presets) / sizeof(presets[0]); i++) {
        if (version >= presets[i].version) {
            presets[i].sapi_module = addr_sapi_module;
            presets[i].sapi_globals = addr_sapi_globals;
            presets[i].executor_globals = addr_executor_globals;
            return &presets[i];
        }
    }
    return NULL;
}
/* Attach to the target process. Returns ptrace()'s result (0 on success,
 * -1 on error); callers should waitpid() for the stop before reading memory. */
long pt_ptrace_attach(pid_t pid)
{
    return ptrace(PTRACE_ATTACH, pid, NULL, NULL);
}
/* Detach from the target process, resuming it. Returns ptrace()'s result
 * (0 on success, -1 on error). */
long pt_ptrace_detach(pid_t pid)
{
    return ptrace(PTRACE_DETACH, pid, NULL, NULL);
}
/* Read EG(current_execute_data) from the traced process: the head of the
 * currently executing PHP call stack. */
void *pt_ptrace_fetch_current_ex(pt_ptrace_preset_t *preset, pid_t pid)
{
    /* NULL means PHP is in-active */
    return fetch_ptr(preset->executor_globals + preset->EG_current_execute_data);
}
/* Build a full pt_status_t snapshot of the traced PHP process: version
 * string, request info, and one pt_frame_t per stack frame starting from
 * addr_root_ex and following the prev_execute_data chain. Always returns 0.
 * NOTE(review): the calloc() result is not checked -- presumably acceptable
 * for this tool; a NULL return would crash in pt_ptrace_build_frame. */
int pt_ptrace_build_status(pt_status_t *status, pt_ptrace_preset_t *preset,
        pid_t pid, void *addr_root_ex, int version_id)
{
    int i;
    void *addr_current_ex;
    memset(status, 0, sizeof(pt_status_t));
    /* version */
    status->php_version = sdscatprintf(sdsempty(), "%d.%d.%d",
            version_id / 10000, version_id % 10000 / 100, version_id % 100);
    /* request */
    pt_ptrace_build_request(&status->request, preset, pid);
    /* calculate stack depth */
    for (i = 0, addr_current_ex = addr_root_ex; addr_current_ex; i++) {
        addr_current_ex = fetch_ptr(addr_current_ex + preset->EX_prev_execute_data); /* prev */
    }
    status->frame_count = i;
    if (status->frame_count) {
        status->frames = calloc(status->frame_count, sizeof(pt_frame_t));
        /* walk the chain a second time, building one frame per entry */
        for (i = 0, addr_current_ex = addr_root_ex; i < status->frame_count && addr_current_ex; i++) {
            pt_ptrace_build_frame(status->frames + i, preset, pid, addr_current_ex);
            addr_current_ex = fetch_ptr(addr_current_ex + preset->EX_prev_execute_data); /* prev */
        }
    } else {
        status->frames = NULL;
    }
    return 0;
}
/* Populate a pt_request_t from the traced process' sapi_module/sapi_globals:
 * SAPI name, translated script path, HTTP method/URI, CLI argc/argv and the
 * request start time. Missing (NULL) remote fields are simply left unset.
 * Always returns 0. Note: buf is static, so this function is not reentrant. */
int pt_ptrace_build_request(pt_request_t *request, pt_ptrace_preset_t *preset,
        pid_t pid)
{
    int i;
    long val;
    void *addr;
    static char buf[4096];
    /* init */
    memset(request, 0, sizeof(pt_request_t));
    request->type = PT_FRAME_STACK;
    /* sapi_module.name */
    addr = fetch_ptr(preset->sapi_module + preset->SM_name);
    if (addr != NULL) {
        fetch_str(addr, buf, sizeof(buf));
        request->sapi = sdsnew(buf);
    }
    /* sapi_globals request_info->path_translated */
    addr = fetch_ptr(preset->sapi_globals + preset->SG_request_info_path_translated);
    if (addr != NULL) {
        fetch_str(addr, buf, sizeof(buf));
        request->script = sdsnew(buf);
    }
    /* sapi_globals request_info->request_method */
    addr = fetch_ptr(preset->sapi_globals + preset->SG_request_info_request_method);
    if (addr != NULL) {
        fetch_str(addr, buf, sizeof(buf));
        request->method = sdsnew(buf);
    }
    /* sapi_globals request_info->request_uri */
    addr = fetch_ptr(preset->sapi_globals + preset->SG_request_info_request_uri);
    if (addr != NULL) {
        fetch_str(addr, buf, sizeof(buf));
        request->uri = sdsnew(buf);
    }
    /* argc, argv */
    request->argc = fetch_int(preset->sapi_globals + preset->SG_request_info_argc);
    if (request->argc) {
        request->argv = calloc(request->argc, sizeof(sds));
        addr = fetch_ptr(preset->sapi_globals + preset->SG_request_info_argv);
        /* argv is a remote char*[]; fetch each pointer's string */
        for (i = 0; i < request->argc; i++) {
            fetch_str(addr + sizeof(char *) * i, buf, sizeof(buf));
            request->argv[i] = sdsnew(buf);
        }
    }
    /* sapi_globals global_request_time */
    /* the remote field is a double (seconds); reinterpret the fetched word
     * as double, then convert to microseconds */
    val = fetch_long(preset->sapi_globals + preset->SG_global_request_time);
    request->time = (long) (*(double *) &val) * 1000000l;
    return 0;
}
/* Build one pt_frame_t from a remote zend_execute_data (addr_current_ex):
 * resolves function/class names, the caller's line number, and the source
 * filename. String layout (zend_string vs char*) is chosen via
 * preset->has_zend_string. Always returns 0.
 * Note: buf is static, so this function is not reentrant. */
int pt_ptrace_build_frame(pt_frame_t *frame, pt_ptrace_preset_t *preset,
        pid_t pid, void *addr_current_ex)
{
    static char buf[4096];
    memset(frame, 0, sizeof(pt_frame_t));
    /* types, level */
    frame->type = PT_FRAME_STACK;
    /* TODO frame->functype = internal ? PT_FUNC_INTERNAL : 0x00; */
    frame->level = 1;
    /* args init */
    frame->arg_count = 0;
    frame->args = NULL;
    /* remote pointers for the current frame and its caller; lineno/filename
     * come from the CALLER's opline (falls back to the current frame when
     * there is no previous execute_data) */
    void *addr_prev_ex = fetch_ptr(addr_current_ex + preset->EX_prev_execute_data);
    void *addr_ex_func = fetch_ptr(addr_current_ex + preset->EX_func);
    void *addr_ex_func_name = fetch_ptr(addr_ex_func + preset->EX_FUNC_name);
    void *addr_ex_func_scope = fetch_ptr(addr_ex_func + preset->EX_FUNC_scope);
    void *addr_ex_This = fetch_ptr(addr_current_ex + preset->EX_This);
    void *addr_caller_ex = addr_prev_ex ? addr_prev_ex : addr_current_ex;
    void *addr_caller_ex_opline = fetch_ptr(addr_caller_ex + preset->EX_opline);
    void *addr_caller_ex_func = fetch_ptr(addr_caller_ex + preset->EX_func);
    void *addr_caller_ex_func_oparray_filename = fetch_ptr(addr_caller_ex_func +
            preset->EX_FUNC_oparray_filename);
    /* names */
    if (addr_ex_func_name) {
        /* function name */
        if (preset->has_zend_string) {
            fetch_zstr(addr_ex_func_name, buf);
        } else {
            fetch_str(addr_ex_func_name, buf, sizeof(buf));
        }
        frame->function = sdsnew(buf);
        /* functype, class name */
        if (addr_ex_func_scope) {
            /* scoped function: a method -- member if $this is set, else static */
            void *addr_ex_func_scope_name = fetch_ptr(addr_ex_func_scope + preset->EX_FUNC_SCOPE_name);
            if (preset->has_zend_string) {
                fetch_zstr(addr_ex_func_scope_name, buf);
            } else {
                fetch_str(addr_ex_func_scope_name, buf, sizeof(buf));
            }
            frame->class = sdsnew(buf);
            frame->functype |= addr_ex_This ? PT_FUNC_MEMBER : PT_FUNC_STATIC;
        } else {
            frame->functype |= PT_FUNC_NORMAL;
        }
    } else {
        /* anonymous frame: include/require/eval or the top-level {main};
         * distinguished by the caller opline's extended_value flags */
        long ev = 0;
        if (addr_caller_ex && addr_caller_ex_opline) {
            ev = fetch_long(addr_caller_ex_opline + preset->EX_OPLINE_extended_value);
        }
        /* special user function */
        switch (ev) {
            case ZEND_INCLUDE_ONCE:
                frame->functype |= PT_FUNC_INCLUDE_ONCE;
                frame->function = "include_once";
                break;
            case ZEND_REQUIRE_ONCE:
                frame->functype |= PT_FUNC_REQUIRE_ONCE;
                frame->function = "require_once";
                break;
            case ZEND_INCLUDE:
                frame->functype |= PT_FUNC_INCLUDE;
                frame->function = "include";
                break;
            case ZEND_REQUIRE:
                frame->functype |= PT_FUNC_REQUIRE;
                frame->function = "require";
                break;
            case ZEND_EVAL:
                frame->functype |= PT_FUNC_EVAL;
                frame->function = "{eval}";
                break;
            default:
                /* should be function main */
                frame->functype |= PT_FUNC_NORMAL;
                frame->function = "{main}";
                break;
        }
        /* replace the literal with an owned sds copy */
        frame->function = sdsnew(frame->function);
    }
    /* lineno */
    if (addr_caller_ex && addr_caller_ex_opline) {
        frame->lineno = fetch_int(addr_caller_ex_opline + preset->EX_OPLINE_lineno);
    } else {
        frame->lineno = 0;
    }
    /* filename */
    /* the function's type byte is its first member; only user code
     * (ZEND_USER_FUNCTION / ZEND_EVAL_CODE) has an op_array filename */
    unsigned char caller_func_type = fetch_long(addr_caller_ex_func + 0) & 0xFF;
    if (addr_caller_ex_func_oparray_filename && ZEND_USER_CODE(caller_func_type)) {
        if (preset->has_zend_string) {
            fetch_zstr(addr_caller_ex_func_oparray_filename, buf);
        } else {
            fetch_str(addr_caller_ex_func_oparray_filename, buf, sizeof(buf));
        }
        frame->filename = sdsnew(buf);
    } else {
        frame->filename = NULL;
    }
    return 0;
}
| 4,819 |
4,901 |
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package libcore.libcore.util;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import junit.framework.TestCase;
import static libcore.util.HexEncoding.decode;
import static libcore.util.HexEncoding.encode;
/**
 * Unit tests for libcore.util.HexEncoding's encode/decode round-trip,
 * case-insensitive decoding, the 4-bit (odd-length) decoding flag, and
 * rejection of non-hex input. Test vectors are deliberately literal.
 */
public class HexEncodingTest extends TestCase {
    public void testEncode() {
        final byte[] avocados = "avocados".getBytes(StandardCharsets.UTF_8);
        // encode() produces upper-case hex; decode() must round-trip it.
        assertArraysEqual("61766F6361646F73".toCharArray(), encode(avocados));
        assertArraysEqual(avocados, decode(encode(avocados), false));
        // Make sure we can handle lower case hex encodings as well.
        assertArraysEqual(avocados, decode("61766f6361646f73".toCharArray(), false));
    }
    public void testDecode_allow4Bit() {
        // With allowSingleChar=true an odd leading nibble is accepted.
        assertArraysEqual(new byte[] { 6 }, decode("6".toCharArray(), true));
        assertArraysEqual(new byte[] { 6, 'v' }, decode("676".toCharArray(), true));
    }
    public void testDecode_disallow4Bit() {
        // Odd-length input must be rejected when allowSingleChar=false.
        try {
            decode("676".toCharArray(), false);
            fail();
        } catch (IllegalArgumentException expected) {
        }
    }
    public void testDecode_invalid() {
        // 'R' is not a hex digit.
        try {
            decode("DEADBARD".toCharArray(), false);
            fail();
        } catch (IllegalArgumentException expected) {
        }
        // This demonstrates a difference in behaviour from apache commons : apache
        // commons uses Character.isDigit and would successfully decode a string with
        // arabic and devanagari characters.
        try {
            decode("६१٧٥٥F6361646F73".toCharArray(), false);
            fail();
        } catch (IllegalArgumentException expected) {
        }
        try {
            decode("#%6361646F73".toCharArray(), false);
            fail();
        } catch (IllegalArgumentException expected) {
        }
    }
    // Compare char arrays by their String contents (gives readable failures).
    private static void assertArraysEqual(char[] lhs, char[] rhs) {
        assertEquals(new String(lhs), new String(rhs));
    }
    // Compare byte arrays via Arrays.toString (gives readable failures).
    private static void assertArraysEqual(byte[] lhs, byte[] rhs) {
        assertEquals(Arrays.toString(lhs), Arrays.toString(rhs));
    }
}
| 876 |
461 |
#!/usr/bin/env python3
import json
import warnings
import click
from ipwhois import IPWhois
@click.command()
@click.argument('ip')
@click.option('--json', 'json_output', is_flag=True, default=False, help='Print the output in JSON format')
@click.option('--csv', 'csv_output', is_flag=True, default=False, help='Print the output in CSV format')
def cmd_whois_ip(ip, json_output, csv_output):
    """Simple whois client to check IP addresses (IPv4 and IPv6).
    Example:
    \b
    $ habu.whois.ip 8.8.4.4
    asn 15169
    asn_registry arin
    asn_cidr 8.8.4.0/24
    asn_country_code US
    asn_description GOOGLE - Google LLC, US
    asn_date 1992-12-01
    """
    warnings.filterwarnings("ignore")
    # Fields printed (in this order) for the plain and CSV output modes.
    default_fields = [
        'asn',
        'asn_registry',
        'asn_cidr',
        'asn_country_code',
        'asn_description',
        'asn_date',
    ]
    obj = IPWhois(ip)
    data = obj.lookup_rdap()
    if json_output:
        # JSON mode dumps the complete RDAP result, not just default_fields.
        print(json.dumps(data, indent=4))
        return True
    for field in default_fields:
        value = data.get(field, None)
        # Skip fields the RDAP lookup did not return. (The original checked
        # `if not field:`, which is always false for these non-empty field
        # names and therefore never skipped anything, printing "None".)
        if value is None:
            continue
        if csv_output:
            print('"{}","whois.{}","{}"'.format(ip, field, value))
        else:
            print('{:<25}{}'.format(field, value))
if __name__ == '__main__':
    cmd_whois_ip()
| 691 |
1,766 |
<reponame>aalonsog/licode
/*
* hsam.cpp
*/
#include <stdio.h>
#include <OneToManyProcessor.h>
#include <SdpInfo.h>
#include <WebRtcConnection.h>
#include <LibNiceConnection.h>
#include "Test.h"
#include "pc/Observer.h"
using namespace erizo;
// Peer id of the current publisher; 0 until createPublisher() assigns one.
int publisherid = 0;
// Entry point: constructs the Test harness (intentionally leaked -- it is
// expected to live for the whole process). The commented-out lines preserve
// the previous observer/SDP-receiver wiring for reference.
int main() {
  new Test();
  // SDPReceiver* receiver = new SDPReceiver();
  // Observer *subscriber = new Observer("subscriber", receiver);
  // new Observer("publisher", receiver);
  // subscriber->wait();
  // return 0;
}
// Creates the one-to-many media processor that fans the single publisher's
// streams out to all subscribers. (Raw owning pointer; never freed here --
// presumably the receiver lives for the whole process.)
SDPReceiver::SDPReceiver() {
  muxer = new erizo::OneToManyProcessor();
}
// Registers peer_id as the single publisher: creates its WebRtcConnection,
// wires its audio/video into the muxer and remembers the id.
// Returns false (and changes nothing) if a publisher is already set.
bool SDPReceiver::createPublisher(int peer_id) {
  if (muxer->publisher == NULL) {
    printf("Adding publisher peer_id %d\n", peer_id);
    WebRtcConnection *newConn = new WebRtcConnection;
    newConn->init();
    newConn->setAudioReceiver(muxer);
    newConn->setVideoReceiver(muxer);
    muxer->setPublisher(newConn);  // muxer takes over the connection
    publisherid = peer_id;
  } else {
    printf("PUBLISHER ALREADY SET\n");
    return false;
  }
  return true;
}
// Adds a subscriber connection for peer_id and hands it to the muxer.
// Returns false if that peer already has a subscriber entry (repeated offer).
bool SDPReceiver::createSubscriber(int peer_id) {
  printf("Adding Subscriber peerid %d\n", peer_id);
  if (muxer->subscribers.find(peer_id) != muxer->subscribers.end()) {
    printf("OFFER AGAIN\n");
    return false;
  }
  WebRtcConnection *newConn = new WebRtcConnection;
  newConn->init();
  muxer->addSubscriber(newConn, peer_id);  // muxer owns it from here
  return true;
}
// Forwards a remote SDP to the matching connection: the publisher when
// peer_id is the registered publisher id, otherwise that peer's subscriber.
// NOTE(review): assumes the connection for peer_id exists; an unknown
// subscriber id would default-insert a null entry -- confirm callers.
void SDPReceiver::setRemoteSDP(int peer_id, const std::string &sdp) {
  if (peer_id == publisherid) {
    muxer->publisher->setRemoteSdp(sdp);
  } else {
    muxer->subscribers[peer_id]->setRemoteSdp(sdp);
  }
}
// Returns the local SDP of the connection belonging to peer_id (publisher or
// subscriber), logging it for debugging.
std::string SDPReceiver::getLocalSDP(int peer_id) {
  std::string sdp;
  if (peer_id == publisherid) {
    sdp = muxer->publisher->getLocalSdp();
  } else {
    sdp = muxer->subscribers[peer_id]->getLocalSdp();
  }
  printf("Getting localSDP %s\n", sdp.c_str());
  return sdp;
}
// Drops a disconnected subscriber from the muxer. The publisher is
// deliberately kept even when it disconnects (only subscribers are removed).
void SDPReceiver::peerDisconnected(int peer_id) {
  if (peer_id != publisherid) {
    printf("removing peer %d\n", peer_id);
    muxer->removeSubscriber(peer_id);
  }
}
| 793 |
1,350 |
<reponame>Shashi-rk/azure-sdk-for-java<gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.security.keyvault.keys.implementation;
public class ByteExtensions {
    /**
     * Creates a copy of the source array.
     *
     * @param source the array to copy; may be {@code null}
     * @return a fresh array with identical contents, or {@code null} when
     *         {@code source} was {@code null}
     */
    public static byte[] clone(byte[] source) {
        // byte[].clone() performs exactly the same element-wise copy as the
        // original allocate-then-System.arraycopy sequence, including for
        // zero-length arrays.
        return source == null ? null : source.clone();
    }
}
| 253 |
1,444 |
package mage.cards.m;
import java.util.UUID;
import mage.MageInt;
import mage.abilities.Ability;
import mage.abilities.common.EntersBattlefieldTriggeredAbility;
import mage.abilities.effects.OneShotEffect;
import mage.cards.Card;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.SubType;
import mage.constants.Outcome;
import mage.constants.Zone;
import mage.filter.common.FilterCreatureCard;
import mage.filter.predicate.card.OwnerIdPredicate;
import mage.game.Game;
import mage.players.Player;
import mage.target.Target;
import mage.target.common.TargetCardInGraveyard;
import mage.target.common.TargetOpponent;
/**
 * Mausoleum Turnkey {3}{B}
 * Creature - Ogre Rogue, 3/2.
 * "When Mausoleum Turnkey enters the battlefield, return target creature card
 * of an opponent's choice from your graveyard to your hand."
 *
 * @author LevelX2
 */
public final class MausoleumTurnkey extends CardImpl {
    public MausoleumTurnkey(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.CREATURE}, "{3}{B}");
        this.subtype.add(SubType.OGRE);
        this.subtype.add(SubType.ROGUE);
        this.power = new MageInt(3);
        this.toughness = new MageInt(2);
        // When Mausoleum Turnkey enters the battlefield, return target creature card of an opponent's choice from your graveyard to your hand.
        this.addAbility(new EntersBattlefieldTriggeredAbility(new MausoleumTurnkeyEffect(), false));
    }
    // Copy constructor used by copy() below (framework deep-copy convention).
    private MausoleumTurnkey(final MausoleumTurnkey card) {
        super(card);
    }
    @Override
    public MausoleumTurnkey copy() {
        return new MausoleumTurnkey(this);
    }
}
/**
 * One-shot effect: an opponent chooses a creature card in the controller's
 * graveyard, and that card is returned to the controller's hand. With several
 * opponents, the controller first picks which opponent makes the choice.
 */
class MausoleumTurnkeyEffect extends OneShotEffect {
    public MausoleumTurnkeyEffect() {
        super(Outcome.Benefit);
        this.staticText = "return target creature card of an opponent's choice from your graveyard to your hand";
    }
    // Copy constructor used by copy() below (framework deep-copy convention).
    public MausoleumTurnkeyEffect(final MausoleumTurnkeyEffect effect) {
        super(effect);
    }
    @Override
    public MausoleumTurnkeyEffect copy() {
        return new MausoleumTurnkeyEffect(this);
    }
    @Override
    public boolean apply(Game game, Ability source) {
        Player controller = game.getPlayer(source.getControllerId());
        if (controller != null) {
            UUID opponentId = null;
            // With more than one opponent the controller chooses which one
            // will pick the card; with exactly one, that opponent is used.
            if (game.getOpponents(controller.getId()).size() > 1) {
                Target target = new TargetOpponent(true);
                if (controller.chooseTarget(outcome, target, source, game)) {
                    opponentId = target.getFirstTarget();
                }
            } else {
                opponentId = game.getOpponents(controller.getId()).iterator().next();
            }
            if (opponentId != null) {
                Player opponent = game.getPlayer(opponentId);
                if (opponent != null) {
                    // Restrict the choice to creature cards owned by the
                    // effect's controller (i.e. in the controller's graveyard).
                    FilterCreatureCard filter = new FilterCreatureCard("creature card from " + controller.getLogName() + " graveyard");
                    filter.add(new OwnerIdPredicate(controller.getId()));
                    Target target = new TargetCardInGraveyard(filter);
                    opponent.chooseTarget(outcome, target, source, game);
                    Card card = game.getCard(target.getFirstTarget());
                    if (card != null) {
                        controller.moveCards(card, Zone.HAND, source, game);
                    }
                }
            }
            // Effect is considered applied even if no card could be returned.
            return true;
        }
        return false;
    }
}
| 1,399 |
451 |
import os
import re
import requests.exceptions as req_exc
from libs import jenkinslib, quik
from .BasePlugin import BasePlugin
class RunCommand(BasePlugin):
    """Class for managing RunCommand SubCommand"""
    def __init__(self, args):
        """Render the Groovy run-command template with the requested system
        command and execute it on the Jenkins script console, printing the
        (whitespace-normalized) output.

        :param args: parsed CLI namespace; uses system_command, credentials,
            no_wait, node and server.
        """
        super().__init__(args)
        loader = quik.FileLoader(os.path.join("data", "groovy"))
        cmd_template = loader.load_template("run_command_template.groovy")
        # Escape backslashes and double quotes so the command survives being
        # embedded inside a Groovy double-quoted string literal.
        cmd = cmd_template.render(
            {"command": self.args.system_command.replace("\\", "\\\\").replace('"', '\\"')}
        )
        try:
            # NOTE(review): if args.credentials is empty this raises IndexError
            # and `cred` is unbound in the JenkinsException handler below --
            # presumably the arg parser guarantees at least one credential.
            cred = self.args.credentials[0]
            server = self._get_jenkins_server(cred)
            result = server.execute_script(cmd, not self.args.no_wait, node=self.args.node)
            if result:
                # collapse runs of 3+ newlines down to a single blank line
                result = re.sub(r"[\r\n][\r\n]{2,}", "\n\n", result).strip()
                print(result)
        except jenkinslib.JenkinsException as ex:
            # HTTP 403 -> credentials valid-looking but not script-privileged
            if "[403]" in str(ex).split("\n")[0]:
                self.logging.fatal(
                    "%s authentication failed or not an admin with script privileges",
                    self._get_username(cred),
                )
            else:
                self.logging.fatal(
                    "Unable to access Jenkins at: %s With User: %s For Reason:\n\t%s"
                    % (
                        (
                            self.server_url.netloc
                            if len(self.server_url.netloc) > 0
                            else self.args.server
                        ),
                        self._get_username(cred),
                        str(ex).split("\n")[0],
                    )
                )
        except (req_exc.SSLError, req_exc.ConnectionError):
            self.logging.fatal(
                "Unable to connect to: "
                + (self.server_url.netloc if len(self.server_url.netloc) > 0 else self.args.server)
            )
        except Exception:
            # Unexpected failure: log the traceback and exit non-zero.
            self.logging.exception("")
            exit(1)
class RunCommandParser:
    # Mixin supplying the argparse wiring for the RunCommand subcommand;
    # expects the host class to provide _create_contextual_parser,
    # _add_common_arg_parsers and the _validate_* / _handle_authentication
    # helpers used below.
    def cmd_RunCommand(self):
        """Handles parsing of RunCommand Subcommand arguments"""
        self._create_contextual_parser(
            "RunCommand", "Run System Command on Jenkins via Jenkins Console"
        )
        self._add_common_arg_parsers()
        # -x / --no_wait: fire the script without collecting its output
        self.parser.add_argument(
            "-x",
            "--no_wait",
            help="Do not wait for Output",
            action="store_true",
            dest="no_wait",
            required=False,
        )
        # -N / --node: target a specific agent instead of the master
        self.parser.add_argument(
            "-N",
            "--node",
            metavar="<Node>",
            help='Node (Slave) to execute against. Executes against "master" if not specified.',
            action="store",
            dest="node",
            required=False,
        )
        # positional: the system command to run on the target
        self.parser.add_argument(
            metavar="<System Command>",
            help="System Command To Run",
            action="store",
            dest="system_command",
        )
        args = self.parser.parse_args()
        # validate and normalize before handing off to authentication
        self._validate_server_url(args)
        self._validate_timeout_number(args)
        self._validate_output_file(args)
        return self._handle_authentication(args)
| 1,683 |
892 |
<reponame>westonsteimel/advisory-database-github<filename>advisories/unreviewed/2022/02/GHSA-xq5w-p74r-82cv/GHSA-xq5w-p74r-82cv.json
{
"schema_version": "1.2.0",
"id": "GHSA-xq5w-p74r-82cv",
"modified": "2022-02-15T00:03:02Z",
"published": "2022-02-10T00:00:20Z",
"aliases": [
"CVE-2021-36302"
],
"details": "All Dell EMC Integrated System for Microsoft Azure Stack Hub versions contain a privilege escalation vulnerability. A remote malicious user with standard level JEA credentials may potentially exploit this vulnerability to elevate privileges and take over the system.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2021-36302"
},
{
"type": "WEB",
"url": "https://www.dell.com/support/kbdoc/en-us/000191165/dsa-2021-178-dell-emc-integrated-solution-for-microsoft-azure-stack-hub-security-update-for-a-just-enough-administration-jea-vulnerability"
}
],
"database_specific": {
"cwe_ids": [
"CWE-269"
],
"severity": "CRITICAL",
"github_reviewed": false
}
}
| 470 |
2,112 |
from flask import Flask, jsonify

app = Flask(__name__)


@app.route("/")
def hello():
    """Serve a static JSON list of sample email summaries at the root URL."""
    messages = [
        {"from": "<<EMAIL>>", "subject": "lunch at noon tomorrow"},
        {"from": "<<EMAIL>>", "subject": "compiler docs"},
    ]
    return jsonify(messages)


if __name__ == "__main__":
    # Bind to all interfaces so the dev server is reachable from outside the host.
    app.run(host='0.0.0.0')
| 121 |
889 |
<filename>lightnlp/we/skip_gram/tool.py
import torch
from torchtext.data import Field, Iterator
from torchtext.vocab import Vectors
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score
import jieba
from ...base.tool import Tool
from ...utils.log import logger
from .config import DEVICE, DEFAULT_CONFIG
from .utils.dataset import SkipGramDataset
# Fix the RNG seed for reproducible training runs (CPU and current CUDA device).
seed = 2019
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)

# Single shared field: each context/target item is already one token,
# so the tokenizer just wraps it in a list.
WORD = Field(tokenize=lambda x: [x], batch_first=True)
# Skip-gram examples are (context word, target word) pairs sharing one vocab.
Fields = [
    ('context', WORD),
    ('target', WORD)
]
def default_tokenize(sentence):
    """Segment a sentence into tokens using jieba's default cut mode."""
    return [token for token in jieba.cut(sentence)]
class SkipGramTool(Tool):
    """Helper utilities for skip-gram training: datasets, vocab, vectors,
    iterators, and evaluation metrics."""

    def get_dataset(self, path: str, fields=Fields):
        """Load a skip-gram dataset from ``path`` using the given fields."""
        logger.info('loading dataset from {}'.format(path))
        cbow_dataset = SkipGramDataset(path, fields)
        logger.info('succeed loading dataset')
        return cbow_dataset

    def get_vocab(self, *dataset):
        """Build (and return) the shared word vocabulary from the datasets."""
        logger.info('building word vocab...')
        WORD.build_vocab(*dataset)
        logger.info('succeed building word vocab')
        return WORD.vocab

    def get_vectors(self, path: str):
        """Load pretrained word vectors from ``path``."""
        logger.info('loading vectors from {}'.format(path))
        vectors = Vectors(path)
        logger.info('succeed loading vectors')
        return vectors

    def get_iterator(self, dataset, batch_size=DEFAULT_CONFIG['batch_size'], device=DEVICE,
                     sort_key=lambda x: len(x.text)):
        """Wrap ``dataset`` in a torchtext Iterator.

        NOTE(review): the default ``sort_key`` reads ``x.text`` but the
        fields defined above are ``context``/``target`` — confirm callers
        always pass an explicit ``sort_key``.
        """
        return Iterator(dataset, batch_size=batch_size, device=device, sort_key=sort_key)

    def get_score(self, model, texts, labels, score_type='f1'):
        """Score ``model`` predictions on ``texts`` against ``labels``.

        ``score_type`` selects the metric: 'f1' (default), 'p' (precision),
        'r' (recall) or 'acc' (accuracy). Unknown values fall back to f1.
        """
        metrics_map = {
            'f1': f1_score,
            'p': precision_score,
            'r': recall_score,
            'acc': accuracy_score
        }
        metric_func = metrics_map.get(score_type, f1_score)
        assert texts.size(0) == len(labels)
        vec_predict = model(texts)
        soft_predict = torch.softmax(vec_predict, dim=1)
        predict_prob, predict_index = torch.max(soft_predict.cpu().data, dim=1)
        labels = labels.view(-1).cpu().data.numpy()
        # accuracy_score takes no ``average`` keyword; passing it raised TypeError.
        if metric_func is accuracy_score:
            return metric_func(labels, predict_index)
        # sklearn convention is (y_true, y_pred): ground-truth labels first,
        # predictions second (the original call had them swapped).
        return metric_func(labels, predict_index, average='micro')


skip_gram_tool = SkipGramTool()
| 952 |
2,415 |
package com.alexvasilkov.gestures.sample.base.settings;

import com.alexvasilkov.gestures.views.interfaces.GestureView;

/**
 * Pushes the currently configured demo settings onto a gesture-enabled view.
 */
public interface SettingsController {

    /**
     * Applies the controller's settings to the given view.
     *
     * @param view the gesture view to configure
     */
    void apply(GestureView view);

}
| 65 |
544 |
import numpy as np
from ..base import BaseSKI
from tods.data_processing.SKImputer import SKImputerPrimitive


class SKImputerSKI(BaseSKI):
    """BaseSKI wrapper for the SKImputer primitive.

    Supports fit and produce; predict is not available for this primitive.
    """

    def __init__(self, **hyperparams):
        super().__init__(primitive=SKImputerPrimitive, **hyperparams)
        # Capability flags consumed by the BaseSKI machinery.
        self.fit_available, self.predict_available, self.produce_available = (
            True,
            False,
            True,
        )
| 113 |
10,876 |
{
"name": "pagmo2",
"version": "2.16.1",
"description": "A C++ platform to perform parallel computations of optimisation tasks (global and local) via the asynchronous generalized island model.",
"homepage": "https://esa.github.io/pagmo2/",
"dependencies": [
"boost-any",
"boost-graph",
"boost-serialization",
"eigen3",
"tbb"
],
"features": {
"nlopt": {
"description": "Enable the NLopt wrappers",
"dependencies": [
"nlopt"
]
}
}
}
| 204 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.