max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
385 | <gh_stars>100-1000
# Package facade: re-export the public helpers from the private submodules.
from ._linalg import complete_basis, direct_sum, orthonormalize
from ._normalize_activation import moment, normalize2mom
from ._soft_unit_step import soft_unit_step
from ._soft_one_hot_linspace import soft_one_hot_linspace
from ._reduce import germinate_formulas, reduce_permutation

# Names exported via `from <package> import *`; kept in sync with the
# imports above.
__all__ = [
    "complete_basis",
    "direct_sum",
    "orthonormalize",
    "moment",
    "normalize2mom",
    "soft_unit_step",
    "soft_one_hot_linspace",
    "germinate_formulas",
    "reduce_permutation"
]
| 198 |
from __future__ import absolute_import

import operator

from cytoolz.functoolz import curry, num_required_args, has_keywords


def should_curry(f):
    # Curry only when it can make a difference: unknown arity, more than one
    # required argument, or exactly one required argument plus (possibly)
    # keyword arguments.
    num = num_required_args(f)
    return num is None or num > 1 or num == 1 and has_keywords(f) is not False


# Expose every callable from `operator` as a module-level name, wrapped in
# `curry` where currying is useful.  `locals()` at module level is the module
# namespace, so this effectively mass-defines the names.
locals().update(
    dict((name, curry(f) if should_curry(f) else f)
         for name, f in vars(operator).items() if callable(f)),
)

# Clean up the namespace.
del curry
del num_required_args
del has_keywords
del operator
del should_curry
| 180 |
2,092 | package com.demo.widget.adapter;
import android.graphics.Color;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import com.chad.library.adapter.base.BaseViewHolder;
import com.demo.widget.R;
import java.util.Random;
/**
 * RecyclerView adapter for a stack-of-cards layout: each card gets a random
 * opaque background color and displays one entry from {@code titles}; item
 * taps are forwarded to the supplied {@link OnItemClickListener}.
 *
 * @author wenshi
 * @since 2019/6/28
 */
public class StackLayoutMangerAdapter extends RecyclerView.Adapter<RecyclerView.ViewHolder> {

    String[] titles;
    OnItemClickListener clickListener;

    // Reused for all cards. The previous code created three fresh Random
    // instances per onCreateViewHolder call, which is wasteful and can yield
    // correlated (time-seeded) values.
    private final Random random = new Random();

    public StackLayoutMangerAdapter(String[] titles, OnItemClickListener listener) {
        this.titles = titles;
        this.clickListener = listener;
    }

    @Override
    public RecyclerView.ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        View view = LayoutInflater.from(parent.getContext()).inflate(R.layout.item_stack_card,
                parent, false);
        // nextInt(256) covers the full 0-255 channel range; nextInt(255)
        // silently excluded the brightest channel value.
        view.setBackgroundColor(Color.argb(255,
                random.nextInt(256),
                random.nextInt(256),
                random.nextInt(256)));
        return new BaseViewHolder(view);
    }

    @Override
    public void onBindViewHolder(final RecyclerView.ViewHolder viewHolder, int position) {
        ((TextView) viewHolder.itemView.findViewById(R.id.tv))
                .setText(String.valueOf(titles[position]));
        viewHolder.itemView.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Use the current adapter position rather than the position
                // captured at bind time, which goes stale if items move or
                // are removed.
                int adapterPosition = viewHolder.getAdapterPosition();
                if (clickListener != null && adapterPosition != RecyclerView.NO_POSITION) {
                    clickListener.onClick(v, adapterPosition);
                }
            }
        });
    }

    @Override
    public int getItemCount() {
        return titles.length;
    }

    /** Callback for item taps; receives the clicked view and its position. */
    public interface OnItemClickListener {
        void onClick(View view, int position);
    }
}
| 776 |
1,079 | // Copyright 2004-present Facebook. All Rights Reserved.
#include <stdio.h>
#include <string.h>
#include <JavaScriptCore/JavaScript.h>
#include <JavaScriptCore/API/JSProfilerPrivate.h>
#include "JSCHelpers.h"
#include "Value.h"
#ifdef WITH_FB_MEMORY_PROFILING
// JS binding: nativeEnableAllocationTag(enabled) -> undefined.
// Toggles JSC allocation tagging; sets a JS exception when called with no
// arguments.
static JSValueRef nativeEnableAllocationTag(
    JSContextRef ctx,
    JSObjectRef function,
    JSObjectRef thisObject,
    size_t argumentCount,
    const JSValueRef arguments[],
    JSValueRef* exception) {
  if (argumentCount >= 1) {
    JSEnableAllocationTag(ctx, JSValueToBoolean(ctx, arguments[0]));
  } else if (exception) {
    *exception = facebook::react::makeJSCException(
        ctx,
        "nativeEnableAllocationTag requires a single boolean argument");
  }
  return JSValueMakeUndefined(ctx);
}
// JS binding: nativeAllocationPushTag(tag) -> undefined.
// Pushes an allocation tag (for memory attribution) onto the JSC tag stack.
static JSValueRef nativeAllocationPushTag(
    JSContextRef ctx,
    JSObjectRef function,
    JSObjectRef thisObject,
    size_t argumentCount,
    const JSValueRef arguments[],
    JSValueRef* exception) {
  if (argumentCount < 1) {
    if (exception) {
      *exception = facebook::react::makeJSCException(
          ctx,
          "nativeAllocationPushTag requires at least 1 argument");
    }
    return JSValueMakeUndefined(ctx);
  }
  // JSValueToStringCopy returns NULL on conversion failure (and fills in
  // *exception); bail out instead of dereferencing a null string.  The
  // original code also declared an unused `std::string marker;` — removed.
  JSStringRef tag = JSValueToStringCopy(ctx, arguments[0], exception);
  if (!tag) {
    return JSValueMakeUndefined(ctx);
  }
  JSPushAllocationTag(ctx, facebook::react::String::ref(tag).str().c_str());
  JSStringRelease(tag);
  return JSValueMakeUndefined(ctx);
}
// JS binding: nativeAllocationPopTag() -> undefined.
// Pops the most recently pushed allocation tag; arguments are ignored.
static JSValueRef nativeAllocationPopTag(
    JSContextRef ctx,
    JSObjectRef function,
    JSObjectRef thisObject,
    size_t argumentCount,
    const JSValueRef arguments[],
    JSValueRef* exception) {
  JSPopAllocationTag(ctx);
  return JSValueMakeUndefined(ctx);
}
// JS binding: nativeForceSyncGC() -> undefined.
// Debugging aid: triggers a synchronous JSC garbage collection.
static JSValueRef nativeForceSyncGC(
    JSContextRef ctx,
    JSObjectRef function,
    JSObjectRef thisObject,
    size_t argumentCount,
    const JSValueRef arguments[],
    JSValueRef* exception) {
  JSSynchronousGarbageCollectForDebugging(ctx);
  return JSValueMakeUndefined(ctx);
}
// JS binding: nativeCaptureStart(filename) -> undefined.
// Starts a heap capture written to /sdcard/<filename>.
static JSValueRef nativeCaptureStart(
    JSContextRef ctx,
    JSObjectRef function,
    JSObjectRef thisObject,
    size_t argumentCount,
    const JSValueRef arguments[],
    JSValueRef* exception) {
  if (argumentCount < 1) {
    if (exception) {
      *exception = facebook::react::makeJSCException(
          ctx,
          "nativeCaptureStart requires at least 1 argument");
    }
    return JSValueMakeUndefined(ctx);
  }
  // JSValueToStringCopy returns NULL on conversion failure (and fills in
  // *exception); guard against dereferencing it.
  JSStringRef outputFilename = JSValueToStringCopy(ctx, arguments[0], exception);
  if (!outputFilename) {
    return JSValueMakeUndefined(ctx);
  }
  std::string finalFilename =
      std::string("/sdcard/") +
      facebook::react::String::ref(outputFilename).str();
  JSHeapCaptureStart(ctx, finalFilename.c_str());
  JSStringRelease(outputFilename);
  return JSValueMakeUndefined(ctx);
}
// JS binding: nativeCaptureEnd() -> undefined.
// Finishes a heap capture started by nativeCaptureStart.
static JSValueRef nativeCaptureEnd(
    JSContextRef ctx,
    JSObjectRef function,
    JSObjectRef thisObject,
    size_t argumentCount,
    const JSValueRef arguments[],
    JSValueRef* exception) {
  JSHeapCaptureEnd(ctx);
  return JSValueMakeUndefined(ctx);
}
// JS binding: nativeHeapDump(filename) -> undefined.
// Writes a one-shot heap dump to /sdcard/<filename>.
static JSValueRef nativeHeapDump(
    JSContextRef ctx,
    JSObjectRef function,
    JSObjectRef thisObject,
    size_t argumentCount,
    const JSValueRef arguments[],
    JSValueRef* exception) {
  if (argumentCount < 1) {
    if (exception) {
      *exception = facebook::react::makeJSCException(
          ctx,
          "nativeHeapDump requires at least 1 argument");
    }
    return JSValueMakeUndefined(ctx);
  }
  // Same guard as nativeCaptureStart: JSValueToStringCopy may return NULL
  // (with *exception set); don't dereference a null string.
  JSStringRef outputFilename = JSValueToStringCopy(ctx, arguments[0], exception);
  if (!outputFilename) {
    return JSValueMakeUndefined(ctx);
  }
  std::string finalFilename =
      std::string("/sdcard/") +
      facebook::react::String::ref(outputFilename).str();
  JSHeapDump(ctx, finalFilename.c_str());
  JSStringRelease(outputFilename);
  return JSValueMakeUndefined(ctx);
}
#endif
namespace facebook {
namespace react {

// Installs the memory-profiling hooks defined above as global JS functions.
// Compiles to a no-op unless built with WITH_FB_MEMORY_PROFILING.
void addNativeMemoryHooks(JSGlobalContextRef ctx) {
#ifdef WITH_FB_MEMORY_PROFILING
  installGlobalFunction(ctx, "nativeEnableAllocationTag", nativeEnableAllocationTag);
  installGlobalFunction(ctx, "nativeAllocationPushTag", nativeAllocationPushTag);
  installGlobalFunction(ctx, "nativeAllocationPopTag", nativeAllocationPopTag);
  installGlobalFunction(ctx, "nativeForceSyncGC", nativeForceSyncGC);
  installGlobalFunction(ctx, "nativeCaptureStart", nativeCaptureStart);
  installGlobalFunction(ctx, "nativeCaptureEnd", nativeCaptureEnd);
  installGlobalFunction(ctx, "nativeHeapDump", nativeHeapDump);
#endif
}

} }
| 1,544 |
560 | /*
* Copyright (c) 2016 <NAME> <<EMAIL>>
* All Rights Reserved.
*/
package me.zhanghai.android.douya.broadcast.content;
import android.os.Bundle;
import androidx.fragment.app.Fragment;
import androidx.fragment.app.FragmentActivity;
import org.greenrobot.eventbus.Subscribe;
import org.greenrobot.eventbus.ThreadMode;
import java.util.Collections;
import java.util.List;
import me.zhanghai.android.douya.content.MoreRawListResourceFragment;
import me.zhanghai.android.douya.eventbus.BroadcastDeletedEvent;
import me.zhanghai.android.douya.eventbus.BroadcastUpdatedEvent;
import me.zhanghai.android.douya.eventbus.BroadcastWriteFinishedEvent;
import me.zhanghai.android.douya.eventbus.BroadcastWriteStartedEvent;
import me.zhanghai.android.douya.eventbus.EventBusUtils;
import me.zhanghai.android.douya.network.api.ApiError;
import me.zhanghai.android.douya.network.api.ApiRequest;
import me.zhanghai.android.douya.network.api.ApiService;
import me.zhanghai.android.douya.network.api.info.frodo.Broadcast;
import me.zhanghai.android.douya.network.api.info.frodo.TimelineList;
import me.zhanghai.android.douya.util.FragmentUtils;
/**
 * Headless retained fragment that loads a timeline broadcast list via
 * {@link ApiService} and keeps the cached list in sync with EventBus events
 * (broadcast updated/deleted, write started/finished).  Results and changes
 * are delivered to the target fragment through {@link Listener} callbacks.
 */
public class TimelineBroadcastListResource
        extends MoreRawListResourceFragment<TimelineList, Broadcast> {

    // Not static because we are to be subclassed.
    private final String KEY_PREFIX = getClass().getName() + '.';

    private final String EXTRA_USER_ID_OR_UID = KEY_PREFIX + "user_id_or_uid";
    private final String EXTRA_TOPIC = KEY_PREFIX + "topic";

    // Populated from fragment arguments in onCreate().
    private String mUserIdOrUid;
    private String mTopic;

    private static final String FRAGMENT_TAG_DEFAULT =
            TimelineBroadcastListResource.class.getName();

    private static TimelineBroadcastListResource newInstance(String userIdOrUid, String topic) {
        //noinspection deprecation
        return new TimelineBroadcastListResource().setArguments(userIdOrUid, topic);
    }

    /**
     * Finds an existing instance by {@code tag} in the fragment's activity, or
     * creates and attaches a new one, then (re)binds it to {@code fragment} as
     * the callback target.
     */
    public static TimelineBroadcastListResource attachTo(String userIdOrUid, String topic,
                                                         Fragment fragment, String tag,
                                                         int requestCode) {
        FragmentActivity activity = fragment.getActivity();
        TimelineBroadcastListResource instance = FragmentUtils.findByTag(activity, tag);
        if (instance == null) {
            instance = newInstance(userIdOrUid, topic);
            FragmentUtils.add(instance, activity, tag);
        }
        instance.setTarget(fragment, requestCode);
        return instance;
    }

    /** Convenience overload using the default tag and no request code. */
    public static TimelineBroadcastListResource attachTo(String userIdOrUid, String topic,
                                                         Fragment fragment) {
        return attachTo(userIdOrUid, topic, fragment, FRAGMENT_TAG_DEFAULT, REQUEST_CODE_INVALID);
    }

    /**
     * @deprecated Use {@code attachTo()} instead.
     */
    public TimelineBroadcastListResource() {}

    protected TimelineBroadcastListResource setArguments(String userIdOrUid, String topic) {
        FragmentUtils.getArgumentsBuilder(this)
                .putString(EXTRA_USER_ID_OR_UID, userIdOrUid)
                .putString(EXTRA_TOPIC, topic);
        return this;
    }

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);

        Bundle arguments = getArguments();
        mUserIdOrUid = arguments.getString(EXTRA_USER_ID_OR_UID);
        mTopic = arguments.getString(EXTRA_TOPIC);
    }

    @Override
    protected ApiRequest<TimelineList> onCreateRequest(boolean more, int count) {
        // For "load more", page by passing the id of the last broadcast we
        // already have as untilId; otherwise load from the top.
        Long untilId = null;
        if (more && has()) {
            List<Broadcast> broadcastList = get();
            int size = broadcastList.size();
            if (size > 0) {
                untilId = broadcastList.get(size - 1).id;
            }
        }
        return ApiService.getInstance().getTimelineList(mUserIdOrUid, mTopic, untilId, count);
    }

    @Override
    protected ApiRequest<TimelineList> onCreateRequest(Integer start, Integer count) {
        // Offset-based paging is not supported for timelines; only the
        // (boolean, int) overload above is valid.
        throw new UnsupportedOperationException();
    }

    @Override
    protected void onLoadStarted() {
        getListener().onLoadBroadcastListStarted(getRequestCode());
    }

    @Override
    protected void onLoadFinished(boolean more, int count, boolean successful,
                                  TimelineList response, ApiError error) {
        // Unwrap the raw timeline into a plain broadcast list before the
        // common handling below.
        onLoadFinished(more, count, successful, successful ? response.toBroadcastList() : null,
                error);
    }

    private void onLoadFinished(boolean more, int count, boolean successful,
                                List<Broadcast> response, ApiError error) {
        if (successful) {
            if (more) {
                append(response);
                getListener().onLoadBroadcastListFinished(getRequestCode());
                getListener().onBroadcastListAppended(getRequestCode(),
                        Collections.unmodifiableList(response));
            } else {
                setAndNotifyListener(response, true);
            }
            // Let other resources holding the same broadcasts pick up the
            // freshly loaded data.
            for (Broadcast broadcast : response) {
                EventBusUtils.postAsync(new BroadcastUpdatedEvent(broadcast, this));
            }
            // Frodo API is sometimes buggy that broadcast list size may not be count. In this case,
            // we simply load more until no more broadcast is returned.
            setCanLoadMore(count == 0 || response.size() > 0);
        } else {
            getListener().onLoadBroadcastListFinished(getRequestCode());
            getListener().onLoadBroadcastListError(getRequestCode(), error);
        }
    }

    protected void setAndNotifyListener(List<Broadcast> broadcastList, boolean notifyFinished) {
        set(broadcastList);
        if (notifyFinished) {
            getListener().onLoadBroadcastListFinished(getRequestCode());
        }
        getListener().onBroadcastListChanged(getRequestCode(), Collections.unmodifiableList(get()));
    }

    @Subscribe(threadMode = ThreadMode.POSTING)
    public void onBroadcastUpdated(BroadcastUpdatedEvent event) {
        // Ignore events we posted ourselves, and don't bother when empty.
        if (event.isFromMyself(this) || isEmpty()) {
            return;
        }
        List<Broadcast> broadcastList = get();
        for (int i = 0, size = broadcastList.size(); i < size; ++i) {
            // event.update() returns a replacement broadcast, or null when
            // this entry is unaffected.
            Broadcast updatedBroadcast = event.update(broadcastList.get(i), this);
            if (updatedBroadcast != null) {
                broadcastList.set(i, updatedBroadcast);
                getListener().onBroadcastChanged(getRequestCode(), i, updatedBroadcast);
            }
        }
    }

    @Subscribe(threadMode = ThreadMode.POSTING)
    public void onBroadcastDeleted(BroadcastDeletedEvent event) {
        if (event.isFromMyself(this) || isEmpty()) {
            return;
        }
        List<Broadcast> broadcastList = get();
        // Note: i is only advanced when nothing is removed, so removal does
        // not skip the element that slides into position i.
        for (int i = 0, size = broadcastList.size(); i < size; ) {
            Broadcast broadcast = broadcastList.get(i);
            if (broadcast.id == event.broadcastId) {
                broadcastList.remove(i);
                getListener().onBroadcastRemoved(getRequestCode(), i);
                --size;
            } else {
                if (broadcast.parentBroadcast != null
                        && broadcast.parentBroadcast.id == event.broadcastId) {
                    // Same behavior as Frodo API.
                    // FIXME: Won't reach here if another list shares this broadcast instance.
                    broadcast.parentBroadcast = null;
                    getListener().onBroadcastChanged(getRequestCode(), i, broadcast);
                } else if (broadcast.rebroadcastedBroadcast != null
                        && broadcast.rebroadcastedBroadcast.id == event.broadcastId) {
                    broadcast.rebroadcastedBroadcast.isDeleted = true;
                    getListener().onBroadcastChanged(getRequestCode(), i, broadcast);
                }
                ++i;
            }
        }
    }

    @Subscribe(threadMode = ThreadMode.POSTING)
    public void onBroadcastWriteStarted(BroadcastWriteStartedEvent event) {
        if (event.isFromMyself(this) || isEmpty()) {
            return;
        }
        List<Broadcast> broadcastList = get();
        for (int i = 0, size = broadcastList.size(); i < size; ++i) {
            Broadcast broadcast = broadcastList.get(i);
            if (broadcast.getEffectiveBroadcastId() == event.broadcastId) {
                getListener().onBroadcastWriteStarted(getRequestCode(), i);
            }
        }
    }

    @Subscribe(threadMode = ThreadMode.POSTING)
    public void onBroadcastWriteFinished(BroadcastWriteFinishedEvent event) {
        if (event.isFromMyself(this) || isEmpty()) {
            return;
        }
        List<Broadcast> broadcastList = get();
        for (int i = 0, size = broadcastList.size(); i < size; ++i) {
            Broadcast broadcast = broadcastList.get(i);
            if (broadcast.getEffectiveBroadcastId() == event.broadcastId) {
                getListener().onBroadcastWriteFinished(getRequestCode(), i);
            }
        }
    }

    // The attach target is required to implement Listener.
    private Listener getListener() {
        return (Listener) getTarget();
    }

    /** Callbacks delivered to the target fragment. */
    public interface Listener extends BaseBroadcastListResource.Listener {
        void onBroadcastWriteStarted(int requestCode, int position);
        void onBroadcastWriteFinished(int requestCode, int position);
    }
}
| 3,974 |
764 | {
"symbol": "EXX Token",
"address": "0x7A271d1Df2C3f2FeF734611C6C7eE6b9B8439204",
"overview":{
"en": "ET is the abbreviation of EXX Token. On July 9, 2018, ET was officially issued, with a total amount of 10 billion, and will never be issued. ET is a proof of the rights of the EXX trading platform itself. It is based on the Ethereum ERC20 agreement and is the only global certificate of the EXX platform. As an important part of the EXX ecosystem, ET will be applied to the application environment of the EXX entity ecosystem in the future. ET holders share various rights such as community governance and are the cornerstone of the collaborative development of EXX community members. Holders enjoy the corresponding rights of ET's entire ecology and enjoy the scarcity of ET's continued repurchase and destruction.",
"zh": "ET为EXX Token的简称,2018年 7月9日,ET 正式对外发行,总量恒定 100 亿,永不增发。ET是 EXX 交易平台本身的权益证明,基于以太坊ERC20协议发行,是EXX平台唯一的全球通证。ET作为EXX生态中的重要组成部分,未来会应用到EXX实体生态的应用场景中。ET 持有者共同享有社区治理等各类权利,是EXX社区成员协同发展的基石。持有者享受ET全生态的相应权益,并得到生态子通证奖励,以及享受ET持续回购销毁带来的稀缺性提升。"
},
"website": "https://www.exx.com",
"whitepaper": "https://www.exxvip.com/src/EXX-cn.pdf",
"state": "NORMAL",
"published_on": "2018-07-09",
"initial_price":{
"USD":"0.14 USD"
},
"links": {
"weibo": "https://www.weibo.com/6409905840/profile?rightmod=1&wvr=6&mod=personinfo",
"twitter": "https://twitter.com/ExchangeXGroup",
"telegram": "t.me/EXXcom",
"facebook": "https://www.facebook.com/ExchangeXGroup/"
}
}
| 807 |
14,668 | // Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef CRASHPAD_UTIL_MISC_SYMBOLIC_CONSTANTS_COMMON_H_
#define CRASHPAD_UTIL_MISC_SYMBOLIC_CONSTANTS_COMMON_H_
//! \file
//!
//! \anchor symbolic_constant_terminology
//! Symbolic constant terminology
//! =============================
//! <dl>
//! <dt>Family</dt>
//! <dd>A group of related symbolic constants. Typically, within a single
//! family, one function will be used to transform a numeric value to a
//! string equivalent, and another will perform the inverse operation.
//! Families include POSIX signals and Mach exception masks.</dd>
//! <dt>Full name</dt>
//! <dd>The normal symbolic name used for a constant. For example, in the
//! family of POSIX signals, the strings `"SIGHUP"` and `"SIGSEGV"` are
//! full names.</dd>
//! <dt>Short name</dt>
//! <dd>An abbreviated form of symbolic name used for a constant. Short names
//! vary between families, but are commonly constructed by removing a
//! common prefix from full names. For example, in the family of POSIX
//! signals, the prefix is `SIG`, and short names include `"HUP"` and
//! `"SEGV"`.</dd>
//! <dt>Numeric string</dt>
//! <dd>A string that does not contain a full or short name, but contains a
//! numeric value that can be interpreted as a symbolic constant. For
//! example, in the family of POSIX signals, `SIGKILL` generally has value
//! `9`, so the numeric string `"9"` would be interpreted equivalently to
//! `"SIGKILL"`.</dd>
//! </dl>
namespace crashpad {

//! \brief Options for various `*ToString` functions in `symbolic_constants_*`
//!     files.
//!
//! \sa \ref symbolic_constant_terminology "Symbolic constant terminology"
enum SymbolicConstantToStringOptionBits {
  // Bit layout: bit 0 selects short vs. full name, bit 1 selects the
  // fallback for unknown constants, bit 2 enables "|"-joined output.

  //! \brief Return the full name for a given constant.
  //!
  //! \attention API consumers should provide this value when desired, but
  //!     should provide only one of kUseFullName and ::kUseShortName. Because
  //!     kUseFullName is valueless, implementers should check for the absence
  //!     of ::kUseShortName instead.
  kUseFullName = 0 << 0,

  //! \brief Return the short name for a given constant.
  kUseShortName = 1 << 0,

  //! \brief If no symbolic name is known for a given constant, return an empty
  //!     string.
  //!
  //! \attention API consumers should provide this value when desired, but
  //!     should provide only one of kUnknownIsEmpty and ::kUnknownIsNumeric.
  //!     Because kUnknownIsEmpty is valueless, implementers should check for
  //!     the absence of ::kUnknownIsNumeric instead.
  kUnknownIsEmpty = 0 << 1,

  //! \brief If no symbolic name is known for a given constant, return a numeric
  //!     string.
  //!
  //! The numeric format used will vary by family, but will be appropriate to
  //! the family. Families whose values are typically constructed as bitfields
  //! will generally use a hexadecimal format, and other families will generally
  //! use a signed or unsigned decimal format.
  kUnknownIsNumeric = 1 << 1,

  //! \brief Use `|` to combine values in a bitfield.
  //!
  //! For families whose values may be constructed as bitfields, allow
  //! conversion to strings containing multiple individual components treated as
  //! being combined by a bitwise “or” operation. An example family of constants
  //! that behaves this way is the suite of Mach exception masks. For constants
  //! that are not constructed as bitfields, or constants that are only
  //! partially constructed as bitfields, this option has no effect.
  kUseOr = 1 << 2,
};

//! \brief A bitfield containing values of #SymbolicConstantToStringOptionBits.
using SymbolicConstantToStringOptions = unsigned int;

//! \brief Options for various `StringTo*` functions in `symbolic_constants_*`
//!     files.
//!
//! Not every `StringTo*` function will implement each of these options. See
//! function-specific documentation for details.
//!
//! \sa \ref symbolic_constant_terminology "Symbolic constant terminology"
enum StringToSymbolicConstantOptionBits {
  // Unlike the ToString options, each of these is an independent permission
  // bit; any combination may be set.

  //! \brief Allow conversion from a string containing a symbolic constant by
  //!     its full name.
  kAllowFullName = 1 << 0,

  //! \brief Allow conversion from a string containing a symbolic constant by
  //!     its short name.
  kAllowShortName = 1 << 1,

  //! \brief Allow conversion from a numeric string.
  kAllowNumber = 1 << 2,

  //! \brief Allow `|` to combine values in a bitfield.
  //!
  //! For families whose values may be constructed as bitfields, allow
  //! conversion of strings containing multiple individual components treated as
  //! being combined by a bitwise “or” operation. An example family of constants
  //! that behaves this way is the suite of Mach exception masks. For constants
  //! that are not constructed as bitfields, or constants that are only
  //! partially constructed as bitfields, this option has no effect.
  kAllowOr = 1 << 3,
};

//! \brief A bitfield containing values of #StringToSymbolicConstantOptionBits.
using StringToSymbolicConstantOptions = unsigned int;

}  // namespace crashpad
#endif // CRASHPAD_UTIL_MISC_SYMBOLIC_CONSTANTS_COMMON_H_
| 1,759 |
521 | /* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is the Python XPCOM language bindings.
*
* The Initial Developer of the Original Code is
* ActiveState Tool Corp.
* Portions created by the Initial Developer are Copyright (C) 2000
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* <NAME> <<EMAIL>> (original author)
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
//
// This code is part of the XPCOM extensions for Python.
//
// Written May 2000 by <NAME>.
//
// Based heavily on the Python COM support, which is
// (c) <NAME> and <NAME>.
//
// (c) 2000, ActiveState corp.
#include "PyXPCOM_std.h"
#include <nsIEnumerator.h>
// Cast |self| to the wrapped nsIEnumerator after verifying it really wraps
// that interface; returns NULL with a Python TypeError set otherwise.
static nsIEnumerator *GetI(PyObject *self) {
	nsIID iid = NS_GET_IID(nsIEnumerator);
	if (Py_nsISupports::Check(self, iid))
		return (nsIEnumerator *)Py_nsISupports::GetI(self);
	PyErr_SetString(PyExc_TypeError, "This object is not the correct interface");
	return NULL;
}
// First() -> int: resets the enumerator to its first item, returning the
// raw nsresult to Python.
static PyObject *PyFirst(PyObject *self, PyObject *args)
{
	if (!PyArg_ParseTuple(args, ":First"))
		return NULL;
	nsIEnumerator *enumerator = GetI(self);
	if (enumerator == NULL)
		return NULL;
	nsresult rv;
	// Release the GIL around the (potentially slow) XPCOM call.
	Py_BEGIN_ALLOW_THREADS;
	rv = enumerator->First();
	Py_END_ALLOW_THREADS;
	return PyInt_FromLong(rv);
}
// Next() -> int: advances the enumerator, returning the raw nsresult to
// Python.
static PyObject *PyNext(PyObject *self, PyObject *args)
{
	if (!PyArg_ParseTuple(args, ":Next"))
		return NULL;
	nsIEnumerator *enumerator = GetI(self);
	if (enumerator == NULL)
		return NULL;
	nsresult rv;
	// Release the GIL around the XPCOM call.
	Py_BEGIN_ALLOW_THREADS;
	rv = enumerator->Next();
	Py_END_ALLOW_THREADS;
	return PyInt_FromLong(rv);
}
// CurrentItem([iid]) -> wrapped interface object.
// Returns the enumerator's current item, optionally QueryInterface'd to the
// requested IID (defaults to nsISupports).
static PyObject *PyCurrentItem(PyObject *self, PyObject *args)
{
	PyObject *obIID = NULL;
	if (!PyArg_ParseTuple(args, "|O:CurrentItem", &obIID))
		return NULL;
	nsIID iid(NS_GET_IID(nsISupports));
	if (obIID != NULL && !Py_nsIID::IIDFromPyObject(obIID, &iid))
		return NULL;
	nsIEnumerator *pI = GetI(self);
	if (pI==NULL)
		return NULL;
	nsISupports *pRet = nsnull;
	nsresult r;
	Py_BEGIN_ALLOW_THREADS;
	r = pI->CurrentItem(&pRet);
	Py_END_ALLOW_THREADS;
	if ( NS_FAILED(r) )
		return PyXPCOM_BuildPyException(r);
	if (obIID) {
		// Swap the default interface for the requested one.  The original
		// reference is released whether or not the QI succeeds.
		nsISupports *temp;
		Py_BEGIN_ALLOW_THREADS;
		r = pRet->QueryInterface(iid, (void **)&temp);
		pRet->Release();
		Py_END_ALLOW_THREADS;
		if ( NS_FAILED(r) ) {
			return PyXPCOM_BuildPyException(r);
		}
		pRet = temp;
	}
	// Drop our reference after wrapping; PyObjectFromInterface is presumed to
	// take its own (the NS_IF_RELEASE pairing here implies that) — verify if
	// changing this code.
	PyObject *ret = Py_nsISupports::PyObjectFromInterface(pRet, iid);
	NS_IF_RELEASE(pRet);
	return ret;
}
// FetchBlock(n[, iid]) -> list of up to n wrapped interface objects.
// A method added for Python performance if you really need it: fetches a
// block of objects in one hit, keeping the loop implemented in C with the
// GIL released.
static PyObject *PyFetchBlock(PyObject *self, PyObject *args)
{
	PyObject *obIID = NULL;
	int n_wanted;
	int n_fetched = 0;
	if (!PyArg_ParseTuple(args, "i|O:FetchBlock", &n_wanted, &obIID))
		return NULL;
	// Guard: `new nsISupports *[n_wanted]` with a negative count is undefined
	// behavior, and zero is useless; the original code had no check.
	if (n_wanted <= 0) {
		PyErr_SetString(PyExc_ValueError, "FetchBlock size must be positive");
		return NULL;
	}
	nsIID iid(NS_GET_IID(nsISupports));
	if (obIID != NULL && !Py_nsIID::IIDFromPyObject(obIID, &iid))
		return NULL;
	nsIEnumerator *pI = GetI(self);
	if (pI==NULL)
		return NULL;
	// We want to fetch with the thread-lock released, but this means we can
	// not append to the PyList directly; stage into a raw array instead.
	nsISupports **fetched = new nsISupports*[n_wanted];
	if (fetched==nsnull) {
		PyErr_NoMemory();
		return NULL;
	}
	memset(fetched, 0, sizeof(nsISupports *) * n_wanted);
	nsresult r = NS_OK;
	Py_BEGIN_ALLOW_THREADS;
	for (;n_fetched<n_wanted;) {
		nsISupports *pNew;
		r = pI->CurrentItem(&pNew);
		if (NS_FAILED(r)) {
			r = 0; // Normal enum end
			break;
		}
		if (obIID) {
			// Swap for the requested interface; the original reference is
			// released either way.
			nsISupports *temp;
			r = pNew->QueryInterface(iid, (void **)&temp);
			pNew->Release();
			if ( NS_FAILED(r) ) {
				break;
			}
			pNew = temp;
		}
		fetched[n_fetched] = pNew;
		n_fetched++; // must increment before breaking out.
		if (NS_FAILED(pI->Next()))
			break; // not an error condition.
	}
	Py_END_ALLOW_THREADS;
	PyObject *ret;
	if (NS_SUCCEEDED(r)) {
		ret = PyList_New(n_fetched);
		if (ret)
			for (int i=0;i<n_fetched;i++) {
				PyObject *new_ob = Py_nsISupports::PyObjectFromInterface(fetched[i], iid);
				NS_IF_RELEASE(fetched[i]);
				PyList_SET_ITEM(ret, i, new_ob);
			}
	} else
		ret = PyXPCOM_BuildPyException(r);
	if ( ret == NULL ) {
		// Allocation/exception-construction failed before the wrapping loop
		// ran, so the staged references are still ours to free.
		for (int i=0;i<n_fetched;i++)
			fetched[i]->Release();
	}
	delete [] fetched;
	return ret;
}
// IsDone() -> bool: whether the enumerator has been exhausted (NS_OK means
// done; any other success code means not done).
static PyObject *PyIsDone(PyObject *self, PyObject *args)
{
	if (!PyArg_ParseTuple(args, ":IsDone"))
		return NULL;
	nsIEnumerator *enumerator = GetI(self);
	if (enumerator == NULL)
		return NULL;
	nsresult rv;
	Py_BEGIN_ALLOW_THREADS;
	rv = enumerator->IsDone();
	Py_END_ALLOW_THREADS;
	if (NS_FAILED(rv))
		return PyXPCOM_BuildPyException(rv);
	PyObject *result = (rv == NS_OK) ? Py_True : Py_False;
	Py_INCREF(result);
	return result;
}
// Method table for the Python nsIEnumerator wrapper.  Each method is exposed
// under both its XPCOM-style (capitalized) and Python-style (lower-camel)
// name; the trailing {NULL} entry terminates the table.
struct PyMethodDef
PyMethods_IEnumerator[] =
{
	{ "First", PyFirst, 1},
	{ "first", PyFirst, 1},
	{ "Next", PyNext, 1},
	{ "next", PyNext, 1},
	{ "CurrentItem", PyCurrentItem, 1},
	{ "currentItem", PyCurrentItem, 1},
	{ "IsDone", PyIsDone, 1},
	{ "isDone", PyIsDone, 1},
	{ "FetchBlock", PyFetchBlock, 1},
	{ "fetchBlock", PyFetchBlock, 1},
	{NULL}
};
| 2,470 |
460 | <filename>trunk/win/Source/Includes/Boost/bimap/container_adaptor/vector_map_adaptor.hpp
// Boost.Bimap
//
// Copyright (c) 2006-2007 <NAME>
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/// \file container_adaptor/vector_map_adaptor.hpp
/// \brief Container adaptor.
#ifndef BOOST_BIMAP_CONTAINER_ADAPTOR_VECTOR_MAP_ADAPTOR_HPP
#define BOOST_BIMAP_CONTAINER_ADAPTOR_VECTOR_MAP_ADAPTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER>=1200)
#pragma once
#endif
#include <boost/config.hpp>
#include <boost/mpl/list.hpp>
#include <boost/mpl/push_front.hpp>
#include <boost/mpl/aux_/na.hpp>
#include <boost/bimap/container_adaptor/vector_adaptor.hpp>
#include <boost/bimap/container_adaptor/detail/identity_converters.hpp>
#include <boost/mpl/vector.hpp>
namespace boost {
namespace bimaps {
namespace container_adaptor {
/// \brief Container adaptor that presents a vector-based container of
/// pair-like values with map-style `key_type`/`data_type` metadata.
template
<
    class Base,
    class Iterator,
    class ConstIterator,
    class ReverseIterator,
    class ConstReverseIterator,
    class IteratorToBaseConverter = ::boost::mpl::na,
    class IteratorFromBaseConverter = ::boost::mpl::na,
    class ReverseIteratorFromBaseConverter = ::boost::mpl::na,
    class ValueToBaseConverter = ::boost::mpl::na,
    class ValueFromBaseConverter = ::boost::mpl::na,
    class FunctorsFromDerivedClasses = mpl::vector<>
>
class vector_map_adaptor :
    public vector_adaptor
    <
        Base,
        Iterator, ConstIterator, ReverseIterator, ConstReverseIterator,
        IteratorToBaseConverter, IteratorFromBaseConverter,
        ReverseIteratorFromBaseConverter,
        ValueToBaseConverter, ValueFromBaseConverter,
        FunctorsFromDerivedClasses
    >
{
    // Shorthand for the base adaptor this class derives from.
    typedef vector_adaptor
    <
        Base,
        Iterator, ConstIterator, ReverseIterator, ConstReverseIterator,
        IteratorToBaseConverter, IteratorFromBaseConverter,
        ReverseIteratorFromBaseConverter,
        ValueToBaseConverter, ValueFromBaseConverter,
        FunctorsFromDerivedClasses
    > base_;

    // MetaData -------------------------------------------------------------

    public:

    // Key and mapped types are deduced from the iterator's pair-like
    // value_type (its first_type/second_type members).
    typedef BOOST_DEDUCED_TYPENAME Iterator::value_type::first_type key_type;
    typedef BOOST_DEDUCED_TYPENAME Iterator::value_type::second_type data_type;

    // Access -----------------------------------------------------------------

    public:

    vector_map_adaptor() {}

    explicit vector_map_adaptor(Base & c) :
        base_(c) {}

    protected:

    typedef vector_map_adaptor vector_map_adaptor_;
};
} // namespace container_adaptor
} // namespace bimaps
} // namespace boost
#endif // BOOST_BIMAP_CONTAINER_ADAPTOR_VECTOR_MAP_ADAPTOR_HPP
| 1,207 |
479 | // Copyright (C) 2016 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.reviewdb.server;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.gerrit.reviewdb.client.Account;
import com.google.gerrit.reviewdb.client.Change;
import com.google.gerrit.reviewdb.client.ChangeMessage;
import com.google.gerrit.reviewdb.client.PatchLineComment;
import com.google.gerrit.reviewdb.client.PatchSet;
import com.google.gerrit.reviewdb.client.PatchSetApproval;
import com.google.gwtorm.server.Access;
import com.google.gwtorm.server.AtomicUpdate;
import com.google.gwtorm.server.OrmException;
import com.google.gwtorm.server.ResultSet;
import com.google.gwtorm.server.StatementExecutor;
import java.util.Map;
/**
 * {@link ReviewDb} decorator that forwards every call to a delegate.
 * Subclasses override individual methods to intercept specific accesses;
 * everything not overridden behaves exactly like the wrapped instance.
 */
public class ReviewDbWrapper implements ReviewDb {
  // The wrapped database; never null (checked in the constructor).
  protected final ReviewDb delegate;

  protected ReviewDbWrapper(ReviewDb delegate) {
    this.delegate = checkNotNull(delegate);
  }

  // Transaction control: straight delegation.
  @Override
  public void commit() throws OrmException {
    delegate.commit();
  }

  @Override
  public void rollback() throws OrmException {
    delegate.rollback();
  }

  // Schema lifecycle: straight delegation.
  @Override
  public void updateSchema(StatementExecutor e) throws OrmException {
    delegate.updateSchema(e);
  }

  @Override
  public void pruneSchema(StatementExecutor e) throws OrmException {
    delegate.pruneSchema(e);
  }

  @Override
  public Access<?, ?>[] allRelations() {
    return delegate.allRelations();
  }

  @Override
  public void close() {
    delegate.close();
  }

  // Relation accessors: each returns the delegate's accessor unchanged.
  @Override
  public SchemaVersionAccess schemaVersion() {
    return delegate.schemaVersion();
  }

  @Override
  public SystemConfigAccess systemConfig() {
    return delegate.systemConfig();
  }

  @Override
  public AccountGroupAccess accountGroups() {
    return delegate.accountGroups();
  }

  @Override
  public AccountGroupNameAccess accountGroupNames() {
    return delegate.accountGroupNames();
  }

  @Override
  public AccountGroupMemberAccess accountGroupMembers() {
    return delegate.accountGroupMembers();
  }

  @Override
  public AccountGroupMemberAuditAccess accountGroupMembersAudit() {
    return delegate.accountGroupMembersAudit();
  }

  @Override
  public ChangeAccess changes() {
    return delegate.changes();
  }

  @Override
  public PatchSetApprovalAccess patchSetApprovals() {
    return delegate.patchSetApprovals();
  }

  @Override
  public ChangeMessageAccess changeMessages() {
    return delegate.changeMessages();
  }

  @Override
  public PatchSetAccess patchSets() {
    return delegate.patchSets();
  }

  @Override
  public PatchLineCommentAccess patchComments() {
    return delegate.patchComments();
  }

  @Override
  public AccountGroupByIdAccess accountGroupById() {
    return delegate.accountGroupById();
  }

  @Override
  public AccountGroupByIdAudAccess accountGroupByIdAud() {
    return delegate.accountGroupByIdAud();
  }

  // Sequence generators: the underlying interface methods are deprecated,
  // hence the suppression on each delegating override.
  @Override
  @SuppressWarnings("deprecation")
  public int nextAccountId() throws OrmException {
    return delegate.nextAccountId();
  }

  @Override
  @SuppressWarnings("deprecation")
  public int nextAccountGroupId() throws OrmException {
    return delegate.nextAccountGroupId();
  }

  @Override
  @SuppressWarnings("deprecation")
  public int nextChangeId() throws OrmException {
    return delegate.nextChangeId();
  }

  @Override
  public boolean changesTablesEnabled() {
    return delegate.changesTablesEnabled();
  }
public static class ChangeAccessWrapper implements ChangeAccess {
protected final ChangeAccess delegate;
protected ChangeAccessWrapper(ChangeAccess delegate) {
this.delegate = checkNotNull(delegate);
}
@Override
public String getRelationName() {
return delegate.getRelationName();
}
@Override
public int getRelationID() {
return delegate.getRelationID();
}
@Override
public ResultSet<Change> iterateAllEntities() throws OrmException {
return delegate.iterateAllEntities();
}
@Override
public Change.Id primaryKey(Change entity) {
return delegate.primaryKey(entity);
}
@Override
public Map<Change.Id, Change> toMap(Iterable<Change> c) {
return delegate.toMap(c);
}
@SuppressWarnings("deprecation")
@Override
public com.google.common.util.concurrent.CheckedFuture<Change, OrmException> getAsync(
Change.Id key) {
return delegate.getAsync(key);
}
@Override
public ResultSet<Change> get(Iterable<Change.Id> keys) throws OrmException {
return delegate.get(keys);
}
@Override
public void insert(Iterable<Change> instances) throws OrmException {
delegate.insert(instances);
}
@Override
public void update(Iterable<Change> instances) throws OrmException {
delegate.update(instances);
}
@Override
public void upsert(Iterable<Change> instances) throws OrmException {
delegate.upsert(instances);
}
@Override
public void deleteKeys(Iterable<Change.Id> keys) throws OrmException {
delegate.deleteKeys(keys);
}
@Override
public void delete(Iterable<Change> instances) throws OrmException {
delegate.delete(instances);
}
@Override
public void beginTransaction(Change.Id key) throws OrmException {
delegate.beginTransaction(key);
}
@Override
public Change atomicUpdate(Change.Id key, AtomicUpdate<Change> update) throws OrmException {
return delegate.atomicUpdate(key, update);
}
@Override
public Change get(Change.Id id) throws OrmException {
return delegate.get(id);
}
@Override
public ResultSet<Change> all() throws OrmException {
return delegate.all();
}
}
public static class PatchSetApprovalAccessWrapper implements PatchSetApprovalAccess {
protected final PatchSetApprovalAccess delegate;
protected PatchSetApprovalAccessWrapper(PatchSetApprovalAccess delegate) {
this.delegate = delegate;
}
@Override
public String getRelationName() {
return delegate.getRelationName();
}
@Override
public int getRelationID() {
return delegate.getRelationID();
}
@Override
public ResultSet<PatchSetApproval> iterateAllEntities() throws OrmException {
return delegate.iterateAllEntities();
}
@Override
public PatchSetApproval.Key primaryKey(PatchSetApproval entity) {
return delegate.primaryKey(entity);
}
@Override
public Map<PatchSetApproval.Key, PatchSetApproval> toMap(Iterable<PatchSetApproval> c) {
return delegate.toMap(c);
}
@SuppressWarnings("deprecation")
@Override
public com.google.common.util.concurrent.CheckedFuture<PatchSetApproval, OrmException> getAsync(
PatchSetApproval.Key key) {
return delegate.getAsync(key);
}
@Override
public ResultSet<PatchSetApproval> get(Iterable<PatchSetApproval.Key> keys)
throws OrmException {
return delegate.get(keys);
}
@Override
public void insert(Iterable<PatchSetApproval> instances) throws OrmException {
delegate.insert(instances);
}
@Override
public void update(Iterable<PatchSetApproval> instances) throws OrmException {
delegate.update(instances);
}
@Override
public void upsert(Iterable<PatchSetApproval> instances) throws OrmException {
delegate.upsert(instances);
}
@Override
public void deleteKeys(Iterable<PatchSetApproval.Key> keys) throws OrmException {
delegate.deleteKeys(keys);
}
@Override
public void delete(Iterable<PatchSetApproval> instances) throws OrmException {
delegate.delete(instances);
}
@Override
public void beginTransaction(PatchSetApproval.Key key) throws OrmException {
delegate.beginTransaction(key);
}
@Override
public PatchSetApproval atomicUpdate(
PatchSetApproval.Key key, AtomicUpdate<PatchSetApproval> update) throws OrmException {
return delegate.atomicUpdate(key, update);
}
@Override
public PatchSetApproval get(PatchSetApproval.Key key) throws OrmException {
return delegate.get(key);
}
@Override
public ResultSet<PatchSetApproval> byChange(Change.Id id) throws OrmException {
return delegate.byChange(id);
}
@Override
public ResultSet<PatchSetApproval> byPatchSet(PatchSet.Id id) throws OrmException {
return delegate.byPatchSet(id);
}
@Override
public ResultSet<PatchSetApproval> byPatchSetUser(PatchSet.Id patchSet, Account.Id account)
throws OrmException {
return delegate.byPatchSetUser(patchSet, account);
}
@Override
public ResultSet<PatchSetApproval> all() throws OrmException {
return delegate.all();
}
}
public static class ChangeMessageAccessWrapper implements ChangeMessageAccess {
protected final ChangeMessageAccess delegate;
protected ChangeMessageAccessWrapper(ChangeMessageAccess delegate) {
this.delegate = delegate;
}
@Override
public String getRelationName() {
return delegate.getRelationName();
}
@Override
public int getRelationID() {
return delegate.getRelationID();
}
@Override
public ResultSet<ChangeMessage> iterateAllEntities() throws OrmException {
return delegate.iterateAllEntities();
}
@Override
public ChangeMessage.Key primaryKey(ChangeMessage entity) {
return delegate.primaryKey(entity);
}
@Override
public Map<ChangeMessage.Key, ChangeMessage> toMap(Iterable<ChangeMessage> c) {
return delegate.toMap(c);
}
@SuppressWarnings("deprecation")
@Override
public com.google.common.util.concurrent.CheckedFuture<ChangeMessage, OrmException> getAsync(
ChangeMessage.Key key) {
return delegate.getAsync(key);
}
@Override
public ResultSet<ChangeMessage> get(Iterable<ChangeMessage.Key> keys) throws OrmException {
return delegate.get(keys);
}
@Override
public void insert(Iterable<ChangeMessage> instances) throws OrmException {
delegate.insert(instances);
}
@Override
public void update(Iterable<ChangeMessage> instances) throws OrmException {
delegate.update(instances);
}
@Override
public void upsert(Iterable<ChangeMessage> instances) throws OrmException {
delegate.upsert(instances);
}
@Override
public void deleteKeys(Iterable<ChangeMessage.Key> keys) throws OrmException {
delegate.deleteKeys(keys);
}
@Override
public void delete(Iterable<ChangeMessage> instances) throws OrmException {
delegate.delete(instances);
}
@Override
public void beginTransaction(ChangeMessage.Key key) throws OrmException {
delegate.beginTransaction(key);
}
@Override
public ChangeMessage atomicUpdate(ChangeMessage.Key key, AtomicUpdate<ChangeMessage> update)
throws OrmException {
return delegate.atomicUpdate(key, update);
}
@Override
public ChangeMessage get(ChangeMessage.Key id) throws OrmException {
return delegate.get(id);
}
@Override
public ResultSet<ChangeMessage> byChange(Change.Id id) throws OrmException {
return delegate.byChange(id);
}
@Override
public ResultSet<ChangeMessage> byPatchSet(PatchSet.Id id) throws OrmException {
return delegate.byPatchSet(id);
}
@Override
public ResultSet<ChangeMessage> all() throws OrmException {
return delegate.all();
}
}
public static class PatchSetAccessWrapper implements PatchSetAccess {
protected final PatchSetAccess delegate;
protected PatchSetAccessWrapper(PatchSetAccess delegate) {
this.delegate = delegate;
}
@Override
public String getRelationName() {
return delegate.getRelationName();
}
@Override
public int getRelationID() {
return delegate.getRelationID();
}
@Override
public ResultSet<PatchSet> iterateAllEntities() throws OrmException {
return delegate.iterateAllEntities();
}
@Override
public PatchSet.Id primaryKey(PatchSet entity) {
return delegate.primaryKey(entity);
}
@Override
public Map<PatchSet.Id, PatchSet> toMap(Iterable<PatchSet> c) {
return delegate.toMap(c);
}
@SuppressWarnings("deprecation")
@Override
public com.google.common.util.concurrent.CheckedFuture<PatchSet, OrmException> getAsync(
PatchSet.Id key) {
return delegate.getAsync(key);
}
@Override
public ResultSet<PatchSet> get(Iterable<PatchSet.Id> keys) throws OrmException {
return delegate.get(keys);
}
@Override
public void insert(Iterable<PatchSet> instances) throws OrmException {
delegate.insert(instances);
}
@Override
public void update(Iterable<PatchSet> instances) throws OrmException {
delegate.update(instances);
}
@Override
public void upsert(Iterable<PatchSet> instances) throws OrmException {
delegate.upsert(instances);
}
@Override
public void deleteKeys(Iterable<PatchSet.Id> keys) throws OrmException {
delegate.deleteKeys(keys);
}
@Override
public void delete(Iterable<PatchSet> instances) throws OrmException {
delegate.delete(instances);
}
@Override
public void beginTransaction(PatchSet.Id key) throws OrmException {
delegate.beginTransaction(key);
}
@Override
public PatchSet atomicUpdate(PatchSet.Id key, AtomicUpdate<PatchSet> update)
throws OrmException {
return delegate.atomicUpdate(key, update);
}
@Override
public PatchSet get(PatchSet.Id id) throws OrmException {
return delegate.get(id);
}
@Override
public ResultSet<PatchSet> byChange(Change.Id id) throws OrmException {
return delegate.byChange(id);
}
@Override
public ResultSet<PatchSet> all() throws OrmException {
return delegate.all();
}
}
public static class PatchLineCommentAccessWrapper implements PatchLineCommentAccess {
protected PatchLineCommentAccess delegate;
protected PatchLineCommentAccessWrapper(PatchLineCommentAccess delegate) {
this.delegate = delegate;
}
@Override
public String getRelationName() {
return delegate.getRelationName();
}
@Override
public int getRelationID() {
return delegate.getRelationID();
}
@Override
public ResultSet<PatchLineComment> iterateAllEntities() throws OrmException {
return delegate.iterateAllEntities();
}
@Override
public PatchLineComment.Key primaryKey(PatchLineComment entity) {
return delegate.primaryKey(entity);
}
@Override
public Map<PatchLineComment.Key, PatchLineComment> toMap(Iterable<PatchLineComment> c) {
return delegate.toMap(c);
}
@SuppressWarnings("deprecation")
@Override
public com.google.common.util.concurrent.CheckedFuture<PatchLineComment, OrmException> getAsync(
PatchLineComment.Key key) {
return delegate.getAsync(key);
}
@Override
public ResultSet<PatchLineComment> get(Iterable<PatchLineComment.Key> keys)
throws OrmException {
return delegate.get(keys);
}
@Override
public void insert(Iterable<PatchLineComment> instances) throws OrmException {
delegate.insert(instances);
}
@Override
public void update(Iterable<PatchLineComment> instances) throws OrmException {
delegate.update(instances);
}
@Override
public void upsert(Iterable<PatchLineComment> instances) throws OrmException {
delegate.upsert(instances);
}
@Override
public void deleteKeys(Iterable<PatchLineComment.Key> keys) throws OrmException {
delegate.deleteKeys(keys);
}
@Override
public void delete(Iterable<PatchLineComment> instances) throws OrmException {
delegate.delete(instances);
}
@Override
public void beginTransaction(PatchLineComment.Key key) throws OrmException {
delegate.beginTransaction(key);
}
@Override
public PatchLineComment atomicUpdate(
PatchLineComment.Key key, AtomicUpdate<PatchLineComment> update) throws OrmException {
return delegate.atomicUpdate(key, update);
}
@Override
public PatchLineComment get(PatchLineComment.Key id) throws OrmException {
return delegate.get(id);
}
@Override
public ResultSet<PatchLineComment> byChange(Change.Id id) throws OrmException {
return delegate.byChange(id);
}
@Override
public ResultSet<PatchLineComment> byPatchSet(PatchSet.Id id) throws OrmException {
return delegate.byPatchSet(id);
}
@Override
public ResultSet<PatchLineComment> publishedByChangeFile(Change.Id id, String file)
throws OrmException {
return delegate.publishedByChangeFile(id, file);
}
@Override
public ResultSet<PatchLineComment> publishedByPatchSet(PatchSet.Id patchset)
throws OrmException {
return delegate.publishedByPatchSet(patchset);
}
@Override
public ResultSet<PatchLineComment> draftByPatchSetAuthor(
PatchSet.Id patchset, Account.Id author) throws OrmException {
return delegate.draftByPatchSetAuthor(patchset, author);
}
@Override
public ResultSet<PatchLineComment> draftByChangeFileAuthor(
Change.Id id, String file, Account.Id author) throws OrmException {
return delegate.draftByChangeFileAuthor(id, file, author);
}
@Override
public ResultSet<PatchLineComment> draftByAuthor(Account.Id author) throws OrmException {
return delegate.draftByAuthor(author);
}
@Override
public ResultSet<PatchLineComment> all() throws OrmException {
return delegate.all();
}
}
}
| 6,220 |
333 | {
"name": "swiped",
"version": "0.1.4",
"homepage": "https://github.com/mishk0/swiped",
"authors": [
"mishk0 <<EMAIL>>"
],
"description": "touch swipe for your mobile application, written in vanilla JS",
"main": "./dist/swiped.js",
"ignore": [
".*",
"package.json",
"node_modules",
"bower_components",
"tests"
],
"keywords": [
"touch",
"swipe",
"mobile",
"slider",
"slide"
],
"license": "MIT"
}
| 209 |
6,989 | #include "common.h"
#include <util/generic/yexception.h>
namespace NLoggingImpl {
    // Formats the current local time as e.g. "Sep05_142301"; used to build
    // unique names for rotated log files.
    TString GetLocalTimeSSimple() {
        struct tm tm;
        return Strftime("%b%d_%H%M%S", Now().LocalTime(&tm));
    }

    // Validates the requested log level, moves an existing log file out of the
    // way when rotation is enabled, and suppresses console targets when running
    // as a daemon. Returns the (possibly adjusted) log target name.
    TString PrepareToOpenLog(TString logType, const int logLevel, const bool rotation, const bool startAsDaemon) {
        Y_ENSURE(logLevel >= 0 && logLevel <= (int)LOG_MAX_PRIORITY, "Incorrect log level");

        if (rotation && TFsPath(logType).Exists()) {
            // Rename the old log aside; timestamp plus microseconds makes the name unique.
            TString newPath = Sprintf("%s_%s_%" PRIu64, logType.data(), NLoggingImpl::GetLocalTimeSSimple().data(), static_cast<ui64>(Now().MicroSeconds()));
            TFsPath(logType).RenameTo(newPath);
        }
        if (startAsDaemon && (logType == "console"sv || logType == "cout"sv || logType == "cerr"sv)) {
            // A daemonized process has no terminal, so console logging goes to "null".
            logType = "null";
        }

        return logType;
    }
}
// Returns true when the record's priority passes the log's filtration level,
// i.e. the record should be emitted.
bool TLogFilter::CheckLoggingContext(TLog& log, const TLogRecordContext& context) {
    return context.Priority <= log.FiltrationLevel();
}
// Returns a log element to stream the record into. If a record was already
// started further up the filter chain (`earlier`), it is reused; otherwise a
// new element bound to `logger` is created and primed with the record priority.
TSimpleSharedPtr<TLogElement> TLogFilter::StartRecord(TLog& logger, const TLogRecordContext& context, TSimpleSharedPtr<TLogElement> earlier) {
    if (earlier)
        return earlier;
    TSimpleSharedPtr<TLogElement> result(new TLogElement(&logger));
    *result << context.Priority;
    return result;
}
| 535 |
733 | <gh_stars>100-1000
from rx import Observable, AnonymousObservable
from rx.internal import extensionmethod
@extensionmethod(Observable)
def skip_last(self, count):
    """Bypasses a specified number of elements at the end of an observable
    sequence.

    Description:
    This operator accumulates a queue with a length enough to store the
    first `count` elements. As more elements are received, elements are
    taken from the front of the queue and produced on the result sequence.
    This causes elements to be delayed.

    Keyword arguments
    count -- Number of elements to bypass at the end of the source sequence.

    Returns an observable {Observable} sequence containing the source
    sequence elements except for the bypassed ones at the end.
    """
    source = self

    def subscribe(observer):
        # Rolling buffer of the most recent `count` elements; anything that
        # overflows it is guaranteed not to be among the last `count` and can
        # be re-emitted downstream.
        q = []

        def on_next(x):
            # Track "have an element to emit" explicitly instead of using None
            # as a sentinel: the original `front is None` test silently dropped
            # legitimate None values emitted by the source.
            front = None
            has_front = False
            with self.lock:
                q.append(x)
                if len(q) > count:
                    front = q.pop(0)
                    has_front = True

            # Emit outside the lock to avoid holding it during observer code.
            if has_front:
                observer.on_next(front)

        return source.subscribe(on_next, observer.on_error,
                                observer.on_completed)
    return AnonymousObservable(subscribe)
| 477 |
778 | <filename>buildtools/src/main/java/org/apache/pulsar/tests/RetryAnalyzer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pulsar.tests;
import java.util.Arrays;
import java.util.Collections;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.testng.ITestResult;
import org.testng.SkipException;
import org.testng.util.RetryAnalyzerCount;
/**
 * TestNG retry analyzer that re-runs failed tests a bounded number of times, except for test
 * classes that were modified in the current changeset (those must surface their flakiness
 * immediately in CI) and tests that failed with a {@link SkipException}.
 */
public class RetryAnalyzer extends RetryAnalyzerCount {
    // Number of retry attempts per test; defaults to a single retry.
    static final int MAX_RETRIES = Integer.parseInt(System.getProperty("testRetryCount", "1"));

    // Matches a test source file path and captures the package/class portion after src/test/java/.
    private static final Pattern TEST_FILE_PATTERN = Pattern.compile("^.*src/test/java/(.*)\\.java$");

    // Fully-qualified names of the test classes changed in the current changeset,
    // derived from the whitespace-separated CHANGED_TESTS environment variable.
    private static final Set<String> CHANGED_TEST_CLASSES =
            changedTestClasses(System.getenv("CHANGED_TESTS"));

    /** Parses the CHANGED_TESTS value into a set of fully-qualified class names. */
    private static Set<String> changedTestClasses(String changedTestsCsv) {
        if (changedTestsCsv == null) {
            return Collections.emptySet();
        }
        Set<String> classNames = Arrays.stream(StringUtils.split(changedTestsCsv))
                .map(RetryAnalyzer::toClassName)
                .filter(Objects::nonNull)
                .collect(Collectors.toSet());
        return Collections.unmodifiableSet(classNames);
    }

    /** Converts a test source path to a dotted class name, or null if it is not a test file. */
    private static String toClassName(String path) {
        Matcher matcher = TEST_FILE_PATTERN.matcher(path);
        return matcher.matches() ? matcher.group(1).replace('/', '.') : null;
    }

    public RetryAnalyzer() {
        setCount(MAX_RETRIES);
    }

    @Override
    public boolean retry(ITestResult result) {
        // Never retry a test class touched by the current changeset.
        return !CHANGED_TEST_CLASSES.contains(result.getTestClass().getName())
                && super.retry(result);
    }

    @Override
    public boolean retryMethod(ITestResult result) {
        // Skipped tests are not failures worth retrying.
        return !(result.getThrowable() instanceof SkipException);
    }
}
| 1,223 |
32,544 | package com.baeldung.functional;
import static org.springframework.web.reactive.function.BodyExtractors.toDataBuffers;
import static org.springframework.web.reactive.function.BodyExtractors.toFormData;
import static org.springframework.web.reactive.function.BodyInserters.fromValue;
import static org.springframework.web.reactive.function.server.RequestPredicates.GET;
import static org.springframework.web.reactive.function.server.RequestPredicates.POST;
import static org.springframework.web.reactive.function.server.RequestPredicates.path;
import static org.springframework.web.reactive.function.server.RouterFunctions.route;
import static org.springframework.web.reactive.function.server.RouterFunctions.toHttpHandler;
import static org.springframework.web.reactive.function.server.ServerResponse.ok;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicLong;
import org.springframework.core.io.ClassPathResource;
import org.springframework.http.server.reactive.HttpHandler;
import org.springframework.http.server.reactive.ServletHttpHandlerAdapter;
import org.springframework.util.MultiValueMap;
import org.springframework.web.reactive.function.server.RouterFunction;
import org.springframework.web.reactive.function.server.RouterFunctions;
import org.springframework.web.reactive.function.server.ServerResponse;
import org.springframework.web.server.WebHandler;
import org.springframework.web.server.adapter.WebHttpHandlerBuilder;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
/**
 * Servlet entry point that exposes a Spring WebFlux functional {@link RouterFunction} through the
 * Servlet API via {@link ServletHttpHandlerAdapter}.
 */
public class RootServlet extends ServletHttpHandlerAdapter {

    public RootServlet() {
        // Convert the routing function to an HttpHandler and wrap it with
        // IndexRewriteFilter (declared elsewhere in this package).
        this(WebHttpHandlerBuilder.webHandler((WebHandler) toHttpHandler(routingFunction()))
            .filter(new IndexRewriteFilter())
            .build());
    }

    RootServlet(HttpHandler httpHandler) {
        super(httpHandler);
    }

    private static final Actor BRAD_PITT = new Actor("Brad", "Pitt");
    private static final Actor TOM_HANKS = new Actor("Tom", "Hanks");
    // Thread-safe backing store for the /actor endpoints; seeded with two entries.
    private static final List<Actor> actors = new CopyOnWriteArrayList<>(Arrays.asList(BRAD_PITT, TOM_HANKS));

    // Declares every route served by this servlet:
    //   GET  /test     -> constant "helloworld" body
    //   POST /login    -> form-data check against hard-coded demo credentials
    //   POST /upload   -> responds with the total number of uploaded bytes
    //   GET  /files/** -> static resources from classpath:files/
    //   /actor         -> GET streams the actor list, POST appends one actor
    // plus a filter that logs each request path before handling.
    // NOTE(review): the .block() calls inside map(...) run blocking code on a
    // reactive path — confirm this is acceptable for the deployment container.
    private static RouterFunction<?> routingFunction() {
        return route(GET("/test"), serverRequest -> ok().body(fromValue("helloworld"))).andRoute(POST("/login"), serverRequest -> serverRequest.body(toFormData())
            .map(MultiValueMap::toSingleValueMap)
            .map(formData -> {
                System.out.println("form data: " + formData.toString());
                if ("baeldung".equals(formData.get("user")) && "you_know_what_to_do".equals(formData.get("token"))) {
                    return ok().body(Mono.just("welcome back!"), String.class)
                        .block();
                }
                return ServerResponse.badRequest()
                    .build()
                    .block();
            }))
            .andRoute(POST("/upload"), serverRequest -> serverRequest.body(toDataBuffers())
                .collectList()
                .map(dataBuffers -> {
                    // Sum the byte counts of all received buffers.
                    // NOTE(review): asByteBuffer().array() assumes an array-backed
                    // buffer — confirm for the configured DataBufferFactory.
                    AtomicLong atomicLong = new AtomicLong(0);
                    dataBuffers.forEach(d -> atomicLong.addAndGet(d.asByteBuffer()
                        .array().length));
                    System.out.println("data length:" + atomicLong.get());
                    return ok().body(fromValue(atomicLong.toString()))
                        .block();
                }))
            .and(RouterFunctions.resources("/files/**", new ClassPathResource("files/")))
            .andNest(path("/actor"), route(GET("/"), serverRequest -> ok().body(Flux.fromIterable(actors), Actor.class)).andRoute(POST("/"), serverRequest -> serverRequest.bodyToMono(Actor.class)
                .doOnNext(actors::add)
                .then(ok().build())))
            .filter((request, next) -> {
                System.out.println("Before handler invocation: " + request.path());
                return next.handle(request);
            });
    }
}
| 1,579 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.avs.implementation;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.Response;
import com.azure.core.http.rest.SimpleResponse;
import com.azure.core.util.Context;
import com.azure.core.util.logging.ClientLogger;
import com.azure.resourcemanager.avs.fluent.ScriptExecutionsClient;
import com.azure.resourcemanager.avs.fluent.models.ScriptExecutionInner;
import com.azure.resourcemanager.avs.models.ScriptExecution;
import com.azure.resourcemanager.avs.models.ScriptExecutions;
import com.azure.resourcemanager.avs.models.ScriptOutputStreamType;
import com.fasterxml.jackson.annotation.JsonIgnore;
import java.util.List;
/**
 * Implementation of {@link ScriptExecutions} that adapts the generated service client
 * ({@link ScriptExecutionsClient}) to the fluent resource-model API.
 */
public final class ScriptExecutionsImpl implements ScriptExecutions {
    @JsonIgnore private final ClientLogger logger = new ClientLogger(ScriptExecutionsImpl.class);

    private final ScriptExecutionsClient innerClient;

    private final com.azure.resourcemanager.avs.AvsManager serviceManager;

    public ScriptExecutionsImpl(
        ScriptExecutionsClient innerClient, com.azure.resourcemanager.avs.AvsManager serviceManager) {
        this.innerClient = innerClient;
        this.serviceManager = serviceManager;
    }

    /** Lists script executions in a private cloud, wrapping each inner model. */
    public PagedIterable<ScriptExecution> list(String resourceGroupName, String privateCloudName) {
        PagedIterable<ScriptExecutionInner> inner = this.serviceClient().list(resourceGroupName, privateCloudName);
        return Utils.mapPage(inner, inner1 -> new ScriptExecutionImpl(inner1, this.manager()));
    }

    public PagedIterable<ScriptExecution> list(String resourceGroupName, String privateCloudName, Context context) {
        PagedIterable<ScriptExecutionInner> inner =
            this.serviceClient().list(resourceGroupName, privateCloudName, context);
        return Utils.mapPage(inner, inner1 -> new ScriptExecutionImpl(inner1, this.manager()));
    }

    /** Gets one script execution, or null when the service returns no body. */
    public ScriptExecution get(String resourceGroupName, String privateCloudName, String scriptExecutionName) {
        ScriptExecutionInner inner = this.serviceClient().get(resourceGroupName, privateCloudName, scriptExecutionName);
        if (inner != null) {
            return new ScriptExecutionImpl(inner, this.manager());
        } else {
            return null;
        }
    }

    public Response<ScriptExecution> getWithResponse(
        String resourceGroupName, String privateCloudName, String scriptExecutionName, Context context) {
        Response<ScriptExecutionInner> inner =
            this.serviceClient().getWithResponse(resourceGroupName, privateCloudName, scriptExecutionName, context);
        if (inner != null) {
            return new SimpleResponse<>(
                inner.getRequest(),
                inner.getStatusCode(),
                inner.getHeaders(),
                new ScriptExecutionImpl(inner.getValue(), this.manager()));
        } else {
            return null;
        }
    }

    public void delete(String resourceGroupName, String privateCloudName, String scriptExecutionName) {
        this.serviceClient().delete(resourceGroupName, privateCloudName, scriptExecutionName);
    }

    public void delete(String resourceGroupName, String privateCloudName, String scriptExecutionName, Context context) {
        this.serviceClient().delete(resourceGroupName, privateCloudName, scriptExecutionName, context);
    }

    /** Fetches a script execution including its output streams, or null when absent. */
    public ScriptExecution getExecutionLogs(
        String resourceGroupName, String privateCloudName, String scriptExecutionName) {
        ScriptExecutionInner inner =
            this.serviceClient().getExecutionLogs(resourceGroupName, privateCloudName, scriptExecutionName);
        if (inner != null) {
            return new ScriptExecutionImpl(inner, this.manager());
        } else {
            return null;
        }
    }

    public Response<ScriptExecution> getExecutionLogsWithResponse(
        String resourceGroupName,
        String privateCloudName,
        String scriptExecutionName,
        List<ScriptOutputStreamType> scriptOutputStreamType,
        Context context) {
        Response<ScriptExecutionInner> inner =
            this
                .serviceClient()
                .getExecutionLogsWithResponse(
                    resourceGroupName, privateCloudName, scriptExecutionName, scriptOutputStreamType, context);
        if (inner != null) {
            return new SimpleResponse<>(
                inner.getRequest(),
                inner.getStatusCode(),
                inner.getHeaders(),
                new ScriptExecutionImpl(inner.getValue(), this.manager()));
        } else {
            return null;
        }
    }

    /**
     * Extracts a required path segment value from an ARM resource ID.
     *
     * <p>Centralizes the validation previously repeated in every *ById method; the error
     * message is identical to the one those methods produced.
     *
     * @param id the full ARM resource ID.
     * @param segmentName the path segment whose value is required (e.g. "resourceGroups").
     * @return the segment's value.
     * @throws IllegalArgumentException (logged via {@link ClientLogger}) when the segment is missing.
     */
    private String requiredSegment(String id, String segmentName) {
        String value = Utils.getValueFromIdByName(id, segmentName);
        if (value == null) {
            throw logger
                .logExceptionAsError(
                    new IllegalArgumentException(
                        String
                            .format(
                                "The resource ID '%s' is not valid. Missing path segment '%s'.", id, segmentName)));
        }
        return value;
    }

    public ScriptExecution getById(String id) {
        String resourceGroupName = requiredSegment(id, "resourceGroups");
        String privateCloudName = requiredSegment(id, "privateClouds");
        String scriptExecutionName = requiredSegment(id, "scriptExecutions");
        return this.getWithResponse(resourceGroupName, privateCloudName, scriptExecutionName, Context.NONE).getValue();
    }

    public Response<ScriptExecution> getByIdWithResponse(String id, Context context) {
        String resourceGroupName = requiredSegment(id, "resourceGroups");
        String privateCloudName = requiredSegment(id, "privateClouds");
        String scriptExecutionName = requiredSegment(id, "scriptExecutions");
        return this.getWithResponse(resourceGroupName, privateCloudName, scriptExecutionName, context);
    }

    public void deleteById(String id) {
        String resourceGroupName = requiredSegment(id, "resourceGroups");
        String privateCloudName = requiredSegment(id, "privateClouds");
        String scriptExecutionName = requiredSegment(id, "scriptExecutions");
        this.delete(resourceGroupName, privateCloudName, scriptExecutionName, Context.NONE);
    }

    public void deleteByIdWithResponse(String id, Context context) {
        String resourceGroupName = requiredSegment(id, "resourceGroups");
        String privateCloudName = requiredSegment(id, "privateClouds");
        String scriptExecutionName = requiredSegment(id, "scriptExecutions");
        this.delete(resourceGroupName, privateCloudName, scriptExecutionName, context);
    }

    private ScriptExecutionsClient serviceClient() {
        return this.innerClient;
    }

    private com.azure.resourcemanager.avs.AvsManager manager() {
        return this.serviceManager;
    }

    /** Begins the fluent definition of a new script execution resource. */
    public ScriptExecutionImpl define(String name) {
        return new ScriptExecutionImpl(name, this.manager());
    }
}
| 4,439 |
623 | // Copyright (C) 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.server.cache.serialize.entities;
import static com.google.common.truth.Truth.assertThat;
import static com.google.gerrit.server.cache.serialize.entities.ProjectSerializer.deserialize;
import static com.google.gerrit.server.cache.serialize.entities.ProjectSerializer.serialize;
import com.google.gerrit.entities.BooleanProjectConfig;
import com.google.gerrit.entities.Project;
import com.google.gerrit.extensions.client.InheritableBoolean;
import com.google.gerrit.extensions.client.ProjectState;
import com.google.gerrit.extensions.client.SubmitType;
import org.junit.Test;
public class ProjectSerializerTest {
static final Project ALL_VALUES_SET =
Project.builder(Project.nameKey("test"))
.setDescription("desc")
.setSubmitType(SubmitType.FAST_FORWARD_ONLY)
.setState(ProjectState.HIDDEN)
.setParent(Project.nameKey("parent"))
.setMaxObjectSizeLimit("11K")
.setDefaultDashboard("dashboard1")
.setLocalDefaultDashboard("dashboard2")
.setConfigRefState("1337")
.setBooleanConfig(BooleanProjectConfig.ENABLE_REVIEWER_BY_EMAIL, InheritableBoolean.TRUE)
.setBooleanConfig(
BooleanProjectConfig.CREATE_NEW_CHANGE_FOR_ALL_NOT_IN_TARGET,
InheritableBoolean.INHERIT)
.build();
@Test
public void roundTrip() {
assertThat(deserialize(serialize(ALL_VALUES_SET))).isEqualTo(ALL_VALUES_SET);
}
@Test
public void roundTripWithMinimalValues() {
Project projectAutoValue =
Project.builder(Project.nameKey("test"))
.setSubmitType(SubmitType.FAST_FORWARD_ONLY)
.setState(ProjectState.HIDDEN)
.build();
assertThat(deserialize(serialize(projectAutoValue))).isEqualTo(projectAutoValue);
}
}
| 865 |
1,345 | <reponame>MaksHess/napari
# Re-export the package's public context API at this level so callers can
# import from the package root instead of the private submodules.
from ._context import Context, create_context, get_context
from ._layerlist_context import LayerListContextKeys

__all__ = ['Context', 'create_context', 'get_context', 'LayerListContextKeys']
| 65 |
6,240 | <filename>.changes/1.23.0.json<gh_stars>1000+
{
"schema-version": "1.0",
"changes": [
{
"type": "enhancement",
"category": "Deploy",
"description": "Wait for function state to be active when deploying"
},
{
"type": "feature",
"category": "SQS",
"description": "Add queue_arn parameter to enable CDK integration with SQS event handler (#1681)"
}
]
}
| 167 |
763 | package org.batfish.datamodel.trace;
import static org.junit.Assert.assertEquals;
import com.google.common.collect.ImmutableList;
import com.google.common.testing.EqualsTester;
import java.util.List;
import org.batfish.common.util.BatfishObjectMapper;
import org.batfish.datamodel.TraceElement;
import org.junit.Test;
/** Test for {@link TraceTree}. */
public final class TraceTreeTest {
  @Test
  public void testEquals() {
    TraceElement elementOne = TraceElement.of("1");
    TraceElement elementTwo = TraceElement.of("2");
    List<TraceTree> noChildren = ImmutableList.of();
    List<TraceTree> oneChild = ImmutableList.of(new TraceTree(null, ImmutableList.of()));
    // Equality must consider both the trace element and the children.
    new EqualsTester()
        .addEqualityGroup(
            new TraceTree(elementOne, noChildren), new TraceTree(elementOne, noChildren))
        .addEqualityGroup(new TraceTree(elementTwo, noChildren))
        .addEqualityGroup(new TraceTree(elementOne, oneChild))
        .testEquals();
  }

  @Test
  public void testJsonSerialization() {
    // A two-level tree exercises serialization of both the element and the children.
    TraceTree original =
        new TraceTree(
            TraceElement.of("a"),
            ImmutableList.of(new TraceTree(TraceElement.of("b"), ImmutableList.of())));
    assertEquals(original, BatfishObjectMapper.clone(original, TraceTree.class));
  }
}
| 487 |
4,879 | package com.mapswithme.maps.search;
import android.app.Activity;
import androidx.annotation.NonNull;
import com.mapswithme.maps.Framework;
import com.mapswithme.util.Utils;
/**
 * Promo category processor that opens the Megafon category banner URL
 * (provided by the native core) when this promo category is activated.
 */
public class MegafonPromoProcessor implements PromoCategoryProcessor
{
  @NonNull
  private final Activity mActivity;
  MegafonPromoProcessor(@NonNull Activity activity)
  {
    mActivity = activity;
  }
  @Override
  public void process()
  {
    // Utils.openUrl launches an external browser/intent for the banner URL.
    Utils.openUrl(mActivity, Framework.nativeGetMegafonCategoryBannerUrl());
  }
}
| 165 |
1,566 | <reponame>tstrutz/sqlite_orm
#include <sqlite_orm/sqlite_orm.h>
#include <catch2/catch.hpp>
#include <numeric> // std::iota
using namespace sqlite_orm;
//  Verifies that a correlated EXISTS subquery selects exactly the users that
//  have a visit matching the inner WHERE condition.
TEST_CASE("Exists") {
    struct User {
        int id = 0;
        std::string name;
    };
    struct Visit {
        int id = 0;
        int userId = 0;
        time_t time = 0;
    };
    auto storage =
        make_storage("",
                     make_table("users", make_column("id", &User::id, primary_key()), make_column("name", &User::name)),
                     make_table("visits",
                                make_column("id", &Visit::id, primary_key()),
                                make_column("userId", &Visit::userId),
                                make_column("time", &Visit::time),
                                foreign_key(&Visit::userId).references(&User::id)));
    storage.sync_schema();
    storage.replace(User{1, "<NAME>"});
    storage.replace(User{2, "<NAME>"});
    //  Only user 1 has a visit with time == 200000.
    storage.replace(Visit{1, 1, 100000});
    storage.replace(Visit{2, 1, 100001});
    storage.replace(Visit{3, 1, 100002});
    storage.replace(Visit{4, 1, 200000});
    storage.replace(Visit{5, 2, 100000});
    auto rows = storage.select(
        &User::id,
        where(exists(select(&Visit::id, where(c(&Visit::time) == 200000 and eq(&Visit::userId, &User::id))))));
    //  Was `REQUIRE(!rows.empty() == 1)`: comparing a bool against 1 obscures the
    //  intent; assert the non-empty result directly.
    REQUIRE(!rows.empty());
}
//  Verifies that a std::vector<char> member maps to a BLOB column: it must
//  survive replace() + iterate(), and blob values must be usable in WHERE
//  clauses for count/select/get_all.
TEST_CASE("Iterate blob") {
    struct Test {
        int64_t id;
        std::vector<char> key;  //  stored as a BLOB column
    };
    struct TestComparator {
        bool operator()(const Test &lhs, const Test &rhs) const {
            return lhs.id == rhs.id && lhs.key == rhs.key;
        }
    };
    auto db =
        make_storage("",
                     make_table("Test", make_column("key", &Test::key), make_column("id", &Test::id, primary_key())));
    db.sync_schema(true);
    //  255 sequential byte values; NOTE(review): iota over signed char wraps
    //  past 127 — implementation-defined values, but round-trip equality still holds.
    std::vector<char> key(255);
    iota(key.begin(), key.end(), 0);
    Test v{5, key};
    db.replace(v);
    TestComparator testComparator;
    for(auto &obj: db.iterate<Test>()) {
        REQUIRE(testComparator(obj, v));
    } // test that view_t and iterator_t compile
    for(const auto &obj: db.iterate<Test>()) {
        REQUIRE(testComparator(obj, v));
    } // test that view_t and iterator_t compile
    {
        //  Blob used as a filter value in count(), select() and get_all().
        auto keysCount = db.count<Test>(where(c(&Test::key) == key));
        auto keysCountRows = db.select(count<Test>(), where(c(&Test::key) == key));
        REQUIRE(keysCountRows.size() == 1);
        REQUIRE(keysCountRows.front() == 1);
        REQUIRE(keysCount == keysCountRows.front());
        REQUIRE(db.get_all<Test>(where(c(&Test::key) == key)).size() == 1);
    }
    {
        //  Filtered iteration must visit exactly the single matching row.
        int iterationsCount = 0;
        for(auto &w: db.iterate<Test>(where(c(&Test::key) == key))) {
            REQUIRE(testComparator(w, v));
            ++iterationsCount;
        }
        REQUIRE(iterationsCount == 1);
    }
}
//  Smoke test: calling threadsafe() must not throw.
TEST_CASE("Threadsafe") {
    threadsafe();
}
//  Verifies that three storages mapped onto the same database file — one using
//  raw member pointers (storage0), one using by-value getter/setter pairs
//  (storage1), and one using by-reference getter/setter pairs (storage2) —
//  all observe identical data and produce identical query results, whichever
//  member-pointer flavour is used in the query expressions.
TEST_CASE("Different getters and setters") {
    struct User {
        int id;
        std::string name;
        //  by-value accessors
        int getIdByValConst() const {
            return this->id;
        }
        void setIdByVal(int id_) {
            this->id = id_;
        }
        std::string getNameByVal() {
            return this->name;
        }
        void setNameByConstRef(const std::string &name_) {
            this->name = name_;
        }
        //  by-reference accessors
        const int &getConstIdByRefConst() const {
            return this->id;
        }
        void setIdByRef(int &id_) {
            this->id = id_;
        }
        const std::string &getConstNameByRefConst() const {
            return this->name;
        }
        void setNameByRef(std::string &name_) {
            this->name = std::move(name_);
        }
    };
    //  All three storages share one on-disk database file.
    auto filename = "different.sqlite";
    auto storage0 = make_storage(
        filename,
        make_table("users", make_column("id", &User::id, primary_key()), make_column("name", &User::name)))
;
    auto storage1 = make_storage(filename,
                                 make_table("users",
                                            make_column("id", &User::getIdByValConst, &User::setIdByVal, primary_key()),
                                            make_column("name", &User::setNameByConstRef, &User::getNameByVal)));
    auto storage2 =
        make_storage(filename,
                     make_table("users",
                                make_column("id", &User::getConstIdByRefConst, &User::setIdByRef, primary_key()),
                                make_column("name", &User::getConstNameByRefConst, &User::setNameByRef)));
    storage0.sync_schema();
    storage0.remove_all<User>();
    REQUIRE(storage0.count<User>() == 0);
    REQUIRE(storage1.count<User>() == 0);
    REQUIRE(storage2.count<User>() == 0);
    //  A row written through storage0 must be visible through all storages.
    storage0.replace(User{1, "Da buzz"});
    REQUIRE(storage0.count<User>() == 1);
    REQUIRE(storage1.count<User>() == 1);
    REQUIRE(storage2.count<User>() == 1);
    {
        //  select() by each accessor flavour returns the same ids.
        auto ids = storage0.select(&User::id);
        REQUIRE(ids.size() == 1);
        REQUIRE(ids.front() == 1);
        auto ids2 = storage1.select(&User::getIdByValConst);
        REQUIRE(ids == ids2);
        auto ids3 = storage1.select(&User::setIdByVal);
        REQUIRE(ids3 == ids2);
        auto ids4 = storage2.select(&User::getConstIdByRefConst);
        REQUIRE(ids4 == ids3);
        auto ids5 = storage2.select(&User::setIdByRef);
        REQUIRE(ids5 == ids4);
    }
    {
        //  select() with a WHERE clause built from each accessor flavour.
        auto ids = storage0.select(&User::id, where(is_equal(&User::name, "Da buzz")));
        REQUIRE(ids.size() == 1);
        REQUIRE(ids.front() == 1);
        auto ids2 = storage1.select(&User::getIdByValConst, where(is_equal(&User::setNameByConstRef, "Da buzz")));
        REQUIRE(ids == ids2);
        auto ids3 = storage1.select(&User::setIdByVal, where(is_equal(&User::getNameByVal, "Da buzz")));
        REQUIRE(ids3 == ids2);
        auto ids4 =
            storage2.select(&User::getConstIdByRefConst, where(is_equal(&User::getConstNameByRefConst, "Da buzz")));
        REQUIRE(ids4 == ids3);
        auto ids5 = storage2.select(&User::setIdByRef, where(is_equal(&User::setNameByRef, "Da buzz")));
        REQUIRE(ids5 == ids4);
    }
    {
        //  Same as above but via columns(...) tuples.
        auto ids = storage0.select(columns(&User::id), where(is_equal(&User::name, "Da buzz")));
        REQUIRE(ids.size() == 1);
        REQUIRE(std::get<0>(ids.front()) == 1);
        auto ids2 =
            storage1.select(columns(&User::getIdByValConst), where(is_equal(&User::setNameByConstRef, "Da buzz")));
        REQUIRE(ids == ids2);
        auto ids3 = storage1.select(columns(&User::setIdByVal), where(is_equal(&User::getNameByVal, "Da buzz")));
        REQUIRE(ids3 == ids2);
        auto ids4 = storage2.select(columns(&User::getConstIdByRefConst),
                                    where(is_equal(&User::getConstNameByRefConst, "Da buzz")));
        REQUIRE(ids4 == ids3);
        auto ids5 = storage2.select(columns(&User::setIdByRef), where(is_equal(&User::setNameByRef, "Da buzz")));
        REQUIRE(ids5 == ids4);
    }
    //  Aggregate functions must also agree across all accessor flavours.
    {
        auto avgValue = storage0.avg(&User::id);
        REQUIRE(avgValue == storage1.avg(&User::getIdByValConst));
        REQUIRE(avgValue == storage1.avg(&User::setIdByVal));
        REQUIRE(avgValue == storage2.avg(&User::getConstIdByRefConst));
        REQUIRE(avgValue == storage2.avg(&User::setIdByRef));
    }
    {
        auto count = storage0.count(&User::id);
        REQUIRE(count == storage1.count(&User::getIdByValConst));
        REQUIRE(count == storage1.count(&User::setIdByVal));
        REQUIRE(count == storage2.count(&User::getConstIdByRefConst));
        REQUIRE(count == storage2.count(&User::setIdByRef));
    }
    {
        auto groupConcat = storage0.group_concat(&User::id);
        REQUIRE(groupConcat == storage1.group_concat(&User::getIdByValConst));
        REQUIRE(groupConcat == storage1.group_concat(&User::setIdByVal));
        REQUIRE(groupConcat == storage2.group_concat(&User::getConstIdByRefConst));
        REQUIRE(groupConcat == storage2.group_concat(&User::setIdByRef));
    }
    {
        auto arg = "ototo";  //  custom separator for group_concat
        auto groupConcat = storage0.group_concat(&User::id, arg);
        REQUIRE(groupConcat == storage1.group_concat(&User::getIdByValConst, arg));
        REQUIRE(groupConcat == storage1.group_concat(&User::setIdByVal, arg));
        REQUIRE(groupConcat == storage2.group_concat(&User::getConstIdByRefConst, arg));
        REQUIRE(groupConcat == storage2.group_concat(&User::setIdByRef, arg));
    }
    {
        auto max = storage0.max(&User::id);
        REQUIRE(max);
        REQUIRE(*max == *storage1.max(&User::getIdByValConst));
        REQUIRE(*max == *storage1.max(&User::setIdByVal));
        REQUIRE(*max == *storage2.max(&User::getConstIdByRefConst));
        REQUIRE(*max == *storage2.max(&User::setIdByRef));
    }
    {
        auto min = storage0.min(&User::id);
        REQUIRE(min);
        REQUIRE(*min == *storage1.min(&User::getIdByValConst));
        REQUIRE(*min == *storage1.min(&User::setIdByVal));
        REQUIRE(*min == *storage2.min(&User::getConstIdByRefConst));
        REQUIRE(*min == *storage2.min(&User::setIdByRef));
    }
    {
        auto sum = storage0.sum(&User::id);
        REQUIRE(sum);
        REQUIRE(*sum == *storage1.sum(&User::getIdByValConst));
        REQUIRE(*sum == *storage1.sum(&User::setIdByVal));
        REQUIRE(*sum == *storage2.sum(&User::getConstIdByRefConst));
        REQUIRE(*sum == *storage2.sum(&User::setIdByRef));
    }
    {
        auto total = storage0.total(&User::id);
        REQUIRE(total == storage1.total(&User::getIdByValConst));
        REQUIRE(total == storage1.total(&User::setIdByVal));
        REQUIRE(total == storage2.total(&User::getConstIdByRefConst));
        REQUIRE(total == storage2.total(&User::setIdByRef));
    }
}
#ifdef SQLITE_ORM_OPTIONAL_SUPPORTED
//  Verifies storage.dump() output for rows with a std::optional column:
//  an empty optional is rendered as 'null', a set optional as its value.
TEST_CASE("Dump") {
    struct User {
        int id = 0;
        std::optional<int> carYear; // will be empty if user takes the bus.
    };
    auto storage = make_storage(
        {},
        make_table("users", make_column("id", &User::id, primary_key()), make_column("car_year", &User::carYear)));
    storage.sync_schema();
    //  user 1 has no car year, user 2 does.
    auto userId_1 = storage.insert(User{0, {}});
    auto userId_2 = storage.insert(User{0, 2006});
    std::ignore = userId_2;
    REQUIRE(storage.count<User>(where(is_not_null(&User::carYear))) == 1);
    auto rows = storage.select(&User::carYear, where(is_equal(&User::id, userId_1)));
    REQUIRE(rows.size() == 1);
    REQUIRE(!rows.front().has_value());
    auto allUsers = storage.get_all<User>();
    REQUIRE(allUsers.size() == 2);
    const std::string dumpUser1 = storage.dump(allUsers[0]);
    REQUIRE(dumpUser1 == std::string{"{ id : '1', car_year : 'null' }"});
    const std::string dumpUser2 = storage.dump(allUsers[1]);
    REQUIRE(dumpUser2 == std::string{"{ id : '2', car_year : '2006' }"});
}
#endif // SQLITE_ORM_OPTIONAL_SUPPORTED
| 5,061 |
1,125 | <gh_stars>1000+
#!/usr/bin/env python
from pymodbus.compat import IS_PYTHON3, PYTHON_VERSION
import pytest
import asynctest
import asyncio
import logging
import time
_logger = logging.getLogger()
if IS_PYTHON3: # Python 3
from asynctest.mock import patch, Mock, MagicMock
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.factory import ServerDecoder
from pymodbus.server.asynchronous import ModbusTcpProtocol, ModbusUdpProtocol
from pymodbus.server.async_io import StartTcpServer, StartTlsServer, StartUdpServer, StartSerialServer, StopServer, ModbusServerFactory
from pymodbus.server.async_io import ModbusConnectedRequestHandler, ModbusBaseRequestHandler
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.compat import byte2int
from pymodbus.transaction import ModbusSocketFramer
from pymodbus.exceptions import NoSuchSlaveException, ModbusIOException
import sys
import ssl
#---------------------------------------------------------------------------#
# Fixture
#---------------------------------------------------------------------------#
import platform
from distutils.version import LooseVersion
IS_DARWIN = platform.system().lower() == "darwin"
OSX_SIERRA = LooseVersion("10.12")
if IS_DARWIN:
IS_HIGH_SIERRA_OR_ABOVE = LooseVersion(platform.mac_ver()[0])
SERIAL_PORT = '/dev/ptyp0' if not IS_HIGH_SIERRA_OR_ABOVE else '/dev/ttyp0'
else:
IS_HIGH_SIERRA_OR_ABOVE = False
SERIAL_PORT = "/dev/ptmx"
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
class AsyncioServerTest(asynctest.TestCase):
'''
This is the unittest for the pymodbus.server.asyncio module
The scope of this unit test is the life-cycle management of the network
connections and server objects.
This unittest suite does not attempt to test any of the underlying protocol details
'''
#-----------------------------------------------------------------------#
# Setup/TearDown
#-----------------------------------------------------------------------#
    def setUp(self):
        '''
        Initialize the test environment: a single-slave server context whose
        discrete inputs, coils, holding and input registers are all 100
        entries of value 17.
        '''
        self.store = ModbusSlaveContext( di=ModbusSequentialDataBlock(0, [17]*100),
                                         co=ModbusSequentialDataBlock(0, [17]*100),
                                         hr=ModbusSequentialDataBlock(0, [17]*100),
                                         ir=ModbusSequentialDataBlock(0, [17]*100))
        self.context = ModbusServerContext(slaves=self.store, single=True)
    def tearDown(self):
        ''' Cleans up the test environment (nothing shared to release; each
        test closes its own server/sockets) '''
        pass
#-----------------------------------------------------------------------#
# Test ModbusConnectedRequestHandler
#-----------------------------------------------------------------------#
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testStartTcpServer(self):
        ''' Test that the modbus tcp asyncio server starts correctly '''
        identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
        # Mock the loop so no real listening socket is opened.
        self.loop = asynctest.Mock(self.loop)
        server = yield from StartTcpServer(context=self.context,loop=self.loop,identity=identity)
        self.assertEqual(server.control.Identity.VendorName, 'VendorName')
        if PYTHON_VERSION >= (3, 6):
            self.loop.create_server.assert_called_once()
    @pytest.mark.skipif(PYTHON_VERSION < (3, 7), reason="requires python3.7 or above")
    @asyncio.coroutine
    def testTcpServerServeNoDefer(self):
        ''' Test StartTcpServer without deferred start (immediate execution of server) '''
        # With defer_start=False the server must await serve_forever() itself.
        with patch('asyncio.base_events.Server.serve_forever', new_callable=asynctest.CoroutineMock) as serve:
            server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0), loop=self.loop, defer_start=False)
            serve.assert_awaited()
    @pytest.mark.skipif(PYTHON_VERSION < (3, 7), reason="requires python3.7 or above")
    @asyncio.coroutine
    def testTcpServerServeForever(self):
        ''' Test that an explicit call to serve_forever() awaits the underlying
        asyncio server's serve_forever() '''
        with patch('asyncio.base_events.Server.serve_forever', new_callable=asynctest.CoroutineMock) as serve:
            server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0), loop=self.loop)
            yield from server.serve_forever()
            serve.assert_awaited()
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testTcpServerServeForeverTwice(self):
        ''' Calling serve_forever() twice should result in a RuntimeError '''
        server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0), loop=self.loop)
        if PYTHON_VERSION >= (3, 7):
            server_task = asyncio.create_task(server.serve_forever())
        else:
            server_task = asyncio.ensure_future(server.serve_forever())
        # Wait until the first serve_forever() has actually started serving.
        yield from server.serving
        with self.assertRaises(RuntimeError):
            yield from server.serve_forever()
        server.server_close()
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testTcpServerReceiveData(self):
        ''' Test data sent on socket reaches the framer internals - does not
        verify processing of the data itself '''
        data = b'\x01\x00\x00\x00\x00\x06\x01\x03\x00\x00\x00\x19'
        server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0),loop=self.loop)
        if PYTHON_VERSION >= (3, 7):
            server_task = asyncio.create_task(server.serve_forever())
        else:
            server_task = asyncio.ensure_future(server.serve_forever())
        yield from server.serving
        # Intercept the framer so we can assert the raw bytes arrived.
        with patch('pymodbus.transaction.ModbusSocketFramer.processIncomingPacket', new_callable=Mock) as process:
            # process = server.framer.processIncomingPacket = Mock()
            connected = self.loop.create_future()
            random_port = server.server.sockets[0].getsockname()[1] # get the random server port
            class BasicClient(asyncio.BaseProtocol):
                def connection_made(self, transport):
                    self.transport = transport
                    self.transport.write(data)
                    connected.set_result(True)
                def eof_received(self):
                    pass
            transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1',port=random_port)
            yield from asyncio.sleep(0.1) # this may be better done by making an internal hook in the actual implementation
            # if this unit test fails on a machine, see if increasing the sleep time makes a difference, if it does
            # blame author for a fix
            if PYTHON_VERSION >= (3, 6):
                process.assert_called_once()
                self.assertTrue( process.call_args[1]["data"] == data )
            server.server_close()
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testTcpServerRoundtrip(self):
        ''' Test sending and receiving data on tcp socket: a read-holding-register
        request must yield the expected response frame '''
        data = b"\x01\x00\x00\x00\x00\x06\x01\x03\x00\x00\x00\x01" # unit 1, read register
        expected_response = b'\x01\x00\x00\x00\x00\x05\x01\x03\x02\x00\x11' # value of 17 as per context
        server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0),loop=self.loop)
        if PYTHON_VERSION >= (3, 7):
            server_task = asyncio.create_task(server.serve_forever())
        else:
            server_task = asyncio.ensure_future(server.serve_forever())
        yield from server.serving
        random_port = server.server.sockets[0].getsockname()[1] # get the random server port
        connected, done = self.loop.create_future(),self.loop.create_future()
        received_value = None
        class BasicClient(asyncio.BaseProtocol):
            def connection_made(self, transport):
                self.transport = transport
                self.transport.write(data)
                connected.set_result(True)
            def data_received(self, data):
                # propagate the response to the enclosing test scope
                nonlocal received_value, done
                received_value = data
                done.set_result(True)
            def eof_received(self):
                pass
        transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1',port=random_port)
        yield from asyncio.wait_for(done, timeout=0.1)
        self.assertEqual(received_value, expected_response)
        transport.close()
        yield from asyncio.sleep(0)
        server.server_close()
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testTcpServerConnectionLost(self):
        ''' Test tcp stream interruption: closing the client transport must
        remove it from the server's active connections '''
        data = b"\x01\x00\x00\x00\x00\x06\x01\x01\x00\x00\x00\x01"  # NOTE(review): unused in this test
        server = yield from StartTcpServer(context=self.context, address=("127.0.0.1", 0), loop=self.loop)
        if PYTHON_VERSION >= (3, 7):
            server_task = asyncio.create_task(server.serve_forever())
        else:
            server_task = asyncio.ensure_future(server.serve_forever())
        yield from server.serving
        random_port = server.server.sockets[0].getsockname()[1] # get the random server port
        step1 = self.loop.create_future()
        # done = self.loop.create_future()
        # received_value = None
        time.sleep(1)  # NOTE(review): blocking sleep inside the event loop — confirm whether still needed
        class BasicClient(asyncio.BaseProtocol):
            def connection_made(self, transport):
                self.transport = transport
                step1.set_result(True)
        transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1', port=random_port)
        yield from step1
        # On Windows we seem to need to give this an extra chance to finish,
        # otherwise there ends up being an active connection at the assert.
        yield from asyncio.sleep(0.0)
        self.assertTrue(len(server.active_connections) == 1)
        protocol.transport.close() # close isn't synchronous and there's no notification that it's done
        # so we have to wait a bit
        allowed_delay = 1
        deadline = time.monotonic() + allowed_delay
        while time.monotonic() <= deadline:
            yield from asyncio.sleep(0.1)
            if len(server.active_connections) == 0:
                break
        else:
            self.assertTrue(
                len(server.active_connections) == 0,
                msg="connections not closed within {} seconds".format(allowed_delay),
            )
        server.server_close()
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testTcpServerCloseActiveConnection(self):
        ''' Test server_close() while there are active TCP connections: the
        server must drop all active connections on close '''
        data = b"\x01\x00\x00\x00\x00\x06\x01\x01\x00\x00\x00\x01"  # NOTE(review): unused in this test
        server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0),loop=self.loop)
        if PYTHON_VERSION >= (3, 7):
            server_task = asyncio.create_task(server.serve_forever())
        else:
            server_task = asyncio.ensure_future(server.serve_forever())
        yield from server.serving
        random_port = server.server.sockets[0].getsockname()[1] # get the random server port
        step1 = self.loop.create_future()
        done = self.loop.create_future()
        received_value = None
        class BasicClient(asyncio.BaseProtocol):
            def connection_made(self, transport):
                self.transport = transport
                step1.set_result(True)
        transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1',port=random_port)
        yield from step1
        # On Windows we seem to need to give this an extra chance to finish,
        # otherwise there ends up being an active connection at the assert.
        yield from asyncio.sleep(0.0)
        server.server_close()
        # close isn't synchronous and there's no notification that it's done
        # so we have to wait a bit
        yield from asyncio.sleep(0.0)
        self.assertTrue( len(server.active_connections) == 0 )
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testTcpServerNoSlave(self):
''' Test unknown slave unit exception '''
context = ModbusServerContext(slaves={0x01: self.store, 0x02: self.store }, single=False)
data = b"\x01\x00\x00\x00\x00\x06\x05\x03\x00\x00\x00\x01" # get slave 5 function 3 (holding register)
server = yield from StartTcpServer(context=context,address=("127.0.0.1", 0),loop=self.loop)
if PYTHON_VERSION >= (3, 7):
server_task = asyncio.create_task(server.serve_forever())
else:
server_task = asyncio.ensure_future(server.serve_forever())
yield from server.serving
connect, receive, eof = self.loop.create_future(),self.loop.create_future(),self.loop.create_future()
received_data = None
random_port = server.server.sockets[0].getsockname()[1] # get the random server port
class BasicClient(asyncio.BaseProtocol):
def connection_made(self, transport):
_logger.debug("Client connected")
self.transport = transport
transport.write(data)
connect.set_result(True)
def data_received(self, data):
_logger.debug("Client received data")
receive.set_result(True)
received_data = data
def eof_received(self):
_logger.debug("Client stream eof")
eof.set_result(True)
transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1',port=random_port)
yield from asyncio.wait_for(connect, timeout=0.1)
self.assertFalse(eof.done())
server.server_close()
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testTcpServerModbusError(self):
''' Test sending garbage data on a TCP socket should drop the connection '''
data = b"\x01\x00\x00\x00\x00\x06\x01\x03\x00\x00\x00\x01" # get slave 5 function 3 (holding register)
server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0),loop=self.loop)
if PYTHON_VERSION >= (3, 7):
server_task = asyncio.create_task(server.serve_forever())
else:
server_task = asyncio.ensure_future(server.serve_forever())
yield from server.serving
with patch("pymodbus.register_read_message.ReadHoldingRegistersRequest.execute",
side_effect=NoSuchSlaveException):
connect, receive, eof = self.loop.create_future(),self.loop.create_future(),self.loop.create_future()
received_data = None
random_port = server.server.sockets[0].getsockname()[1] # get the random server port
class BasicClient(asyncio.BaseProtocol):
def connection_made(self, transport):
_logger.debug("Client connected")
self.transport = transport
transport.write(data)
connect.set_result(True)
def data_received(self, data):
_logger.debug("Client received data")
receive.set_result(True)
received_data = data
def eof_received(self):
_logger.debug("Client stream eof")
eof.set_result(True)
transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1',port=random_port)
yield from asyncio.wait_for(connect, timeout=0.1)
yield from asyncio.wait_for(receive, timeout=0.1)
self.assertFalse(eof.done())
transport.close()
server.server_close()
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testTcpServerInternalException(self):
''' Test sending garbage data on a TCP socket should drop the connection '''
data = b"\x01\x00\x00\x00\x00\x06\x01\x03\x00\x00\x00\x01" # get slave 5 function 3 (holding register)
server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0),loop=self.loop)
if PYTHON_VERSION >= (3, 7):
server_task = asyncio.create_task(server.serve_forever())
else:
server_task = asyncio.ensure_future(server.serve_forever())
yield from server.serving
with patch("pymodbus.register_read_message.ReadHoldingRegistersRequest.execute",
side_effect=Exception):
connect, receive, eof = self.loop.create_future(),self.loop.create_future(),self.loop.create_future()
received_data = None
random_port = server.server.sockets[0].getsockname()[1] # get the random server port
class BasicClient(asyncio.BaseProtocol):
def connection_made(self, transport):
_logger.debug("Client connected")
self.transport = transport
transport.write(data)
connect.set_result(True)
def data_received(self, data):
_logger.debug("Client received data")
receive.set_result(True)
received_data = data
def eof_received(self):
_logger.debug("Client stream eof")
eof.set_result(True)
transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1',port=random_port)
yield from asyncio.wait_for(connect, timeout=0.1)
yield from asyncio.wait_for(receive, timeout=0.1)
self.assertFalse(eof.done())
transport.close()
server.server_close()
#-----------------------------------------------------------------------#
# Test ModbusTlsProtocol
#-----------------------------------------------------------------------#
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testStartTlsServer(self):
        ''' Test that the modbus tls asyncio server starts correctly '''
        # Patch certificate loading so no real cert/key files are needed.
        with patch.object(ssl.SSLContext, 'load_cert_chain') as mock_method:
            identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
            self.loop = asynctest.Mock(self.loop)
            server = yield from StartTlsServer(context=self.context,loop=self.loop,identity=identity)
            self.assertEqual(server.control.Identity.VendorName, 'VendorName')
            self.assertIsNotNone(server.sslctx)
            if PYTHON_VERSION >= (3, 6):
                self.loop.create_server.assert_called_once()
    @pytest.mark.skipif(PYTHON_VERSION < (3, 7), reason="requires python3.7 or above")
    @asyncio.coroutine
    def testTlsServerServeNoDefer(self):
        ''' Test StartTlsServer without deferred start (immediate execution of server) '''
        with patch('asyncio.base_events.Server.serve_forever', new_callable=asynctest.CoroutineMock) as serve:
            with patch.object(ssl.SSLContext, 'load_cert_chain') as mock_method:
                server = yield from StartTlsServer(context=self.context,address=("127.0.0.1", 0), loop=self.loop, defer_start=False)
                serve.assert_awaited()
    @pytest.mark.skipif(PYTHON_VERSION < (3, 7), reason="requires python3.7 or above")
    @asyncio.coroutine
    def testTlsServerServeForever(self):
        ''' Test StartTlsServer serve_forever() method '''
        with patch('asyncio.base_events.Server.serve_forever', new_callable=asynctest.CoroutineMock) as serve:
            with patch.object(ssl.SSLContext, 'load_cert_chain') as mock_method:
                server = yield from StartTlsServer(context=self.context,address=("127.0.0.1", 0), loop=self.loop)
                yield from server.serve_forever()
                serve.assert_awaited()
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testTlsServerServeForeverTwice(self):
        ''' Calling serve_forever() twice on the TLS server should result in a
        RuntimeError '''
        with patch.object(ssl.SSLContext, 'load_cert_chain') as mock_method:
            server = yield from StartTlsServer(context=self.context,address=("127.0.0.1", 0), loop=self.loop)
            if PYTHON_VERSION >= (3, 7):
                server_task = asyncio.create_task(server.serve_forever())
            else:
                server_task = asyncio.ensure_future(server.serve_forever())
            yield from server.serving
            with self.assertRaises(RuntimeError):
                yield from server.serve_forever()
            server.server_close()
#-----------------------------------------------------------------------#
# Test ModbusUdpProtocol
#-----------------------------------------------------------------------#
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testStartUdpServer(self):
        ''' Test that the modbus udp asyncio server starts correctly '''
        identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
        # Mock the loop so no real datagram endpoint is opened.
        self.loop = asynctest.Mock(self.loop)
        server = yield from StartUdpServer(context=self.context,loop=self.loop,identity=identity)
        self.assertEqual(server.control.Identity.VendorName, 'VendorName')
        if PYTHON_VERSION >= (3, 6):
            self.loop.create_datagram_endpoint.assert_called_once()
# async def testUdpServerServeNoDefer(self):
# ''' Test StartUdpServer without deferred start - NOT IMPLEMENTED - this test is hard to do without additional
# internal plumbing added to the implementation '''
# asyncio.base_events.Server.serve_forever = asynctest.CoroutineMock()
# server = yield from StartUdpServer(address=("127.0.0.1", 0), loop=self.loop, defer_start=False)
# server.server.serve_forever.assert_awaited()
    @pytest.mark.skipif(PYTHON_VERSION < (3, 7), reason="requires python3.7 or above")
    @asyncio.coroutine
    def testUdpServerServeForeverStart(self):
        ''' Test StartUdpServer serve_forever() method '''
        # NOTE(review): despite the name/docstring this starts a TCP server, not
        # UDP (StartTcpServer below) — likely a copy/paste; confirm intent, see
        # also the commented-out testUdpServerServeNoDefer above.
        with patch('asyncio.base_events.Server.serve_forever', new_callable=asynctest.CoroutineMock) as serve:
            server = yield from StartTcpServer(context=self.context,address=("127.0.0.1", 0), loop=self.loop)
            yield from server.serve_forever()
            serve.assert_awaited()
    @asyncio.coroutine
    @pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
    def testUdpServerServeForeverClose(self):
        ''' Test that server_close() on a serving UDP server closes the
        underlying datagram protocol '''
        server = yield from StartUdpServer(context=self.context,address=("127.0.0.1", 0), loop=self.loop)
        if PYTHON_VERSION >= (3, 7):
            server_task = asyncio.create_task(server.serve_forever())
        else:
            server_task = asyncio.ensure_future(server.serve_forever())
        yield from server.serving
        self.assertTrue(asyncio.isfuture(server.on_connection_terminated))
        self.assertFalse(server.on_connection_terminated.done())
        server.server_close()
        self.assertTrue(server.protocol.is_closing())
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testUdpServerServeForeverTwice(self):
    ''' Call on serve_forever() twice should result in a runtime error '''
    identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
    server = yield from StartUdpServer(context=self.context,address=("127.0.0.1", 0),
                                       loop=self.loop,identity=identity)
    # Keep a reference so the serve_forever task is not garbage collected.
    if PYTHON_VERSION >= (3, 7):
        server_task = asyncio.create_task(server.serve_forever())
    else:
        server_task = asyncio.ensure_future(server.serve_forever())
    yield from server.serving
    # A second serve_forever() on an already-serving server must raise.
    with self.assertRaises(RuntimeError):
        yield from server.serve_forever()
    server.server_close()
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testUdpServerReceiveData(self):
    ''' Test that the sending data on datagram socket gets data pushed to framer '''
    server = yield from StartUdpServer(context=self.context,address=("127.0.0.1", 0),loop=self.loop)
    if PYTHON_VERSION >= (3, 7):
        server_task = asyncio.create_task(server.serve_forever())
    else:
        server_task = asyncio.ensure_future(server.serve_forever())
    yield from server.serving
    with patch('pymodbus.transaction.ModbusSocketFramer.processIncomingPacket',new_callable=Mock) as process:
        # Inject a datagram directly into the server protocol (bypassing the
        # network), then give the loop a moment to dispatch it to the framer.
        server.endpoint.datagram_received(data=b"12345", addr=("127.0.0.1", 12345))
        yield from asyncio.sleep(0.1)
        # NOTE(review): Mock has no seal() method -- unittest.mock.seal(process)
        # was probably intended; as written this just records a harmless
        # child-mock call.
        process.seal()
        if PYTHON_VERSION >= (3, 6):
            process.assert_called_once()
        self.assertTrue( process.call_args[1]["data"] == b"12345" )
    server.server_close()
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testUdpServerSendData(self):
    ''' Test that the modbus udp asyncio server correctly sends data outbound '''
    # NOTE(review): identity is built but never passed to StartUdpServer below.
    identity = ModbusDeviceIdentification(info={0x00: 'VendorName'})
    # NOTE(review): the leading b'x\01' looks like a typo for b'\x01'; the
    # frame is only used as an opaque payload here, so the test still works.
    data = b'x\01\x00\x00\x00\x00\x06\x01\x03\x00\x00\x00\x19'
    server = yield from StartUdpServer(context=self.context,address=("127.0.0.1", 0))
    if PYTHON_VERSION >= (3, 7):
        server_task = asyncio.create_task(server.serve_forever())
    else:
        server_task = asyncio.ensure_future(server.serve_forever())
    yield from server.serving
    # The server bound to an ephemeral port; recover it for the client.
    random_port = server.protocol._sock.getsockname()[1]
    # Spy on the server-side receive path without changing its behaviour.
    received = server.endpoint.datagram_received = Mock(wraps=server.endpoint.datagram_received)
    done = self.loop.create_future()
    received_value = None
    class BasicClient(asyncio.DatagramProtocol):
        # Minimal UDP client: send one frame, record any reply, then close.
        def connection_made(self, transport):
            self.transport = transport
            self.transport.sendto(data)
        def datagram_received(self, data, addr):
            nonlocal received_value, done
            print("received")
            received_value = data
            done.set_result(True)
            self.transport.close()
    transport, protocol = yield from self.loop.create_datagram_endpoint( BasicClient,
                                                                         remote_addr=('127.0.0.1', random_port))
    yield from asyncio.sleep(0.1)
    if PYTHON_VERSION >= (3, 6):
        received.assert_called_once()
    # The server must have seen exactly the bytes the client sent.
    self.assertEqual(received.call_args[0][0], data)
    server.server_close()
    self.assertTrue(server.protocol.is_closing())
    yield from asyncio.sleep(0.1)
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testUdpServerRoundtrip(self):
    ''' Test sending and receiving data on udp socket'''
    data = b"\x01\x00\x00\x00\x00\x06\x01\x03\x00\x00\x00\x01" # unit 1, read register
    expected_response = b'\x01\x00\x00\x00\x00\x05\x01\x03\x02\x00\x11' # value of 17 as per context
    server = yield from StartUdpServer(context=self.context,address=("127.0.0.1", 0),loop=self.loop)
    if PYTHON_VERSION >= (3, 7):
        server_task = asyncio.create_task(server.serve_forever())
    else:
        server_task = asyncio.ensure_future(server.serve_forever())
    yield from server.serving
    random_port = server.protocol._sock.getsockname()[1]
    # NOTE(review): 'connected' is created but never resolved or awaited.
    connected, done = self.loop.create_future(),self.loop.create_future()
    received_value = None
    class BasicClient(asyncio.DatagramProtocol):
        # Sends one read-holding-registers request and records the response.
        def connection_made(self, transport):
            self.transport = transport
            self.transport.sendto(data)
        def datagram_received(self, data, addr):
            nonlocal received_value, done
            print("received")
            received_value = data
            done.set_result(True)
    transport, protocol = yield from self.loop.create_datagram_endpoint( BasicClient,
                                                                        remote_addr=('127.0.0.1', random_port))
    yield from asyncio.wait_for(done, timeout=0.1)
    self.assertEqual(received_value, expected_response)
    transport.close()
    # Let the transport's close callback run before tearing the server down.
    yield from asyncio.sleep(0)
    server.server_close()
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testUdpServerException(self):
    ''' Test that garbage data on the datagram socket does not close the server socket '''
    garbage = b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
    server = yield from StartUdpServer(context=self.context,address=("127.0.0.1", 0),loop=self.loop)
    if PYTHON_VERSION >= (3, 7):
        server_task = asyncio.create_task(server.serve_forever())
    else:
        server_task = asyncio.ensure_future(server.serve_forever())
    yield from server.serving
    # Make the framer blow up on any input to simulate a decode failure.
    with patch('pymodbus.transaction.ModbusSocketFramer.processIncomingPacket',
               new_callable=lambda: Mock(side_effect=Exception)) as process:
        connect, receive, eof = self.loop.create_future(),self.loop.create_future(),self.loop.create_future()
        received_data = None
        random_port = server.protocol._sock.getsockname()[1] # get the random server port
        class BasicClient(asyncio.DatagramProtocol):
            def connection_made(self, transport):
                _logger.debug("Client connected")
                self.transport = transport
                transport.sendto(garbage)
                connect.set_result(True)
            def datagram_received(self, data, addr):
                # NOTE(review): received_data assigns a local (missing
                # nonlocal) -- dead store; harmless since this callback must
                # never fire for the test to pass.
                nonlocal receive
                _logger.debug("Client received data")
                receive.set_result(True)
                received_data = data
        transport, protocol = yield from self.loop.create_datagram_endpoint(BasicClient,
                                                                            remote_addr=('127.0.0.1', random_port))
        yield from asyncio.wait_for(connect, timeout=0.1)
        # No response must have been produced, and the UDP socket stays open.
        self.assertFalse(receive.done())
        self.assertFalse(server.protocol._sock._closed)
        server.server_close()
# -----------------------------------------------------------------------#
# Test ModbusServerFactory
# -----------------------------------------------------------------------#
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testModbusServerFactory(self):
    ''' Instantiating the legacy ModbusServerFactory must emit a DeprecationWarning '''
    with self.assertWarns(DeprecationWarning):
        ModbusServerFactory(store=None)
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testStopServer(self):
    ''' Calling the legacy StopServer helper must emit a DeprecationWarning '''
    with self.assertWarns(DeprecationWarning):
        StopServer()
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testTcpServerException(self):
    ''' Sending garbage data on a TCP socket should drop the connection '''
    # NOTE(review): an identical testTcpServerException is defined again
    # further down this class; Python keeps only the last definition, so this
    # first copy is shadowed and never collected by the test runner.
    garbage = b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
    server = yield from StartTcpServer(context=self.context, address=("127.0.0.1", 0), loop=self.loop)
    if PYTHON_VERSION >= (3, 7):
        server_task = asyncio.create_task(server.serve_forever())
    else:
        server_task = asyncio.ensure_future(server.serve_forever())
    yield from server.serving
    # A framer that raises on any input forces the server's error path.
    with patch('pymodbus.transaction.ModbusSocketFramer.processIncomingPacket',
               new_callable=lambda: Mock(side_effect=Exception)) as process:
        connect, receive, eof = self.loop.create_future(), self.loop.create_future(), self.loop.create_future()
        received_data = None
        random_port = server.server.sockets[0].getsockname()[1] # get the random server port
        class BasicClient(asyncio.BaseProtocol):
            def connection_made(self, transport):
                _logger.debug("Client connected")
                self.transport = transport
                transport.write(garbage)
                connect.set_result(True)
            def data_received(self, data):
                _logger.debug("Client received data")
                receive.set_result(True)
                received_data = data
            def eof_received(self):
                # The server dropping the connection surfaces as EOF here.
                _logger.debug("Client stream eof")
                eof.set_result(True)
        transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1',
                                                                     port=random_port)
        yield from asyncio.wait_for(connect, timeout=0.1)
        yield from asyncio.wait_for(eof, timeout=0.1)
        # neither of these should timeout if the test is successful
        server.server_close()
@asyncio.coroutine
@pytest.mark.skipif(not IS_PYTHON3, reason="requires python3.4 or above")
def testTcpServerException(self):
    ''' Sending garbage data on a TCP socket should drop the connection '''
    # NOTE(review): verbatim duplicate of the testTcpServerException defined
    # earlier in this class; this later definition is the one that actually
    # runs.  One copy should be deleted (or renamed to cover a distinct case).
    garbage = b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
    server = yield from StartTcpServer(context=self.context, address=("127.0.0.1", 0), loop=self.loop)
    if PYTHON_VERSION >= (3, 7):
        server_task = asyncio.create_task(server.serve_forever())
    else:
        server_task = asyncio.ensure_future(server.serve_forever())
    yield from server.serving
    with patch('pymodbus.transaction.ModbusSocketFramer.processIncomingPacket',
               new_callable=lambda: Mock(side_effect=Exception)) as process:
        connect, receive, eof = self.loop.create_future(), self.loop.create_future(), self.loop.create_future()
        received_data = None
        random_port = server.server.sockets[0].getsockname()[1] # get the random server port
        class BasicClient(asyncio.BaseProtocol):
            def connection_made(self, transport):
                _logger.debug("Client connected")
                self.transport = transport
                transport.write(garbage)
                connect.set_result(True)
            def data_received(self, data):
                _logger.debug("Client received data")
                receive.set_result(True)
                received_data = data
            def eof_received(self):
                _logger.debug("Client stream eof")
                eof.set_result(True)
        transport, protocol = yield from self.loop.create_connection(BasicClient, host='127.0.0.1',
                                                                     port=random_port)
        yield from asyncio.wait_for(connect, timeout=0.1)
        yield from asyncio.wait_for(eof, timeout=0.1)
        # neither of these should timeout if the test is successful
        server.server_close()
# --------------------------------------------------------------------------- #
# Main
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
    # asynctest.main() is unittest.main() with asyncio-aware test handling.
    asynctest.main()
| 15,672 |
2,757 | <filename>EdkCompatibilityPkg/Foundation/Framework/Protocol/FirmwareVolumeBlock/FirmwareVolumeBlock.h
/*++
Copyright (c) 2004 - 2010, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
Module Name:
FirmwareVolumeBlock.h
Abstract:
Firmware Volume Block protocol as defined in the Tiano Firmware Volume
specification.
Low level firmware device access routines to abstract firmware device
hardware.
--*/
#ifndef _FW_VOL_BLOCK_H_
#define _FW_VOL_BLOCK_H_

#include "EfiFirmwareVolumeHeader.h"

//
// The following GUID value has been changed to EFI_FIRMWARE_VOLUME_BLOCK2_PROTOCOL_GUID in
// PI 1.2 spec on purpose. This will force all platforms built with EdkCompatibilityPkg
// produce FVB 2 protocol.
//
#define EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID \
  { \
    0x8f644fa9, 0xe850, 0x4db1, {0x9c, 0xe2, 0xb, 0x44, 0x69, 0x8e, 0x8d, 0xa4 } \
  }

EFI_FORWARD_DECLARATION (EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL);

//
// Because the GUID above is the PI 1.2 FVB2 GUID, the FVB and FVB2 protocol
// types are interchangeable in this package.
//
typedef EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL EFI_FIRMWARE_VOLUME_BLOCK2_PROTOCOL;
//
// Function-pointer types implemented by each Firmware Volume Block driver
// instance.
//

typedef
EFI_STATUS
(EFIAPI *EFI_FVB_GET_ATTRIBUTES) (
  IN EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL * This,
  OUT EFI_FVB_ATTRIBUTES * Attributes
  )
/*++

Routine Description:
  Retrieves Volume attributes. No polarity translations are done.

Arguments:
  This        - Calling context
  Attributes  - output buffer which contains attributes

Returns:
  EFI_INVALID_PARAMETER
  EFI_SUCCESS

--*/
;

typedef
EFI_STATUS
(EFIAPI *EFI_FVB_SET_ATTRIBUTES) (
  IN EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL * This,
  IN OUT EFI_FVB_ATTRIBUTES * Attributes
  )
/*++

Routine Description:
  Sets Volume attributes. No polarity translations are done.

Arguments:
  This        - Calling context
  Attributes  - On input: contains new attributes
                On output: contains current attributes of FV

Returns:
  EFI_INVALID_PARAMETER
  EFI_SUCCESS

--*/
;

typedef
EFI_STATUS
(EFIAPI *EFI_FVB_GET_PHYSICAL_ADDRESS) (
  IN EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL * This,
  OUT EFI_PHYSICAL_ADDRESS * Address
  )
/*++

Routine Description:
  Retrieves the physical address of a memory mapped FV.

Arguments:
  This        - Calling context
  Address     - Address is a pointer to a caller allocated EFI_PHYSICAL_ADDRESS
                that on successful return from GetPhysicalAddress() contains the
                base address of the firmware volume.

Returns:
  EFI_UNSUPPORTED
  EFI_SUCCESS

--*/
;

typedef
EFI_STATUS
(EFIAPI *EFI_FVB_GET_BLOCK_SIZE) (
  IN EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL * This,
  IN EFI_LBA Lba,
  OUT UINTN *BlockSize,
  OUT UINTN *NumberOfBlocks
  )
/*++

Routine Description:
  Retrieves the size in bytes of a specific block within an FV.

Arguments:
  This            - Calling context.
  Lba             - Indicates which block to return the size for.
  BlockSize       - BlockSize is a pointer to a caller allocated
                    UINTN in which the size of the block is returned.
  NumberOfBlocks  - NumberOfBlocks is a pointer to a caller allocated
                    UINTN in which the number of consecutive blocks
                    starting with Lba is returned. All blocks in this
                    range have a size of BlockSize.

Returns:
  EFI_INVALID_PARAMETER
  EFI_SUCCESS

--*/
;
typedef
EFI_STATUS
(EFIAPI *EFI_FVB_READ) (
  IN EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL * This,
  IN EFI_LBA Lba,
  IN UINTN Offset,
  IN OUT UINTN *NumBytes,
  OUT UINT8 *Buffer
  )
/*++

Routine Description:
  Reads data beginning at Lba:Offset from FV and places the data in Buffer.
  The read terminates either when *NumBytes of data have been read, or when
  a block boundary is reached. *NumBytes is updated to reflect the actual
  number of bytes read.

Arguments:
  This      - Calling context
  Lba       - Block in which to begin read
  Offset    - Offset in the block at which to begin read
  NumBytes  - At input, indicates the requested read size. At output, indicates
              the actual number of bytes read.
  Buffer    - Data buffer in which to place data read.

Returns:
  EFI_INVALID_PARAMETER
  EFI_NOT_FOUND
  EFI_DEVICE_ERROR
  EFI_SUCCESS

--*/
;

typedef
EFI_STATUS
(EFIAPI *EFI_FVB_WRITE) (
  IN EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL * This,
  IN EFI_LBA Lba,
  IN UINTN Offset,
  IN OUT UINTN *NumBytes,
  IN UINT8 *Buffer
  )
/*++

Routine Description:
  Writes data beginning at Lba:Offset from FV. The write terminates either
  when *NumBytes of data have been written, or when a block boundary is
  reached. *NumBytes is updated to reflect the actual number of bytes
  written.

Arguments:
  This      - Calling context
  Lba       - Block in which to begin write
  Offset    - Offset in the block at which to begin write
  NumBytes  - At input, indicates the requested write size. At output, indicates
              the actual number of bytes written.
  Buffer    - Buffer containing source data for the write.

Returns:
  EFI_INVALID_PARAMETER
  EFI_NOT_FOUND
  EFI_DEVICE_ERROR
  EFI_SUCCESS

--*/
;

//
// Terminates the variable-length LBA list passed to EraseBlocks().
//
#define EFI_LBA_LIST_TERMINATOR 0xFFFFFFFFFFFFFFFF

typedef
EFI_STATUS
(EFIAPI *EFI_FVB_ERASE_BLOCKS) (
  IN EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL * This,
  ...
  )
/*++

Routine Description:
  The EraseBlock() function erases one or more blocks as denoted by the
  variable argument list. The entire parameter list of blocks must be verified
  prior to erasing any blocks. If a block is requested that does not exist
  within the associated firmware volume (it has a larger index than the last
  block of the firmware volume), the EraseBlock() function must return
  EFI_INVALID_PARAMETER without modifying the contents of the firmware volume.

Arguments:
  This  - Calling context
  ...   - Starting LBA followed by Number of Lba to erase. a -1 to terminate
          the list.

Returns:
  EFI_INVALID_PARAMETER
  EFI_DEVICE_ERROR
  EFI_SUCCESS
  EFI_ACCESS_DENIED

--*/
;

//
// The protocol instance: one function pointer per operation above, plus the
// handle of the parent firmware device.
//
struct _EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL {
  EFI_FVB_GET_ATTRIBUTES        GetVolumeAttributes;  // Query FV attributes
  EFI_FVB_SET_ATTRIBUTES        SetVolumeAttributes;  // Update FV attributes
  EFI_FVB_GET_PHYSICAL_ADDRESS  GetPhysicalAddress;   // Base of memory-mapped FV
  EFI_FVB_GET_BLOCK_SIZE        GetBlockSize;         // Size/count of blocks at an LBA
  EFI_FVB_READ                  Read;                 // Read within one block
  EFI_FVB_WRITE                 Write;                // Write within one block
  EFI_FVB_ERASE_BLOCKS          EraseBlocks;          // Erase a list of blocks
  EFI_HANDLE                    ParentHandle;         // Parent firmware device handle
};

extern EFI_GUID gEfiFirmwareVolumeBlockProtocolGuid;

#endif
| 3,592 |
14,668 | {
"name": "Optional permissions which cannot be optional",
"version": "0.0.1.33",
"manifest_version": 2,
"description": "Extension with an optional permission listed which can't be optional (debugger).",
"optional_permissions": [
"tabs",
"debugger"
]
}
| 92 |
309 | <gh_stars>100-1000
#!/usr/bin/env python
"""
"""
import vtk
def main():
    # Named colors used for backgrounds and the wireframe/post actors.
    colors = vtk.vtkNamedColors()

    xyxFile, qFile = get_program_parameters()

    # Read the data.
    #
    pl3d = vtk.vtkMultiBlockPLOT3DReader()
    pl3d.AutoDetectFormatOn()
    pl3d.SetXYZFileName(xyxFile)
    pl3d.SetQFileName(qFile)
    pl3d.SetScalarFunctionNumber(153)
    pl3d.SetVectorFunctionNumber(200)
    pl3d.Update()

    # First block of the multi-block output is the structured grid used below.
    sg = pl3d.GetOutput().GetBlock(0)

    # blue to red lut
    #
    lut = vtk.vtkLookupTable()
    lut.SetHueRange(0.667, 0.0)

    # One seed-cloud centre per renderer: same scene, different seed heights.
    seeds = [[-0.74, 0.0, 0.3], [-0.74, 0.0, 1.0], [-0.74, 0.0, 2.0], [-0.74, 0.0, 3.0]]

    renderers = list()

    for s in range(0, len(seeds)):
        # computational planes
        floorComp = vtk.vtkStructuredGridGeometryFilter()
        floorComp.SetExtent(0, 37, 0, 75, 0, 0)
        floorComp.SetInputData(sg)
        floorComp.Update()

        floorMapper = vtk.vtkPolyDataMapper()
        floorMapper.SetInputConnection(floorComp.GetOutputPort())
        floorMapper.ScalarVisibilityOff()
        floorMapper.SetLookupTable(lut)

        floorActor = vtk.vtkActor()
        floorActor.SetMapper(floorMapper)
        floorActor.GetProperty().SetRepresentationToWireframe()
        floorActor.GetProperty().SetColor(colors.GetColor3d("Black"))
        floorActor.GetProperty().SetLineWidth(2)

        postComp = vtk.vtkStructuredGridGeometryFilter()
        postComp.SetExtent(10, 10, 0, 75, 0, 37)
        postComp.SetInputData(sg)

        postMapper = vtk.vtkPolyDataMapper()
        postMapper.SetInputConnection(postComp.GetOutputPort())
        postMapper.SetLookupTable(lut)
        postMapper.SetScalarRange(sg.GetScalarRange())

        postActor = vtk.vtkActor()
        postActor.SetMapper(postMapper)
        postActor.GetProperty().SetColor(colors.GetColor3d("Black"))

        # streamers
        #
        # spherical seed points
        rake = vtk.vtkPointSource()
        rake.SetCenter(seeds[s])
        rake.SetNumberOfPoints(10)

        streamers = vtk.vtkStreamTracer()
        streamers.SetInputConnection(pl3d.GetOutputPort())
        # streamers SetSource [rake GetOutput]
        streamers.SetSourceConnection(rake.GetOutputPort())
        streamers.SetMaximumPropagation(250)
        streamers.SetInitialIntegrationStep(.2)
        streamers.SetMinimumIntegrationStep(.01)
        streamers.SetIntegratorType(2)
        streamers.Update()

        # Wrap the streamlines in constant-radius tubes for visibility.
        tubes = vtk.vtkTubeFilter()
        tubes.SetInputConnection(streamers.GetOutputPort())
        tubes.SetNumberOfSides(8)
        tubes.SetRadius(.08)
        tubes.SetVaryRadius(0)

        mapTubes = vtk.vtkPolyDataMapper()
        mapTubes.SetInputConnection(tubes.GetOutputPort())
        mapTubes.SetScalarRange(sg.GetScalarRange())

        tubesActor = vtk.vtkActor()
        tubesActor.SetMapper(mapTubes)

        renderer = vtk.vtkRenderer()
        renderer.AddActor(floorActor)
        renderer.AddActor(postActor)
        renderer.AddActor(tubesActor)
        renderer.SetBackground(colors.GetColor3d("SlateGray"))

        renderers.append(renderer)

    renderWindow = vtk.vtkRenderWindow()

    # Setup viewports for the renderers
    rendererSize = 256
    xGridDimensions = 2
    yGridDimensions = 2
    renderWindow.SetSize(rendererSize * xGridDimensions, rendererSize * yGridDimensions)
    for row in range(0, yGridDimensions):
        for col in range(xGridDimensions):
            index = row * xGridDimensions + col
            # (xmin, ymin, xmax, ymax)
            viewport = [float(col) / xGridDimensions, float(yGridDimensions - (row + 1)) / yGridDimensions,
                        float(col + 1) / xGridDimensions, float(yGridDimensions - row) / yGridDimensions]
            renderers[index].SetViewport(viewport)

    # A single camera shared by all four renderers keeps the views in sync.
    camera = vtk.vtkCamera()
    camera.SetFocalPoint(0.918037, -0.0779233, 2.69513)
    camera.SetPosition(0.840735, -23.6176, 8.50211)
    camera.SetViewUp(0.00227904, 0.239501, 0.970893)
    camera.SetClippingRange(1, 100)
    renderers[0].SetActiveCamera(camera)

    for r in range(0, len(renderers)):
        renderWindow.AddRenderer(renderers[r])
        if r > 0:
            renderers[r].SetActiveCamera(camera)

    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renderWindow)

    # NOTE(review): this overrides the grid-derived window size set above.
    renderWindow.SetSize(512, 512)
    renderWindow.Render()
    interactor.Start()
def get_program_parameters():
    """Parse the two required positional command-line arguments.

    Returns:
        tuple: (xyz_file, q_file) file paths taken from sys.argv.
    """
    import argparse
    arg_parser = argparse.ArgumentParser(
        description='Streamlines seeded with spherical cloud of points. Four separate cloud positions are shown.',
        epilog='''
    ''')
    arg_parser.add_argument('xyz_file', help='postxyz.bin.')
    arg_parser.add_argument('q_file', help='postq.bin.')
    parsed = arg_parser.parse_args()
    return parsed.xyz_file, parsed.q_file
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 2,140 |
312 | <filename>example/07_file_save.cc<gh_stars>100-1000
#include "workflow/WFFacilities.h"
#include <csignal>
#include "wfrest/HttpServer.h"
using namespace wfrest;
static WFFacilities::WaitGroup wait_group(1);
// SIGINT handler: release the wait group so main() can stop the server.
void sig_handler(int signo)
{
    wait_group.done();
}
int main()
{
    // Ctrl-C triggers a clean shutdown via the wait group.
    signal(SIGINT, sig_handler);

    HttpServer svr;

    // curl -v -X POST "ip:port/file_write1" -F "file=@filename" -H "Content-Type: multipart/form-data"
    svr.POST("/file_write1", [](const HttpReq *req, HttpResp *resp)
    {
        // The raw multipart body (boundaries included) is written to disk;
        // std::move hands the payload to Save without copying.
        std::string& body = req->body(); // multipart/form - body has boundary
        resp->Save("test.txt", std::move(body));
    });

    // GET /file_write2 : write a fixed string to test1.txt.
    svr.GET("/file_write2", [](const HttpReq *req, HttpResp *resp)
    {
        std::string content = "1234567890987654321";
        resp->Save("test1.txt", std::move(content));
    });

    if (svr.start(8888) == 0)
    {
        // Block until sig_handler releases the wait group, then stop cleanly.
        wait_group.wait();
        svr.stop();
    } else
    {
        fprintf(stderr, "Cannot start server");
        exit(1);
    }
    return 0;
}
| 466 |
408 | <filename>gluon/packages/dal/pydal/dialects/couchdb.py
from ..adapters.couchdb import CouchDB
from .base import NoSQLDialect
from . import dialects
@dialects.register_for(CouchDB)
class CouchDBDialect(NoSQLDialect):
    """Query dialect for the CouchDB adapter.

    Renders pydal query nodes as JavaScript boolean expressions.  The shared
    mutable default ``query_env={}`` is only ever read here, never mutated.
    """

    def _and(self, first, second, query_env={}):
        # JavaScript conjunction of the two expanded sub-expressions.
        return "(%s && %s)" % (
            self.expand(first, query_env=query_env),
            self.expand(second, query_env=query_env),
        )

    def _or(self, first, second, query_env={}):
        # JavaScript disjunction of the two expanded sub-expressions.
        return "(%s || %s)" % (
            self.expand(first, query_env=query_env),
            self.expand(second, query_env=query_env),
        )

    def eq(self, first, second=None, query_env={}):
        # Equality test; a missing second operand means "is null".
        if second is None:
            return "(%s == null)" % self.expand(first, query_env=query_env)
        # The right-hand side is expanded using the left field's type.
        return "(%s == %s)" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def ne(self, first, second=None, query_env={}):
        # Inequality test; a missing second operand means "is not null".
        if second is None:
            return "(%s != null)" % self.expand(first, query_env=query_env)
        return "(%s != %s)" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def comma(self, first, second, query_env={}):
        # Concatenation of two expression fragments with '+'.
        return "%s + %s" % (
            self.expand(first, query_env=query_env),
            self.expand(second, query_env=query_env),
        )
| 688 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_AGGREGATION_SERVICE_AGGREGATION_SERVICE_TOOL_NETWORK_INITIALIZER_H_
#define TOOLS_AGGREGATION_SERVICE_AGGREGATION_SERVICE_TOOL_NETWORK_INITIALIZER_H_
#include <memory>
#include "base/memory/scoped_refptr.h"
#include "mojo/public/cpp/bindings/remote.h"
#include "services/network/network_context.h"
#include "services/network/network_service.h"
#include "services/network/public/cpp/shared_url_loader_factory.h"
#include "services/network/public/mojom/url_loader_factory.mojom.h"
namespace aggregation_service {
// This class is responsible for initializing network states. The object should
// be kept alive for the duration of network usage.
class ToolNetworkInitializer {
 public:
  ToolNetworkInitializer();
  ToolNetworkInitializer(const ToolNetworkInitializer& other) = delete;
  ToolNetworkInitializer& operator=(const ToolNetworkInitializer& other) =
      delete;
  ~ToolNetworkInitializer();

  // Factory handed to consumers; valid only while this object is alive.
  scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory() {
    return shared_url_loader_factory_;
  }

 private:
  // Members are destroyed in reverse declaration order; the factories
  // presumably depend on |network_context_| and |network_service_|, so keep
  // this ordering.
  std::unique_ptr<network::NetworkService> network_service_;
  std::unique_ptr<network::NetworkContext> network_context_;
  mojo::Remote<network::mojom::URLLoaderFactory> url_loader_factory_;
  scoped_refptr<network::SharedURLLoaderFactory> shared_url_loader_factory_;
};
} // namespace aggregation_service
#endif // TOOLS_AGGREGATION_SERVICE_AGGREGATION_SERVICE_TOOL_NETWORK_INITIALIZER_H_ | 533 |
1,037 | #define PY_SSIZE_T_CLEAN
#include <Python.h>
#if PY_MAJOR_VERSION == 2

/* Python 2: registering the (method-less) module is all the init does. */
PyMODINIT_FUNC init_distorm3(void)
{
    (void)Py_InitModule("_distorm3", NULL);
}

#else

/* Python 3: module definition for the extension -- no docstring, no
   per-interpreter state (-1), no method table. */
static struct PyModuleDef _distorm3_module = {
    PyModuleDef_HEAD_INIT,
    "_distorm3",
    NULL,
    -1,
    NULL,
};

PyMODINIT_FUNC PyInit__distorm3(void)
{
    /* PyModule_Create yields NULL on failure, which is exactly what an init
       function must propagate, so its result is returned directly. */
    return PyModule_Create(&_distorm3_module);
}

#endif
| 214 |
677 | /*
* Copyright (C) 2016-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "WebAssemblyModuleConstructor.h"
#if ENABLE(WEBASSEMBLY)
#include "ArrayBuffer.h"
#include "ExceptionHelpers.h"
#include "FunctionPrototype.h"
#include "JSArrayBuffer.h"
#include "JSCInlines.h"
#include "JSTypedArrays.h"
#include "JSWebAssemblyCompileError.h"
#include "JSWebAssemblyHelpers.h"
#include "JSWebAssemblyModule.h"
#include "ObjectConstructor.h"
#include "SymbolTable.h"
#include "WasmCallee.h"
#include "WasmModuleInformation.h"
#include "WasmPlan.h"
#include "WebAssemblyModulePrototype.h"
#include <wtf/StdLibExtras.h>
namespace JSC {
static EncodedJSValue JSC_HOST_CALL webAssemblyModuleCustomSections(ExecState*);
static EncodedJSValue JSC_HOST_CALL webAssemblyModuleImports(ExecState*);
static EncodedJSValue JSC_HOST_CALL webAssemblyModuleExports(ExecState*);
}
#include "WebAssemblyModuleConstructor.lut.h"
namespace JSC {
const ClassInfo WebAssemblyModuleConstructor::s_info = { "Function", &Base::s_info, &constructorTableWebAssemblyModule, CREATE_METHOD_TABLE(WebAssemblyModuleConstructor) };
/* Source for WebAssemblyModuleConstructor.lut.h
@begin constructorTableWebAssemblyModule
customSections webAssemblyModuleCustomSections DontEnum|Function 2
imports webAssemblyModuleImports DontEnum|Function 1
exports webAssemblyModuleExports DontEnum|Function 1
@end
*/
// WebAssembly.Module.customSections(module, sectionName) -> Array of
// ArrayBuffer.  Returns a copy of the payload of every custom section whose
// name equals sectionName.
EncodedJSValue JSC_HOST_CALL webAssemblyModuleCustomSections(ExecState* exec)
{
    VM& vm = exec->vm();
    auto* globalObject = exec->lexicalGlobalObject();
    auto throwScope = DECLARE_THROW_SCOPE(vm);

    JSWebAssemblyModule* module = jsDynamicCast<JSWebAssemblyModule*>(vm, exec->argument(0));
    if (!module)
        return JSValue::encode(throwException(exec, throwScope, createTypeError(exec, ASCIILiteral("WebAssembly.Module.customSections called with non WebAssembly.Module argument"))));

    const String sectionNameString = exec->argument(1).getString(exec);
    RETURN_IF_EXCEPTION(throwScope, { });

    JSArray* result = constructEmptyArray(exec, nullptr, globalObject);
    RETURN_IF_EXCEPTION(throwScope, { });

    const auto& customSections = module->moduleInformation().customSections;
    for (const Wasm::CustomSection& section : customSections) {
        if (String::fromUTF8(section.name) == sectionNameString) {
            // Copy the section payload into a fresh ArrayBuffer; allocation
            // failure surfaces as an out-of-memory exception.
            auto buffer = ArrayBuffer::tryCreate(section.payload.data(), section.payload.size());
            if (!buffer)
                return JSValue::encode(throwException(exec, throwScope, createOutOfMemoryError(exec)));

            result->push(exec, JSArrayBuffer::create(vm, globalObject->m_arrayBufferStructure.get(), WTFMove(buffer)));
            RETURN_IF_EXCEPTION(throwScope, { });
        }
    }

    return JSValue::encode(result);
}
// WebAssembly.Module.imports(module) -> Array of { module, name, kind }.
EncodedJSValue JSC_HOST_CALL webAssemblyModuleImports(ExecState* exec)
{
    VM& vm = exec->vm();
    auto* globalObject = exec->lexicalGlobalObject();
    auto throwScope = DECLARE_THROW_SCOPE(vm);

    JSWebAssemblyModule* module = jsDynamicCast<JSWebAssemblyModule*>(vm, exec->argument(0));
    if (!module)
        return JSValue::encode(throwException(exec, throwScope, createTypeError(exec, ASCIILiteral("WebAssembly.Module.imports called with non WebAssembly.Module argument"))));

    JSArray* result = constructEmptyArray(exec, nullptr, globalObject);
    RETURN_IF_EXCEPTION(throwScope, { });

    const auto& imports = module->moduleInformation().imports;
    if (imports.size()) {
        // Hoist the property-name identifiers out of the loop.  Note this
        // Identifier shadows the JSWebAssemblyModule* "module" above, which
        // is no longer needed past this point.
        Identifier module = Identifier::fromString(exec, "module");
        Identifier name = Identifier::fromString(exec, "name");
        Identifier kind = Identifier::fromString(exec, "kind");
        for (const Wasm::Import& imp : imports) {
            JSObject* obj = constructEmptyObject(exec);
            RETURN_IF_EXCEPTION(throwScope, { });
            obj->putDirect(vm, module, jsString(exec, String::fromUTF8(imp.module)));
            obj->putDirect(vm, name, jsString(exec, String::fromUTF8(imp.field)));
            obj->putDirect(vm, kind, jsString(exec, String(makeString(imp.kind))));
            result->push(exec, obj);
            RETURN_IF_EXCEPTION(throwScope, { });
        }
    }

    return JSValue::encode(result);
}
// WebAssembly.Module.exports(module) -> Array of { name, kind }.
EncodedJSValue JSC_HOST_CALL webAssemblyModuleExports(ExecState* exec)
{
    VM& vm = exec->vm();
    auto* globalObject = exec->lexicalGlobalObject();
    auto throwScope = DECLARE_THROW_SCOPE(vm);

    JSWebAssemblyModule* module = jsDynamicCast<JSWebAssemblyModule*>(vm, exec->argument(0));
    if (!module)
        return JSValue::encode(throwException(exec, throwScope, createTypeError(exec, ASCIILiteral("WebAssembly.Module.exports called with non WebAssembly.Module argument"))));

    JSArray* result = constructEmptyArray(exec, nullptr, globalObject);
    RETURN_IF_EXCEPTION(throwScope, { });

    const auto& exports = module->moduleInformation().exports;
    if (exports.size()) {
        // Hoist the property-name identifiers out of the loop.
        Identifier name = Identifier::fromString(exec, "name");
        Identifier kind = Identifier::fromString(exec, "kind");
        for (const Wasm::Export& exp : exports) {
            JSObject* obj = constructEmptyObject(exec);
            RETURN_IF_EXCEPTION(throwScope, { });
            obj->putDirect(vm, name, jsString(exec, String::fromUTF8(exp.field)));
            obj->putDirect(vm, kind, jsString(exec, String(makeString(exp.kind))));
            result->push(exec, obj);
            RETURN_IF_EXCEPTION(throwScope, { });
        }
    }

    return JSValue::encode(result);
}
// [[Construct]] handler: `new WebAssembly.Module(bytes)`.
static EncodedJSValue JSC_HOST_CALL constructJSWebAssemblyModule(ExecState* exec)
{
    VM& vm = exec->vm();
    auto throwScope = DECLARE_THROW_SCOPE(vm);
    // Honour new.target so subclasses get their derived structure.
    auto* structure = InternalFunction::createSubclassStructure(exec, exec->newTarget(), exec->lexicalGlobalObject()->WebAssemblyModuleStructure());
    RETURN_IF_EXCEPTION(throwScope, encodedJSValue());
    throwScope.release();
    return JSValue::encode(WebAssemblyModuleConstructor::createModule(exec, exec->argument(0), structure));
}

// [[Call]] handler: calling WebAssembly.Module without `new` is a TypeError.
static EncodedJSValue JSC_HOST_CALL callJSWebAssemblyModule(ExecState* exec)
{
    VM& vm = exec->vm();
    auto scope = DECLARE_THROW_SCOPE(vm);
    return JSValue::encode(throwConstructorCannotBeCalledAsFunctionTypeError(exec, scope, "WebAssembly.Module"));
}

// Copies the bytes out of the caller's buffer, validates them synchronously
// and wraps the result in a stub JSWebAssemblyModule.
JSValue WebAssemblyModuleConstructor::createModule(ExecState* exec, JSValue buffer, Structure* structure)
{
    VM& vm = exec->vm();
    auto scope = DECLARE_THROW_SCOPE(vm);
    Vector<uint8_t> source = createSourceBufferFromValue(vm, exec, buffer);
    RETURN_IF_EXCEPTION(scope, { });
    return JSWebAssemblyModule::createStub(vm, exec, structure, Wasm::Module::validateSync(vm, WTFMove(source)));
}
// Standard two-phase JSC cell creation: placement-allocate in the GC heap,
// then run finishCreation to install properties.
WebAssemblyModuleConstructor* WebAssemblyModuleConstructor::create(VM& vm, Structure* structure, WebAssemblyModulePrototype* thisPrototype)
{
    auto* constructor = new (NotNull, allocateCell<WebAssemblyModuleConstructor>(vm.heap)) WebAssemblyModuleConstructor(vm, structure);
    constructor->finishCreation(vm, thisPrototype);
    return constructor;
}
Structure* WebAssemblyModuleConstructor::createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
{
    return Structure::create(vm, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), info());
}
void WebAssemblyModuleConstructor::finishCreation(VM& vm, WebAssemblyModulePrototype* prototype)
{
    Base::finishCreation(vm, ASCIILiteral("Module"));
    // `prototype` and `length` are non-configurable/non-writable, matching the
    // usual built-in constructor shape. length is 1 (the bytes argument).
    putDirectWithoutTransition(vm, vm.propertyNames->prototype, prototype, DontEnum | DontDelete | ReadOnly);
    putDirectWithoutTransition(vm, vm.propertyNames->length, jsNumber(1), ReadOnly | DontEnum | DontDelete);
}
WebAssemblyModuleConstructor::WebAssemblyModuleConstructor(VM& vm, Structure* structure)
    : Base(vm, structure)
{
}
// Routes `new WebAssembly.Module(...)` to the native constructor above.
ConstructType WebAssemblyModuleConstructor::getConstructData(JSCell*, ConstructData& constructData)
{
    constructData.native.function = constructJSWebAssemblyModule;
    return ConstructType::Host;
}
// Routes plain calls to the TypeError-throwing host function above.
CallType WebAssemblyModuleConstructor::getCallData(JSCell*, CallData& callData)
{
    callData.native.function = callJSWebAssemblyModule;
    return CallType::Host;
}
// GC tracing: this constructor holds no extra GC-managed children beyond what
// the base class already visits.
void WebAssemblyModuleConstructor::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    auto* thisObject = jsCast<WebAssemblyModuleConstructor*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
    Base::visitChildren(thisObject, visitor);
}
} // namespace JSC
#endif // ENABLE(WEBASSEMBLY)
| 3,345 |
423 | <filename>Pods/Headers/Private/JSQMessagesViewController/UIView+JSQMessages.h<gh_stars>100-1000
//
// Created by <NAME>
// http://www.jessesquires.com
//
//
// Documentation
// http://cocoadocs.org/docsets/JSQMessagesViewController
//
//
// GitHub
// https://github.com/jessesquires/JSQMessagesViewController
//
//
// License
// Copyright (c) 2014 <NAME>
// Released under an MIT license: http://opensource.org/licenses/MIT
//
#import <UIKit/UIKit.h>
@interface UIView (JSQMessages)
/**
 *  Pins the specified subview to one edge of the receiver, as specified by the given attribute, by adding a layout constraint.
 *
 *  @param subview   The subview to be pinned. NOTE(review): presumably must already be in the receiver's subview hierarchy — confirm callers add it first.
 *  @param attribute The layout constraint attribute specifying one of `NSLayoutAttributeBottom`, `NSLayoutAttributeTop`, `NSLayoutAttributeLeading`, `NSLayoutAttributeTrailing`.
 */
- (void)jsq_pinSubview:(UIView *)subview toEdge:(NSLayoutAttribute)attribute;
/**
 *  Pins all four edges of the specified subview to the corresponding edges of the receiver by adding layout constraints.
 *
 *  @param subview The subview to be pinned.
 */
- (void)jsq_pinAllEdgesOfSubview:(UIView *)subview;
@end
| 377 |
349 | <reponame>AntonDyukarev/wiremock
/*
* Copyright (C) 2017-2021 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.tomakehurst.wiremock.recording;
import static com.github.tomakehurst.wiremock.matching.MockRequest.mockRequest;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import com.github.tomakehurst.wiremock.http.Request;
import com.github.tomakehurst.wiremock.matching.*;
import org.junit.jupiter.api.Test;
/** Unit tests for the body-pattern selection performed during recording. */
public class RequestBodyAutomaticPatternFactoryTest {

  private static final String JSON_TEST_STRING = "{ \"foo\": 1 }";
  private static final String XML_TEST_STRING = "<foo/>";
  private static final String MULTIPART_TEST_STRING =
      "--abc\n"
          + "Content-Disposition: form-data; name=\"test1\"\n\n"
          + "test one\n"
          + "--abc\n"
          + "Content-Disposition: form-data; name=\"texs2\"\n\n"
          + "test two\n"
          + "--abc";

  @Test
  public void forRequestWithTextBodyIsCaseSensitiveByDefault() {
    // No Content-Type header: the body is treated as plain text.
    Request textRequest = mockRequest().body(JSON_TEST_STRING);

    EqualToPattern bodyPattern = (EqualToPattern) patternForRequest(textRequest);

    assertThat(bodyPattern.getEqualTo(), is(JSON_TEST_STRING));
    assertThat(bodyPattern.getCaseInsensitive(), is(false));
  }

  @Test
  public void forRequestWithTextBodyRespectsCaseInsensitiveOption() {
    Request textRequest = mockRequest().body(JSON_TEST_STRING);
    RequestBodyAutomaticPatternFactory factory =
        new RequestBodyAutomaticPatternFactory(false, false, true);

    EqualToPattern bodyPattern = (EqualToPattern) factory.forRequest(textRequest);

    assertThat(bodyPattern.getEqualTo(), is(JSON_TEST_STRING));
    assertThat(bodyPattern.getCaseInsensitive(), is(true));
  }

  @Test
  public void forRequestWithJsonBodyIgnoresExtraElementsAndArrayOrderByDefault() {
    Request jsonRequest =
        mockRequest().header("Content-Type", "application/json").body(JSON_TEST_STRING);

    EqualToJsonPattern jsonPattern = (EqualToJsonPattern) patternForRequest(jsonRequest);

    assertThat(jsonPattern.getEqualToJson(), is(JSON_TEST_STRING));
    assertThat(jsonPattern.isIgnoreExtraElements(), is(true));
    assertThat(jsonPattern.isIgnoreArrayOrder(), is(true));
  }

  @Test
  public void forRequestWithJsonBodyRespectsOptions() {
    RequestBodyAutomaticPatternFactory factory =
        new RequestBodyAutomaticPatternFactory(false, false, false);
    Request jsonRequest =
        mockRequest().header("Content-Type", "application/json").body(JSON_TEST_STRING);

    EqualToJsonPattern jsonPattern = (EqualToJsonPattern) factory.forRequest(jsonRequest);

    assertThat(jsonPattern.getEqualToJson(), is(JSON_TEST_STRING));
    assertThat(jsonPattern.isIgnoreExtraElements(), is(false));
    assertThat(jsonPattern.isIgnoreArrayOrder(), is(false));
  }

  @Test
  public void forRequestWithXmlBody() {
    Request xmlRequest =
        mockRequest().header("Content-Type", "application/xml").body(XML_TEST_STRING);

    EqualToXmlPattern xmlPattern = (EqualToXmlPattern) patternForRequest(xmlRequest);

    assertThat(xmlPattern.getEqualToXml(), is(XML_TEST_STRING));
  }

  @Test
  public void forRequestWithMultipartBody() {
    Request multipartRequest =
        mockRequest().header("Content-Type", "multipart/form-data").body(MULTIPART_TEST_STRING);

    // TODO: Update this when we add a matcher for multipart bodies
    AnythingPattern fallbackPattern = (AnythingPattern) patternForRequest(multipartRequest);

    assertThat(fallbackPattern.toString(), is("anything"));
  }

  @Test
  public void forRequestWithBinaryBody() {
    Request binaryRequest =
        mockRequest().header("Content-Type", "application/octet-stream").body(new byte[] {1, 2, 3});

    BinaryEqualToPattern binaryPattern = (BinaryEqualToPattern) patternForRequest(binaryRequest);

    assertThat(binaryPattern.toString(), is("binaryEqualTo AQID"));
  }

  /** Applies the factory defaults to the given request. */
  private static ContentPattern<?> patternForRequest(Request request) {
    return RequestBodyAutomaticPatternFactory.DEFAULTS.forRequest(request);
  }
}
| 1,480 |
3,372 | /*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.codegurureviewer.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/codeguru-reviewer-2019-09-19/DescribeRecommendationFeedback"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
// NOTE(review): @Generated AWS SDK model class — do not hand-edit logic;
// regenerate from the service model instead. Comments below are review notes only.
public class DescribeRecommendationFeedbackRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {
    /**
     * <p>
     * The Amazon Resource Name (ARN) of the <a
     * href="https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CodeReview.html"> <code>CodeReview</code> </a>
     * object.
     * </p>
     */
    private String codeReviewArn;
    /**
     * <p>
     * The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.
     * </p>
     */
    private String recommendationId;
    /**
     * <p>
     * Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the user
     * making the request.
     * </p>
     * <p>
     * The <code>UserId</code> is an IAM principal that can be specified as an Amazon Web Services account ID or an
     * Amazon Resource Name (ARN). For more information, see <a href=
     * "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#Principal_specifying"
     * > Specifying a Principal</a> in the <i>Amazon Web Services Identity and Access Management User Guide</i>.
     * </p>
     */
    private String userId;
    /**
     * <p>
     * The Amazon Resource Name (ARN) of the <a
     * href="https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CodeReview.html"> <code>CodeReview</code> </a>
     * object.
     * </p>
     *
     * @param codeReviewArn
     *        The Amazon Resource Name (ARN) of the <a
     *        href="https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CodeReview.html">
     *        <code>CodeReview</code> </a> object.
     */
    public void setCodeReviewArn(String codeReviewArn) {
        this.codeReviewArn = codeReviewArn;
    }
    /**
     * <p>
     * The Amazon Resource Name (ARN) of the <a
     * href="https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CodeReview.html"> <code>CodeReview</code> </a>
     * object.
     * </p>
     *
     * @return The Amazon Resource Name (ARN) of the <a
     *         href="https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CodeReview.html">
     *         <code>CodeReview</code> </a> object.
     */
    public String getCodeReviewArn() {
        return this.codeReviewArn;
    }
    /**
     * <p>
     * The Amazon Resource Name (ARN) of the <a
     * href="https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CodeReview.html"> <code>CodeReview</code> </a>
     * object.
     * </p>
     *
     * @param codeReviewArn
     *        The Amazon Resource Name (ARN) of the <a
     *        href="https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CodeReview.html">
     *        <code>CodeReview</code> </a> object.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeRecommendationFeedbackRequest withCodeReviewArn(String codeReviewArn) {
        setCodeReviewArn(codeReviewArn);
        return this;
    }
    /**
     * <p>
     * The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.
     * </p>
     *
     * @param recommendationId
     *        The recommendation ID that can be used to track the provided recommendations and then to collect the
     *        feedback.
     */
    public void setRecommendationId(String recommendationId) {
        this.recommendationId = recommendationId;
    }
    /**
     * <p>
     * The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.
     * </p>
     *
     * @return The recommendation ID that can be used to track the provided recommendations and then to collect the
     *         feedback.
     */
    public String getRecommendationId() {
        return this.recommendationId;
    }
    /**
     * <p>
     * The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.
     * </p>
     *
     * @param recommendationId
     *        The recommendation ID that can be used to track the provided recommendations and then to collect the
     *        feedback.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeRecommendationFeedbackRequest withRecommendationId(String recommendationId) {
        setRecommendationId(recommendationId);
        return this;
    }
    /**
     * <p>
     * Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the user
     * making the request.
     * </p>
     * <p>
     * The <code>UserId</code> is an IAM principal that can be specified as an Amazon Web Services account ID or an
     * Amazon Resource Name (ARN). For more information, see <a href=
     * "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#Principal_specifying"
     * > Specifying a Principal</a> in the <i>Amazon Web Services Identity and Access Management User Guide</i>.
     * </p>
     *
     * @param userId
     *        Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the
     *        user making the request. </p>
     *        <p>
     *        The <code>UserId</code> is an IAM principal that can be specified as an Amazon Web Services account ID or
     *        an Amazon Resource Name (ARN). For more information, see <a href=
     *        "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#Principal_specifying"
     *        > Specifying a Principal</a> in the <i>Amazon Web Services Identity and Access Management User Guide</i>.
     */
    public void setUserId(String userId) {
        this.userId = userId;
    }
    /**
     * <p>
     * Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the user
     * making the request.
     * </p>
     * <p>
     * The <code>UserId</code> is an IAM principal that can be specified as an Amazon Web Services account ID or an
     * Amazon Resource Name (ARN). For more information, see <a href=
     * "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#Principal_specifying"
     * > Specifying a Principal</a> in the <i>Amazon Web Services Identity and Access Management User Guide</i>.
     * </p>
     *
     * @return Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the
     *         user making the request. </p>
     *         <p>
     *         The <code>UserId</code> is an IAM principal that can be specified as an Amazon Web Services account ID or
     *         an Amazon Resource Name (ARN). For more information, see <a href=
     *         "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#Principal_specifying"
     *         > Specifying a Principal</a> in the <i>Amazon Web Services Identity and Access Management User Guide</i>.
     */
    public String getUserId() {
        return this.userId;
    }
    /**
     * <p>
     * Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the user
     * making the request.
     * </p>
     * <p>
     * The <code>UserId</code> is an IAM principal that can be specified as an Amazon Web Services account ID or an
     * Amazon Resource Name (ARN). For more information, see <a href=
     * "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#Principal_specifying"
     * > Specifying a Principal</a> in the <i>Amazon Web Services Identity and Access Management User Guide</i>.
     * </p>
     *
     * @param userId
     *        Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the
     *        user making the request. </p>
     *        <p>
     *        The <code>UserId</code> is an IAM principal that can be specified as an Amazon Web Services account ID or
     *        an Amazon Resource Name (ARN). For more information, see <a href=
     *        "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#Principal_specifying"
     *        > Specifying a Principal</a> in the <i>Amazon Web Services Identity and Access Management User Guide</i>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeRecommendationFeedbackRequest withUserId(String userId) {
        setUserId(userId);
        return this;
    }
    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getCodeReviewArn() != null)
            sb.append("CodeReviewArn: ").append(getCodeReviewArn()).append(",");
        if (getRecommendationId() != null)
            sb.append("RecommendationId: ").append(getRecommendationId()).append(",");
        if (getUserId() != null)
            sb.append("UserId: ").append(getUserId());
        sb.append("}");
        return sb.toString();
    }
    // Generated structural equality: each `a == null ^ b == null` check treats
    // "exactly one side null" as unequal before comparing values.
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (obj instanceof DescribeRecommendationFeedbackRequest == false)
            return false;
        DescribeRecommendationFeedbackRequest other = (DescribeRecommendationFeedbackRequest) obj;
        if (other.getCodeReviewArn() == null ^ this.getCodeReviewArn() == null)
            return false;
        if (other.getCodeReviewArn() != null && other.getCodeReviewArn().equals(this.getCodeReviewArn()) == false)
            return false;
        if (other.getRecommendationId() == null ^ this.getRecommendationId() == null)
            return false;
        if (other.getRecommendationId() != null && other.getRecommendationId().equals(this.getRecommendationId()) == false)
            return false;
        if (other.getUserId() == null ^ this.getUserId() == null)
            return false;
        if (other.getUserId() != null && other.getUserId().equals(this.getUserId()) == false)
            return false;
        return true;
    }
    // Standard 31-prime rolling hash over the nullable members, consistent with equals().
    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getCodeReviewArn() == null) ? 0 : getCodeReviewArn().hashCode());
        hashCode = prime * hashCode + ((getRecommendationId() == null) ? 0 : getRecommendationId().hashCode());
        hashCode = prime * hashCode + ((getUserId() == null) ? 0 : getUserId().hashCode());
        return hashCode;
    }
    @Override
    public DescribeRecommendationFeedbackRequest clone() {
        return (DescribeRecommendationFeedbackRequest) super.clone();
    }
}
| 4,539 |
342 | /**
* Copyright 2014 Cloudera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kitesdk.minicluster;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.net.DNS;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An HBase minicluster service implementation.
*/
public class HBaseService implements Service {

  private static final Logger logger = LoggerFactory
      .getLogger(HBaseService.class);

  /**
   * Service registration for MiniCluster factory
   */
  static {
    MiniCluster.registerService(HBaseService.class);
  }

  /**
   * Service configuration keys
   */
  public static final String MASTER_PORT_KEY = "hbase-master-port";
  public static final String REGIONSERVER_PORT_KEY = "hbase-regionserver-port";

  /**
   * The name of the HBase meta table, which we need to successfully scan before
   * considering the cluster launched.
   */
  private static final String HBASE_META_TABLE = "hbase:meta";

  /**
   * Configuration settings
   */
  private Configuration hadoopConf;
  private int zookeeperClientPort = 2828;
  private String bindIP = "127.0.0.1";
  private int masterPort = 60000;
  private int regionserverPort = 60020;

  /**
   * Embedded HBase cluster
   */
  private MiniHBaseCluster hbaseCluster;

  public HBaseService() {
  }

  /**
   * Reads the bind address, zookeeper client port, and master/regionserver
   * ports from the service configuration, keeping the field defaults for any
   * key that is absent.
   */
  @Override
  public void configure(ServiceConfig serviceConfig) {
    if (serviceConfig.contains(MiniCluster.BIND_IP_KEY)) {
      bindIP = serviceConfig.get(MiniCluster.BIND_IP_KEY);
    }
    if (serviceConfig.contains(MiniCluster.ZK_PORT_KEY)) {
      zookeeperClientPort = Integer.parseInt(serviceConfig
          .get(MiniCluster.ZK_PORT_KEY));
    }
    if (serviceConfig.contains(MASTER_PORT_KEY)) {
      masterPort = Integer.parseInt(serviceConfig.get(MASTER_PORT_KEY));
    }
    if (serviceConfig.contains(REGIONSERVER_PORT_KEY)) {
      // BUG FIX: this previously assigned to masterPort, so a configured
      // regionserver port was silently ignored and clobbered the master port.
      regionserverPort = Integer.parseInt(serviceConfig
          .get(REGIONSERVER_PORT_KEY));
    }
    hadoopConf = serviceConfig.getHadoopConf();
  }

  @Override
  public Configuration getHadoopConf() {
    return hadoopConf;
  }

  /**
   * Starts an in-process HBase master and regionserver on top of the already
   * running HDFS and zookeeper services, then blocks until the cluster is
   * usable (master initialized, regionserver online, hbase:meta scannable).
   */
  @Override
  public void start() throws IOException, InterruptedException {
    Preconditions.checkState(hadoopConf != null,
        "Hadoop Configuration must be set before starting mini HBase cluster");
    Preconditions.checkState(zookeeperClientPort != 0,
        "The zookeeper client port must be configured to a non zero value");
    // We first start an empty HBase cluster before fully configuring it
    hbaseCluster = new MiniHBaseCluster(hadoopConf, 0, 0, null, null);
    // Configure the cluster, and start a master and regionserver.
    hadoopConf = configureHBaseCluster(hbaseCluster.getConf(),
        zookeeperClientPort, FileSystem.get(hadoopConf), bindIP, masterPort,
        regionserverPort);
    hbaseCluster.startMaster();
    hbaseCluster.startRegionServer();
    waitForHBaseToComeOnline(hbaseCluster);
    logger.info("HBase Minicluster Service Started.");
  }

  /**
   * Shuts the embedded cluster down and waits for it to fully stop. Safe to
   * call when the cluster was never started (no-op).
   */
  @Override
  public void stop() throws IOException {
    if (hbaseCluster != null) {
      hbaseCluster.shutdown();
      this.hbaseCluster.killAll();
      this.hbaseCluster.waitUntilShutDown();
      logger.info("HBase Minicluster Service Shut Down.");
      this.hbaseCluster = null;
    }
  }

  /** HBase requires HDFS (storage) and zookeeper (coordination) to be up first. */
  @Override
  public List<Class<? extends Service>> dependencies() {
    List<Class<? extends Service>> services = new ArrayList<Class<? extends Service>>();
    services.add(HdfsService.class);
    services.add(ZookeeperService.class);
    return services;
  }

  /**
   * Configure the HBase cluster before launching it
   *
   * @param config
   *          already created Hadoop configuration we'll further configure for
   *          HDFS
   * @param zkClientPort
   *          The client port zookeeper is listening on
   * @param hdfsFs
   *          The HDFS FileSystem this HBase cluster will run on top of
   * @param bindIP
   *          The IP Address to force bind all sockets on. If null, will use
   *          defaults
   * @param masterPort
   *          The port the master listens on
   * @param regionserverPort
   *          The port the regionserver listens on
   * @return The updated Configuration object.
   * @throws IOException
   */
  private static Configuration configureHBaseCluster(Configuration config,
      int zkClientPort, FileSystem hdfsFs, String bindIP, int masterPort,
      int regionserverPort) throws IOException {
    // Configure the zookeeper port
    config
        .set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(zkClientPort));
    // Initialize HDFS path configurations required by HBase
    Path hbaseDir = new Path(hdfsFs.makeQualified(hdfsFs.getHomeDirectory()),
        "hbase");
    FSUtils.setRootDir(config, hbaseDir);
    hdfsFs.mkdirs(hbaseDir);
    config.set("fs.defaultFS", hdfsFs.getUri().toString());
    config.set("fs.default.name", hdfsFs.getUri().toString());
    FSUtils.setVersion(hdfsFs, hbaseDir);
    // Configure the bind addresses and ports. If running in Openshift, we only
    // have permission to bind to the private IP address, accessible through an
    // environment variable.
    logger.info("HBase force binding to ip: " + bindIP);
    config.set("hbase.master.ipc.address", bindIP);
    config.set(HConstants.MASTER_PORT, Integer.toString(masterPort));
    config.set("hbase.regionserver.ipc.address", bindIP);
    config
        .set(HConstants.REGIONSERVER_PORT, Integer.toString(regionserverPort));
    config.set(HConstants.ZOOKEEPER_QUORUM, bindIP);
    // By default, the HBase master and regionservers will report to zookeeper
    // that its hostname is what it determines by reverse DNS lookup, and not
    // what we use as the bind address. This means when we set the bind
    // address, daemons won't actually be able to connect to eachother if they
    // are different. Here, we do something that's illegal in 48 states - use
    // reflection to override a private static final field in the DNS class
    // that is a cachedHostname. This way, we are forcing the hostname that
    // reverse dns finds. This may not be compatible with newer versions of
    // Hadoop.
    try {
      Field cachedHostname = DNS.class.getDeclaredField("cachedHostname");
      cachedHostname.setAccessible(true);
      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(cachedHostname, cachedHostname.getModifiers()
          & ~Modifier.FINAL);
      cachedHostname.set(null, bindIP);
    } catch (Exception e) {
      // Reflection can throw so many checked exceptions. Let's wrap in an
      // IOException.
      throw new IOException(e);
    }
    // By setting the info ports to -1 for, we won't launch the master or
    // regionserver info web interfaces
    config.set(HConstants.MASTER_INFO_PORT, "-1");
    config.set(HConstants.REGIONSERVER_INFO_PORT, "-1");
    return config;
  }

  /**
   * Wait for the hbase cluster to start up and come online, and then return.
   *
   * @param hbaseCluster
   *          The hbase cluster to wait for.
   * @throws IOException
   */
  private static void waitForHBaseToComeOnline(MiniHBaseCluster hbaseCluster)
      throws IOException, InterruptedException {
    // Wait for the master to be initialized. This is required because even
    // before it's initialized, the regionserver can come online and the meta
    // table can be scannable. If the cluster is quickly shut down after all of
    // this before the master is initialized, it can cause the shutdown to hang
    // indefinitely as initialization tasks will block forever.
    //
    // Unfortunately, no method available to wait for master to come online like
    // regionservers, so we use a while loop with a sleep so we don't hammer the
    // isInitialized method.
    while (!hbaseCluster.getMaster().isInitialized()) {
      Thread.sleep(1000);
    }
    // Now wait for the regionserver to come online.
    hbaseCluster.getRegionServer(0).waitForServerOnline();
    // Don't leave here till we've done a successful scan of the hbase:meta
    // This validates that not only is the regionserver up, but that the
    // meta region is online so there are no race conditions where operations
    // requiring the meta region might run before it's available. Otherwise,
    // operations are susceptible to region not online errors.
    HTable t = new HTable(hbaseCluster.getConf(), HBASE_META_TABLE);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();
  }
}
| 3,245 |
2,151 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "headless/public/util/deterministic_http_protocol_handler.h"
#include <memory>
#include "base/macros.h"
#include "headless/public/headless_browser_context.h"
#include "headless/public/util/deterministic_dispatcher.h"
#include "headless/public/util/generic_url_request_job.h"
#include "headless/public/util/http_url_fetcher.h"
#include "net/http/http_response_headers.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_job_factory_impl.h"
namespace headless {
// A GenericURLRequestJob::Delegate whose callbacks are all no-ops. Used when
// the handler only needs deterministic dispatch and nothing observes the
// individual resource loads.
class DeterministicHttpProtocolHandler::NopGenericURLRequestJobDelegate
    : public GenericURLRequestJob::Delegate {
 public:
  NopGenericURLRequestJobDelegate() = default;
  ~NopGenericURLRequestJobDelegate() override = default;
  // Intentionally empty: load failures are ignored.
  void OnResourceLoadFailed(const Request* request, net::Error error) override {
  }
  // Intentionally empty: completed loads are ignored.
  void OnResourceLoadComplete(
      const Request* request,
      const GURL& final_url,
      scoped_refptr<net::HttpResponseHeaders> response_headers,
      const char* body,
      size_t body_size) override {}
 private:
  DISALLOW_COPY_AND_ASSIGN(NopGenericURLRequestJobDelegate);
};
// The browser context starts null; MaybeCreateJob passes it through to each
// GenericURLRequestJob.
DeterministicHttpProtocolHandler::DeterministicHttpProtocolHandler(
    DeterministicDispatcher* deterministic_dispatcher,
    scoped_refptr<base::SingleThreadTaskRunner> io_task_runner)
    : deterministic_dispatcher_(deterministic_dispatcher),
      headless_browser_context_(nullptr),
      io_task_runner_(io_task_runner),
      nop_delegate_(new NopGenericURLRequestJobDelegate()) {}
DeterministicHttpProtocolHandler::~DeterministicHttpProtocolHandler() {
  // The context and job factory were lazily created on the IO thread (see
  // MaybeCreateJob), so they must also be destroyed there.
  if (url_request_context_)
    io_task_runner_->DeleteSoon(FROM_HERE, url_request_context_.release());
  if (url_request_job_factory_)
    io_task_runner_->DeleteSoon(FROM_HERE, url_request_job_factory_.release());
}
// Wraps each request in a GenericURLRequestJob that fetches through a private
// URLRequestContext, so replies can be sequenced by the DeterministicDispatcher.
// NOTE(review): url_request_context_/url_request_job_factory_ are mutated from
// this const method — presumably declared mutable in the header; confirm they
// are only ever touched on the IO thread (the DCHECK below guards creation).
net::URLRequestJob* DeterministicHttpProtocolHandler::MaybeCreateJob(
    net::URLRequest* request,
    net::NetworkDelegate* network_delegate) const {
  if (!url_request_context_) {
    DCHECK(io_task_runner_->BelongsToCurrentThread());
    // Create our own URLRequestContext with an empty URLRequestJobFactoryImpl
    // which lets us use the default http(s) RequestJobs.
    url_request_context_.reset(new net::URLRequestContext());
    url_request_context_->CopyFrom(request->context());
    url_request_job_factory_.reset(new net::URLRequestJobFactoryImpl());
    url_request_context_->set_job_factory(url_request_job_factory_.get());
  }
  return new GenericURLRequestJob(
      request, network_delegate, deterministic_dispatcher_,
      std::make_unique<HttpURLFetcher>(url_request_context_.get()),
      nop_delegate_.get(), headless_browser_context_);
}
} // namespace headless
| 969 |
549 | <reponame>D-J-Roberts/xsimd
/***************************************************************************
* Copyright (c) <NAME>, <NAME>, <NAME> and *
* <NAME> *
* Copyright (c) QuantStack *
* Copyright (c) <NAME> *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef XSIMD_FMA3_SSE_REGISTER_HPP
#define XSIMD_FMA3_SSE_REGISTER_HPP
#include "./xsimd_sse4_2_register.hpp"
namespace xsimd
{
    // Primary template: declared only; each supported base ISA provides its
    // own specialization below.
    template <typename arch>
    struct fma3;

    /**
     * @ingroup arch
     *
     * SSE4.2 + FMA instructions
     */
    template <>
    struct fma3<sse4_2> : sse4_2
    {
        // Compile-time support depends on the XSIMD_WITH_FMA3_SSE build flag.
        static constexpr bool supported() noexcept { return XSIMD_WITH_FMA3_SSE; }
        static constexpr bool available() noexcept { return true; }
        // Ordering key used when ranking architectures for dispatch.
        static constexpr unsigned version() noexcept { return generic::version(1, 4, 3); }
        static constexpr char const* name() noexcept { return "fma3+sse4.2"; }
    };
#if XSIMD_WITH_FMA3_SSE
namespace types
{
XSIMD_DECLARE_SIMD_REGISTER_ALIAS(fma3<sse4_2>, sse4_2);
}
#endif
}
#endif
| 802 |
452 | <reponame>the-real-mrcs/firebase-admin-java<filename>src/main/java/com/google/firebase/database/snapshot/Index.java
/*
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.firebase.database.snapshot;
import com.google.firebase.database.core.Path;
import java.util.Comparator;
/**
 * Orders database children according to a query's orderBy definition. Concrete
 * subclasses compare by value, by key, or by a child path.
 */
public abstract class Index implements Comparator<NamedNode> {

  /** Resolves the index implementation named by a query's orderBy string. */
  public static Index fromQueryDefinition(String str) {
    switch (str) {
      case ".value":
        return ValueIndex.getInstance();
      case ".key":
        return KeyIndex.getInstance();
      case ".priority":
        throw new IllegalStateException(
            "queryDefinition shouldn't ever be .priority since it's the default");
      default:
        // Anything else is interpreted as a child path to index on.
        return new PathIndex(new Path(str));
    }
  }

  /** Whether the given node carries a value this index is defined on. */
  public abstract boolean isDefinedOn(Node a);

  /** True when the indexed portion of the node changed between the two snapshots. */
  public boolean indexedValueChanged(Node oldNode, Node newNode) {
    NamedNode before = new NamedNode(ChildKey.getMinName(), oldNode);
    NamedNode after = new NamedNode(ChildKey.getMinName(), newNode);
    return this.compare(before, after) != 0;
  }

  public abstract NamedNode makePost(ChildKey name, Node value);

  /** Smallest possible post under this index's ordering. */
  public NamedNode minPost() {
    return NamedNode.getMinNode();
  }

  public abstract NamedNode maxPost();

  public abstract String getQueryDefinition();

  /** Compares two nodes, optionally with the ordering reversed. */
  public int compare(NamedNode one, NamedNode two, boolean reverse) {
    return reverse ? this.compare(two, one) : this.compare(one, two);
  }
}
| 646 |
379 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from paddle_fl.paddle_fl.core.scheduler.agent_master import FLScheduler
def parse_args(argv=None):
    """Parse command-line options for the FL scheduler.

    Args:
        argv: optional list of argument strings to parse; ``None`` (the
            default) keeps the previous behavior of reading ``sys.argv[1:]``,
            so existing callers are unaffected.

    Returns:
        argparse.Namespace with ``trainer_num`` (int, default 2).
    """
    parser = argparse.ArgumentParser(description="scheduler")
    parser.add_argument(
        '--trainer_num',
        type=int,
        default=2,
        help='number trainers(default: 2)')
    return parser.parse_args(argv)
args = parse_args()
num_trainer = args.trainer_num
# One worker per trainer process; a single parameter server.
worker_num = num_trainer
server_num = 1
# Define the number of worker/server and the port for scheduler
scheduler = FLScheduler(worker_num, server_num, port=9091)
# Every worker participates in every round (no partial sampling).
scheduler.set_sample_worker_num(worker_num)
scheduler.init_env()
print("init env done.")
# Blocks and coordinates the federated training loop.
scheduler.start_fl_training()
| 411 |
2,591 | package liquibase.changelog.filter;
import liquibase.changelog.ChangeSet;
/**
 * Strategy interface deciding whether a {@link ChangeSet} should be run
 * during a changelog execution.
 */
public interface ChangeSetFilter {
    /**
     * Evaluates the given change set.
     *
     * @param changeSet the change set under consideration
     * @return a result carrying the accept/reject decision (and its reason)
     */
    public ChangeSetFilterResult accepts(ChangeSet changeSet);
}
| 52 |
1,178 | <reponame>leozz37/makani<gh_stars>1000+
/*
* Copyright 2020 Makani Technologies LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef AVIONICS_MOTOR_FIRMWARE_ERRORS_H_
#define AVIONICS_MOTOR_FIRMWARE_ERRORS_H_
#include <stdbool.h>
#include "avionics/motor/firmware/flags.h"
/* Bitmask of motor error flags treated as critical (timeouts, bad
 * mode/command, over/under-voltage, and the per-phase/bus over-current and
 * fault-current flags). */
#define MOTOR_ERRORS_CRITICAL (kMotorErrorTimeout \
                               | kMotorErrorBadMode \
                               | kMotorErrorBadCommand \
                               | kMotorErrorOverCurrentIaP \
                               | kMotorErrorOverCurrentIaN \
                               | kMotorErrorOverCurrentIbP \
                               | kMotorErrorOverCurrentIbN \
                               | kMotorErrorOverCurrentIcP \
                               | kMotorErrorOverCurrentIcN \
                               | kMotorErrorOverCurrentIBusP \
                               | kMotorErrorOverCurrentIBusN \
                               | kMotorErrorOverVoltage \
                               | kMotorErrorUnderVoltage \
                               | kMotorErrorFaultCurrentIaP \
                               | kMotorErrorFaultCurrentIaN \
                               | kMotorErrorFaultCurrentIbP \
                               | kMotorErrorFaultCurrentIbN \
                               | kMotorErrorFaultCurrentIcP \
                               | kMotorErrorFaultCurrentIcN)
/* Every remaining error bit is non-critical by definition. */
#define MOTOR_ERRORS_NON_CRITICAL \
  (kMotorErrorAll ^ MOTOR_ERRORS_CRITICAL)
typedef struct {
  // This should really be a bool, but needs to be compatible with the address
  // list in io.c.
  float shutdown_on_warning_enable;
} ErrorConfig;
/* True when any non-critical error bit is set in `errors`. */
static inline bool IsNonCriticalError(uint32_t errors) {
  return (bool)(errors & MOTOR_ERRORS_NON_CRITICAL);
}
/* True when any critical error bit is set in `errors`. */
static inline bool IsCriticalError(uint32_t errors) {
  return (bool)(errors & MOTOR_ERRORS_CRITICAL);
}
/* True when any known warning bit is set in `warning`. */
static inline bool IsWarning(uint32_t warning) {
  return (bool)(warning & kMotorWarningAll);
}
extern ErrorConfig g_error_config;
/* Config flag is a float for io.c compatibility; > 0.5 means enabled. */
static inline bool ShutDownOnWarning(void) {
  return g_error_config.shutdown_on_warning_enable > 0.5f;
}
#endif  // AVIONICS_MOTOR_FIRMWARE_ERRORS_H_
| 1,386 |
4,639 | from django import http
from django.test import TestCase
from oscar.core.loading import get_class
CustomerHistoryManager = get_class('customer.history', 'CustomerHistoryManager')
class TestProductHistory(TestCase):
    """Exercises CustomerHistoryManager against a bare HTTP request."""

    def setUp(self):
        # Plain request/response objects; no cookies or session attached.
        self.request = http.HttpRequest()
        self.response = http.HttpResponse()

    def test_starts_with_empty_list(self):
        # With no history recorded, the manager must return an empty list.
        products = CustomerHistoryManager.get(self.request)
        self.assertEqual([], products)
| 157 |
639 | <filename>src/nnfusion/core/operators/generic_op/generic_op_define/All.cpp
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include "nnfusion/core/operators/generic_op/generic_op.hpp"
// "All" reduction op: reduces one axis of the input; the output shape drops
// the reduced axis, or keeps it with size 1 when keep_dims is set.
REGISTER_OP(All)
    .attr<int>("axis", -1)           // axis to reduce; -1 selects the last axis
    .attr<bool>("keep_dims", false)  // retain the reduced axis as size 1
    .infershape([](std::shared_ptr<graph::GNode> gnode) -> void {
        // Exactly one input tensor is expected.
        NNFUSION_CHECK(1 == gnode->get_input_size());
        auto& shape_0 = gnode->get_input_shape(0);
        auto generic_op = std::dynamic_pointer_cast<nnfusion::op::GenericOp>(gnode->get_op_ptr());
        bool keep_dims = generic_op->localOpConfig.getRoot()["keep_dims"];
        int axis = generic_op->localOpConfig.getRoot()["axis"];
        if (axis == -1)
        {
            // Normalize the default to the last axis of the input.
            axis = shape_0.size() - 1;
        }
        // Output shape: dims before axis, optional 1, dims after axis.
        nnfusion::Shape output_shape_0;
        for (int i = 0; i < axis; ++i)
            output_shape_0.push_back(shape_0[i]);
        if (keep_dims)
            output_shape_0.push_back(1);
        for (int i = axis + 1; i < shape_0.size(); ++i)
            output_shape_0.push_back(shape_0[i]);
        // Element type is unchanged by the reduction.
        gnode->set_output_type_and_shape(0, gnode->get_input_element_type(0), output_shape_0);
    });
| 556 |
348 | {"nom":"Serques","circ":"8ème circonscription","dpt":"Pas-de-Calais","inscrits":959,"abs":499,"votants":460,"blancs":46,"nuls":18,"exp":396,"res":[{"nuance":"REM","nom":"<NAME>","voix":237},{"nuance":"FN","nom":"<NAME>","voix":159}]} | 94 |
826 |
#define SYNC_STREAM cudaStreamDefault
void setsizes(long long N, dim3 *gridp, int *nthreadsp);
void setsizesLean(long long N, dim3 *gridp, int *nthreadsp);
int apply_links(float *A, int *L, float *C, int nrows, int ncols);
int apply_preds(float *A, int *L, float *C, int nrows, int ncols);
int apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols);
int apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols);
int apply_dlinks(double *A, int *L, double *C, int nrows, int ncols);
int apply_dpreds(double *A, int *L, double *C, int nrows, int ncols);
int apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols);
int apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols);
int veccmp(int *A, int *B, int *C);
int hammingdists(int *a, int *b, int *w, int *op, int *ow, int n);
int LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps);
int LDA_GibbsBino(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *Cv, float *P, int nsamps);
int LDA_Gibbs1(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k);
int LDA_Gibbsv(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float* nsamps);
int treePack(float *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed);
int treePackfc(float *fdata, int *treenodes, float *fcats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed);
int treePackInt(int *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed);
int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps, int impType);
int treeWalk(float *fdata, int *inodes, float *fnodes, int *itrees, int *ftrees, int *vtrees, float *ctrees,
int nrows, int ncols, int ntrees, int nnodes, int getcat, int nbits, int nlevels);
int floatToInt(int n, float *in, int *out, int nbits);
int jfeatsToIfeats(int itree, int *inodes, int *jfeats, int *ifeats, int n, int nfeats, int seed);
int findBoundaries(long long *keys, int *jc, int n, int njc, int shift);
int hashmult(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose);
int hashcross(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *Cdata, int *Cir, int *Cjc, float *D, int transpose);
int multinomial(int nrows, int ncols, float *A, int *B, float *Norm, int nvals);
int multinomial2(int nrows, int ncols, float *A, int *B, int nvals);
int multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM, float *Sumsq,
float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen,
float istep, int addgrad, float epsilon, int biasv, int nbr);
int multADAGradTile(int nrows, int ncols, int y, int x, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, float *MM,
float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen,
float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr);
int multGradTile(int nrows, int ncols, int y, int x, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, float *MM,
float *Mask, int maskrows, float *lrate, int lrlen, float limit, int biasv, int nbr);
int hashmultADAGrad(int nrows, int nfeats, int ncols, int bound1, int bound2, float *A, float *Bdata, int *Bir, int *Bjc, int transpose,
float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon);
int word2vecPos(int nrows, int ncols, int shift, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp);
int word2vecNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp);
int word2vecNegFilt(int nrows, int ncols, int nwords, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp);
int word2vecEvalPos(int nrows, int ncols, int shift, int *W, int *LB, int *UB, float *A, float *B, float *Retval);
int word2vecEvalNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *Retval);
int word2vecFwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C);
int word2vecBwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C, float lrate);
int apply_fwd(float *A, float *B, int ifn, int n);
int apply_deriv(float *A, float *B, float *C, int ifn, int n);
int ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve,
float *ts, int nts, float *lrate, int nlrate, float langevin, float eps, int doupdate);
int ADAGradm(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr,
float nw, float *ve, int nve, float *ts, int nts, float *lrate, int nlrate, float langevin, float eps, int doupdate);
int ADAGradn(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr,
float nw, float *ve, int nve, float *ts, int nts, float *lrate, int nlrate, float langevin, float eps, int doupdate);
int lstm_fwd(float *inC, float *LIN1, float *LIN2, float *LIN3, float *LIN4, float *outC, float *outH, int n);
int lstm_bwd(float *inC, float *LIN1, float *LIN2, float *LIN3, float *LIN4, float *doutC, float *doutH,
float *dinC, float *dLIN1, float *dLIN2, float *dLIN3, float *dLIN4, int n);
int pairembed(int *r1, int *r2, long long *res, int n);
int pairMultTile(int nrows, int ncols, int bound1, int bound2, float *A, int lda, float *A2, int lda2,
float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, float *C, int ldc, int transpose);
int pairMultADAGradTile(int nrows, int ncols, int bound1, int bound2, float *A, int lda, int aroff, int acoff,
float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, int transpose,
float *MM, int ldmm, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon);
int linComb(float *X, float wx, float *Y, float wy, float *Z, int len);
| 3,035 |
956 | <filename>scripts/automation/trex_control_plane/interactive/trex/wireless/trex_wireless_client.py
import threading
from enum import Enum
from scapy.contrib.capwap import *
from trex_openssl import *
from .trex_wireless_ap import *
from .trex_wireless_device import WirelessDevice
from .services.trex_stl_ap import *
from trex.utils.parsing_opts import check_mac_addr, check_ipv4_addr
from trex.stl.trex_stl_packet_builder_scapy import is_valid_ipv4_ret
class APClient(WirelessDevice):
    """A Client attached to an AP, as seen by a WirelessWorker."""

    # Seconds to wait for WLC association before giving up.
    wlc_assoc_timeout = 30

    def __init__(self, worker, mac, ip, ap, gateway_ip=None, client_info=None):
        """Create a Wireless Client attached to an AP.

        Args:
            worker: attached worker
            mac: mac address for the client, in str format
            ip: ip address for the client, in str format, or None in case of DHCP
            ap: attached AP
            gateway_ip: ip of the gateway, optional if e.g. DHCP, in byte format
            client_info (ClientInfo): original ClientInfo
        """
        name = "Client_{}".format(mac)
        super().__init__(worker, name, mac, gateway_ip, client_info)
        self.mac_bytes = mac2str(mac)
        self.mac = mac
        if not ip:
            # No static IP configured: an address will be obtained via DHCP.
            self.ip = None
            self.ip_bytes = None
            self.dhcp = True  # flag is set to True if DHCP is needed to get an IP address
        else:
            self.dhcp = False
            check_ipv4_addr(ip)
            self.ip_bytes = is_valid_ipv4_ret(ip)
            self.ip = ip
        check_mac_addr(self.mac)
        # assert isinstance(ap, AP)
        self.ap = ap
        self.reset()
        self.retries = 0
        self.state = ClientState.AUTHENTICATION
        self.gateway_ip = None
        # event awaken when the client is associated
        self.joined_event = threading.Event()

    @property
    def attached_devices_macs(self):
        # MACs handled by this device: the client's own and its AP's.
        return [self.mac, self.ap.mac]

    @property
    def is_closed(self):
        return self.state >= ClientState.CLOSE

    def is_running(self):
        return self.state == ClientState.RUN

    def reset(self):
        """Return the client to its initial, unassociated state."""
        self.got_disconnect = False
        self.is_associated = False
        self.is_connected = False
        self.connect_time = None  # time at which the client receives the association response
        self.seen_arp_reply = False
        self.state = ClientState.ASSOCIATION
        if self.dhcp:
            # A DHCP-assigned address is no longer valid after a reset.
            self.ip = None
            self.ip_bytes = None

    def disconnect(self):
        """Mark the client disconnected and signal any waiter."""
        self.logger.debug('disconnected')
        self.reset()
        self.got_disconnect = True
        try:
            # Fix: the original referenced an undefined name ``client`` here,
            # raising NameError at runtime; the event lives on ``self``.
            self.got_disconnected_event.succeed()
        except (RuntimeError, AttributeError):
            # already triggered or not waiting for this packet
            pass

    def __str__(self):
        return "Client {} - {}".format(self.ip, self.mac)
| 1,270 |
877 | <filename>checker/tests/resourceleak/CommonModuleCrash.java
import java.net.*;
// Regression input for the Resource Leak Checker (per the test path in the
// filename): the checker previously crashed on code shaped like this, so the
// test only needs to be analyzed without error, not to be leak-free.
class CommonModuleCrash {
    // Socket created in a field initializer.
    Socket bar = new Socket();

    static void baz(Socket s) {}

    static {
        // Socket created inside a static initializer and passed away.
        baz(new Socket());
    }
}
| 70 |
669 | <filename>onnxruntime/test/contrib_ops/bifurcation_detector_op_test.cc
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "gtest/gtest.h"
#include "test/common/tensor_op_test_utils.h"
#include "test/common/cuda_op_test_utils.h"
#include "test/providers/provider_test_utils.h"
namespace onnxruntime {
namespace test {
// Case 1: the predicted tokens diverge from the source at the end, so the
// suffix match index comes back as -1 and the prediction is appended.
TEST(BifurcationDetectorTest, Test1) {
  OpTester tester("BifurcationDetector", 1, onnxruntime::kMSDomain);
  tester.AddInput<int64_t>("src_tokens", {4}, {1, 5, 3, 4});
  tester.AddInput<int64_t>("cur_tokens", {1}, {2});
  tester.AddInput<int64_t>("prev_suffix_match_idx", {}, {0});
  tester.AddInput<int64_t>("pred_tokens", {5}, {1, 5, 3, 4, 2});
  tester.AddOutput<int64_t>("tokens", {6}, {2, 1, 5, 3, 4, 2});
  tester.AddOutput<int64_t>("suffix_match_idx", {}, {-1});
  // CPU-only run; other providers are not exercised here.
  std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
  execution_providers.push_back(DefaultCpuExecutionProvider());
  tester.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
}
// Case 2: variant without prediction inputs — current tokens pass through
// unchanged and the op advances find_end_idx (0 -> 9).
TEST(BifurcationDetectorTest, Test2) {
  OpTester tester("BifurcationDetector", 1, onnxruntime::kMSDomain);
  tester.AddInput<int64_t>("src_tokens", {26}, {756, 194, 39, 1015, 5529, 1216, 24, 72, 23, 1976, 6174, 1340,
                                                6, 39, 194, 2161, 1480, 4955, 8, 7806, 65, 1091, 8, 560,
                                                4077, 196});
  tester.AddInput<int64_t>("cur_tokens", {6}, {2, 756, 194, 39, 8155, 23});
  tester.AddInput<int64_t>("find_end_idx", {}, {0});
  tester.AddOutput<int64_t>("tokens", {6}, {2, 756, 194, 39, 8155, 23});
  tester.AddOutput<int64_t>("new_end_idx", {}, {9});
  std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
  execution_providers.push_back(DefaultCpuExecutionProvider());
  tester.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
}
} // namespace test
} // namespace onnxruntime
| 836 |
3,457 | <filename>sandbox/jacobi/poisson.hpp
#ifndef KASTORS_POISSON_H
#define KASTORS_POISSON_H
#include <cassert>
#include <string>
/******************************************************************************/
/*
Purpose:
SWEEP carries out one step of the Jacobi iteration.
Discussion:
Assuming DX = DY, we can approximate
- (d/dx d/dx + d/dy d/dy) U(X,Y)
by
(U(i-1,j) + U(i+1,j) + U(i,j-1) + U(i,j+1) - 4*U(i,j)) / dx / dy
The discretization employed below will not be correct in the general
case where DX and DY are not equal. It's only a little more complicated
to allow DX and DY to be different, but we're not going to worry about
that right now.
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
14 December 2011
Author:
<NAME>
Parameters:
Input, int NX, NY, the X and Y grid dimensions.
Input, double DX, DY, the spacing between grid points.
Input, double F[NX][NY], the right hand side data.
Input, int ITOLD, the iteration index on input.
Input, int ITNEW, the desired iteration index
on output.
Input, double U[NX][NY], the solution estimate on
iteration ITNEW-1.
Input/output, double UNEW[NX][NY], on input, the solution
estimate on iteration ITOLD. On output, the solution estimate on
iteration ITNEW.
*/
void sweep(int nx, int ny, double dx, double dy, double *f,
int itold, int itnew, double *u, double *unew, int block_size);
void sweep_seq(int nx, int ny, double dx, double dy, double *f,
int itold, int itnew, double *u, double *unew);
/* Benchmark run configuration shared by all Jacobi variants.
 * NOTE(review): field semantics inferred from names — confirm against run():
 * `check` presumably enables result verification and `succeed` reports it. */
struct user_parameters {
    int check;        /* nonzero: verify result (assumed) — TODO confirm */
    int succeed;      /* outcome of the check (assumed) — TODO confirm */
    int niter;        /* Jacobi iterations per timed run */
    int titer;        /* presumably timing repetitions — TODO confirm */
    int matrix_size;  /* grid dimension (nx == ny) */
    int blocksize;    /* block edge length for blocked variants */
};
double run(struct user_parameters* params, unsigned num_threads, std::string model);
//////////////////////////////////////////////////////////////////////////
void omp_block_for(int nx, int ny, double dx, double dy, double *f,
int itold, int itnew, double *u, double *unew, int block_size);
void omp_block_task(int nx, int ny, double dx, double dy, double *f,
int itold, int itnew, double *u, double *unew, int block_size);
void omp_block_task_dep(int nx, int ny, double dx, double dy, double *f,
int itold, int itnew, double *u, double *unew, int block_size);
void omp_task(int nx, int ny, double dx, double dy, double *f,
int itold, int itnew, double *u, double *unew, int block_size);
void omp_task_dep(int nx, int ny, double dx, double dy, double *f,
int itold, int itnew, double *u, double *unew, int block_size);
//////////////////////////////////////////////////////////////////////////
void taskflow(int nx, int ny, double dx, double dy, double *f,
int itold, int itnew, double *u, double *unew, int block_size, unsigned num_threads);
//////////////////////////////////////////////////////////////////////////
/* Copies one block_size x block_size tile of `unew` into `u`.
 * (block_x, block_y) select the tile in block coordinates; the arrays are
 * reinterpreted as nx-by-ny 2-D arrays via a VLA-pointer cast. */
static inline void copy_block(int nx, int ny, int block_x, int block_y, double *u_, double *unew_, int block_size) {
    int i, j, start_i, start_j;
    double (*u)[nx][ny] = (double (*)[nx][ny])u_;
    double (*unew)[nx][ny] = (double (*)[nx][ny])unew_;
    start_i = block_x * block_size;
    start_j = block_y * block_size;
    for (i = start_i; i < start_i + block_size; i++) {
        for (j = start_j; j < start_j + block_size; j++) {
            /* Tiles must lie fully inside the grid. */
            assert((i < nx) && (j < ny));
            (*u)[i][j] = (*unew)[i][j];
        }
    }
}
/* Performs one Jacobi update on a single tile: boundary cells take the
 * right-hand side f directly, interior cells get the 4-neighbor average plus
 * the f * dx * dy source term (valid only when dx == dy; see file header). */
static inline void compute_estimate(int block_x, int block_y, double *u_,
                                    double *unew_, double *f_, double dx,
                                    double dy, int nx, int ny, int block_size) {
    int i, j, start_i, start_j;
    double (*f)[nx][ny] = (double (*)[nx][ny])f_;
    double (*u)[nx][ny] = (double (*)[nx][ny])u_;
    double (*unew)[nx][ny] = (double (*)[nx][ny])unew_;
    start_i = block_x * block_size;
    start_j = block_y * block_size;
    for (i = start_i; i < start_i + block_size; i++) {
        for (j = start_j; j < start_j + block_size; j++) {
            if (i == 0 || j == 0 || i == nx - 1 || j == ny - 1) {
                /* Boundary condition: copy the RHS value. */
                (*unew)[i][j] = (*f)[i][j];
            } else {
                /* 5-point Jacobi stencil reading the previous iterate u. */
                (*unew)[i][j] = 0.25 * ((*u)[i-1][j] + (*u)[i][j+1]
                                        + (*u)[i][j-1] + (*u)[i+1][j]
                                        + (*f)[i][j] * dx * dy);
            }
        }
    }
}
/* Sequential reference implementation: runs Jacobi iterations itold+1..itnew
 * over the whole nx-by-ny grid. Each iteration first snapshots unew into u,
 * then recomputes unew from u (boundary cells copy f; interior cells apply
 * the 5-point stencil). Serves as the baseline the parallel variants match. */
inline void sweep_seq(int nx, int ny, double dx, double dy, double *f_,
                      int itold, int itnew, double *u_, double *unew_)
{
    int i;
    int it;
    int j;
    double (*f)[nx][ny] = (double (*)[nx][ny])f_;
    double (*u)[nx][ny] = (double (*)[nx][ny])u_;
    double (*unew)[nx][ny] = (double (*)[nx][ny])unew_;
    for (it = itold + 1; it <= itnew; it++) {
        /* u <- unew: keep the previous iterate for the stencil reads. */
        for (i = 0; i < nx; i++) {
            for (j = 0; j < ny; j++) {
                (*u)[i][j] = (*unew)[i][j];
            }
        }
        for (i = 0; i < nx; i++) {
            for (j = 0; j < ny; j++) {
                if (i == 0 || j == 0 || i == nx - 1 || j == ny - 1) {
                    /* Boundary condition: copy the RHS value. */
                    (*unew)[i][j] = (*f)[i][j];
                }
                else {
                    (*unew)[i][j] = 0.25 * ((*u)[i-1][j] + (*u)[i][j+1]
                                            + (*u)[i][j-1] + (*u)[i+1][j]
                                            + (*f)[i][j] * dx * dy);
                }
            }
        }
    }
}
#endif
| 2,128 |
1,500 | <filename>tests/garage/envs/test_normalized_gym.py
from garage.envs import GymEnv, normalize
class TestNormalizedGym:
    """Tests for the ``normalize`` wrapper around a Gym CartPole env."""

    def setup_method(self):
        # Wrap CartPole with reward and observation normalization, flattening
        # observations into 1-D arrays.
        self.env = normalize(GymEnv('CartPole-v1'),
                             normalize_reward=True,
                             normalize_obs=True,
                             flatten_obs=True)

    def teardown_method(self):
        self.env.close()

    def test_does_not_modify_action(self):
        # Stepping must not mutate the caller-supplied action object.
        a = self.env.action_space.sample()
        a_copy = a
        self.env.reset()
        self.env.step(a)
        assert a == a_copy

    def test_flatten(self):
        # Flattened observations must match the observation space's shape.
        for _ in range(10):
            self.env.reset()
            self.env.visualize()
            for _ in range(5):
                action = self.env.action_space.sample()
                es = self.env.step(action)
                next_obs, done = es.observation, es.terminal
                assert next_obs.shape == self.env.observation_space.low.shape
                if done:
                    break

    def test_unflatten(self):
        # Re-flattening an observation must yield the space's flat dimension.
        for _ in range(10):
            self.env.reset()
            for _ in range(5):
                action = self.env.action_space.sample()
                es = self.env.step(action)
                next_obs, done = es.observation, es.terminal
                # yapf: disable
                assert (self.env.observation_space.flatten(next_obs).shape
                        == self.env.observation_space.flat_dim)
                # yapf: enable
                if done:
                    break
| 867 |
31,928 | from localstack import config
from localstack.services.infra import start_moto_server
def start_rg(port=None, asynchronous=False, update_listener=None):
    """Start the moto-backed Resource Groups API server.

    Falls back to ``config.PORT_RESOURCE_GROUPS`` when no (truthy) port is
    supplied; the remaining arguments are forwarded to ``start_moto_server``.
    """
    chosen_port = port if port else config.PORT_RESOURCE_GROUPS
    return start_moto_server(
        "resource-groups",
        chosen_port,
        name="Resource Groups API",
        asynchronous=asynchronous,
        update_listener=update_listener,
    )
| 147 |
5,800 | // ArduinoJson - https://arduinojson.org
// Copyright <NAME> 2014-2021
// MIT License
#pragma once
#include <string>
// Minimal char_traits / allocator subclasses used to exercise support for
// non-default std::basic_string instantiations.
struct custom_char_traits : std::char_traits<char> {};
struct custom_allocator : std::allocator<char> {};

// std::string-like type built from the custom traits and allocator above.
using custom_string =
    std::basic_string<char, custom_char_traits, custom_allocator>;
| 111 |
315 | <filename>velocity-spring-boot-autoconfigure/src/main/java/com/alibaba/boot/velocity/tools/AbstractVelocityToolsScanner.java
package com.alibaba.boot.velocity.tools;
import com.alibaba.boot.velocity.util.CompatibleRelaxedPropertyResolver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.core.env.Environment;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
/**
* Abstract {@link VelocityToolsScanner} implementation
*
* @author <a href="mailto:<EMAIL>">Mercy</a>
* @see VelocityToolsScanner
* @since 1.0.3
*/
public abstract class AbstractVelocityToolsScanner implements VelocityToolsScanner {

    /** Logger bound to the concrete subclass for clearer log origins. */
    protected final Log logger = LogFactory.getLog(getClass());

    // Set via setEnvironment(); resolves relaxed-binding property names.
    private CompatibleRelaxedPropertyResolver propertyResolver;

    /**
     * Template method: resolves the sources via {@link #resolveSources()},
     * delegates the actual scan to the subclass, and returns an unmodifiable
     * map of tool name to {@link VelocityTool}.
     */
    @Override
    public final Map<String, VelocityTool> scan() {
        Map<String, VelocityTool> velocityToolsMap = new HashMap<String, VelocityTool>();
        String[] sources = resolveSources();
        scan(sources, velocityToolsMap);
        if (logger.isInfoEnabled()) {
            logger.info(getClass().getSimpleName() + " scanned " + velocityToolsMap.size() +
                    " velocity tools [" + velocityToolsMap.values() +
                    "] from sources[" + Arrays.asList(sources) + "]");
        }
        return unmodifiableMap(velocityToolsMap);
    }

    /** Returns the sources (e.g. package or resource names) to scan. */
    protected abstract String[] resolveSources();

    /**
     * Resolves relaxed property as string array from {@link Environment}
     *
     * @param propertyName the name of property
     * @return non-null property values, if it's absent , return empty string array.
     */
    protected String[] resolveRelaxedPropertyAsStringArray(String propertyName) {
        return propertyResolver.getProperty(propertyName, String[].class, new String[0]);
    }

    /**
     * Scan
     *
     * @param sources sources
     * @param velocityToolsMap The map of Velocity tools required to added if possible.
     */
    protected abstract void scan(String[] sources, Map<String, VelocityTool> velocityToolsMap);

    /** Must be called before {@link #scan()} so property resolution works. */
    public void setEnvironment(Environment environment) {
        this.propertyResolver = new CompatibleRelaxedPropertyResolver(environment);
    }
}
| 788 |
988 | <filename>platform/keyring.impl/src/org/netbeans/modules/keyring/gnome/libsecret/GnomeLibSecretProvider.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.modules.keyring.gnome.libsecret;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.ptr.PointerByReference;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.netbeans.modules.keyring.gnome.libsecret.Glib.GError;
import org.netbeans.modules.keyring.gnome.libsecret.LibSecret.SecretSchema;
import org.netbeans.modules.keyring.gnome.libsecret.LibSecret.SecretSchemaAttribute;
import org.netbeans.modules.keyring.impl.KeyringSupport;
import org.netbeans.spi.keyring.KeyringProvider;
import org.openide.util.lookup.ServiceProvider;
// Keyring provider backed by GNOME libsecret, accessed through JNA.
@ServiceProvider(service = KeyringProvider.class, position = 90)
public class GnomeLibSecretProvider implements KeyringProvider {

    private static final Logger LOG = Logger.getLogger(GnomeLibSecretProvider.class.getName());
    // Name of the single schema attribute used to look entries up.
    private static final String KEY = "key"; // NOI18N
    // JNA's default native string encoding; used for password byte<->char
    // conversion so round-trips match what libsecret stores.
    private static final Charset CHARSET = Charset.forName(Native.getDefaultStringEncoding());
    private final String appName;
    // Lazily created by getSchema(); one schema per application.
    private SecretSchema secretSchema = null;

    public GnomeLibSecretProvider() {
        appName = KeyringSupport.getAppName();
    }

    // Builds (once) a schema named after the app with a single string
    // attribute "key" that carries the entry's lookup key.
    private SecretSchema getSchema() {
        if (secretSchema != null) {
            return secretSchema;
        }
        secretSchema = new SecretSchema();
        secretSchema.name = appName;
        secretSchema.flags = LibSecret.SECRET_SCHEMA_NONE;
        secretSchema.attributes[0] = new SecretSchemaAttribute();
        secretSchema.attributes[0].name = KEY;
        secretSchema.attributes[0].type = LibSecret.SECRET_SCHEMA_ATTRIBUTE_STRING;
        return secretSchema;
    }

    // Probes availability by attempting a read of a nonexistent key; the
    // provider is disabled via system property, on runtime errors, or when
    // the native library cannot be linked.
    @Override
    public boolean enabled() {
        if (Boolean.getBoolean("netbeans.keyring.no.native")) {
            LOG.fine("native keyring integration disabled");
            return false;
        }
        try {
            read("NoNeXiStEnT"); // NOI18N
            return true;
        } catch (RuntimeException e) {
            LOG.log(Level.WARNING, null, e);
            return false;
        } catch (UnsatisfiedLinkError e) {
            LOG.log(Level.FINE, null, e);
            return false;
        }
    }

    // Returns the stored password for key, or null when absent or on error.
    @Override
    public char[] read(String key) {
        PointerByReference gerrorBuffer = new PointerByReference();
        SecretSchema schema = getSchema();
        Pointer pointer = LibSecret.INSTANCE.secret_password_lookup_sync(schema, null, gerrorBuffer, KEY, key);
        if (gerrorBuffer.getValue() != null) {
            // processError throws; the return below is unreachable in practice.
            processError(gerrorBuffer);
            return null;
        }
        if (pointer == null) {
            return null;
        }
        return decode(readZeroTerminatedBytes(pointer));
    }

    // Stores the password under key; the label shown in the keyring UI is
    // "<appName> - <description or key>".
    @Override
    public void save(String key, char[] password, String description) {
        PointerByReference gerrorBuffer = new PointerByReference();
        SecretSchema schema = getSchema();
        String label = appName + " - " + (description != null ? description : key);
        LibSecret.INSTANCE.secret_password_store_sync(schema, LibSecret.SECRET_COLLECTION_DEFAULT, label, encode(password), null, gerrorBuffer, KEY, key);
        if (gerrorBuffer.getValue() != null) {
            processError(gerrorBuffer);
        }
    }

    // Removes the entry stored under key, if any.
    @Override
    public void delete(String key) {
        PointerByReference gerrorBuffer = new PointerByReference();
        SecretSchema schema = getSchema();
        LibSecret.INSTANCE.secret_password_clear_sync(schema, null, gerrorBuffer, KEY, key);
        if (gerrorBuffer.getValue() != null) {
            processError(gerrorBuffer);
        }
    }

    // Converts a GError into a RuntimeException and always frees the GError.
    // NOTE(review): the declared `throws IllegalArgumentException` does not
    // match the RuntimeException actually thrown — confirm and align.
    private void processError(PointerByReference gerrorBuffer) throws IllegalArgumentException {
        try {
            GError gerror = Structure.newInstance(GError.class, gerrorBuffer.getValue());
            gerror.read();
            throw new RuntimeException(String.format("%d/%d: %s", gerror.domain, gerror.code, gerror.message));
        } finally {
            Glib.INSTANCE.g_error_free(gerrorBuffer.getValue());
        }
    }

    // Encodes the password and appends the NUL terminator libsecret expects.
    private byte[] encode(char[] password) {
        ByteBuffer encodedPasswordBuffer = CHARSET.encode(CharBuffer.wrap(password));
        byte[] encodedPassword = new byte[encodedPasswordBuffer.limit() + 1]; // zero terminated
        encodedPasswordBuffer.get(encodedPassword, 0, encodedPasswordBuffer.limit());
        return encodedPassword;
    }

    // Decodes stored bytes back into a char[] using the same charset.
    private char[] decode(byte[] bytes) {
        CharBuffer decodedPasswordBuffer = CHARSET.decode(ByteBuffer.wrap(bytes));
        char[] decodedPassword = new char[decodedPasswordBuffer.limit()];
        decodedPasswordBuffer.get(decodedPassword, 0, decodedPasswordBuffer.limit());
        return decodedPassword;
    }

    // Reads bytes from native memory up to the first NUL, bounded at 100000
    // bytes as a safety cap against missing terminators.
    private byte[] readZeroTerminatedBytes(Pointer pointer) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        for (int i = 0; i < 100000; i++) {
            byte curVal = pointer.getByte(i);
            if (curVal == 0) {
                break;
            }
            baos.write(curVal);
        }
        return baos.toByteArray();
    }
}
| 2,324 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
package com.sun.star.wizards.web;
import javax.swing.ListModel;
import com.sun.star.awt.KeyEvent;
import com.sun.star.awt.XControl;
import com.sun.star.awt.XKeyListener;
import com.sun.star.awt.XWindow;
import com.sun.star.container.NoSuchElementException;
import com.sun.star.lang.EventObject;
import com.sun.star.lang.XMultiServiceFactory;
import com.sun.star.uno.UnoRuntime;
import com.sun.star.util.XCloseable;
import com.sun.star.wizards.common.Configuration;
import com.sun.star.wizards.common.FileAccess;
import com.sun.star.wizards.common.Helper;
import com.sun.star.wizards.common.JavaTools;
import com.sun.star.wizards.common.SystemDialog;
import com.sun.star.wizards.common.PropertyNames;
import com.sun.star.wizards.ui.UnoDialog;
import com.sun.star.wizards.ui.event.DataAware;
import com.sun.star.wizards.ui.event.ListModelBinder;
import com.sun.star.wizards.ui.event.Task;
import com.sun.star.wizards.web.data.CGDocument;
import com.sun.star.wizards.web.data.CGPublish;
import com.sun.star.wizards.web.data.CGSession;
import com.sun.star.wizards.web.data.CGSessionName;
/**
* This class implements the ui-events of the
* web wizard.
* it is therfore sorted to steps.
* not much application-logic here - just plain
* methods which react to events.
* The only exception are the finish methods with the save
* session methods.
*/
public abstract class WWD_Events extends WWD_Startup
{
private static final short[] EMPTY_SHORT_ARRAY = new short[0];
/**
* Tracks the current loaded session.
* If PropertyNames.EMPTY_STRING - it means the current session is the default one (empty)
* If a session is loaded, this will be the name of the loaded session.
*/
protected String currentSession = PropertyNames.EMPTY_STRING;
/**
* He - my constructor !
* I add a window listener, which, when
* the window closes, deltes the temp directory.
*/
public WWD_Events(XMultiServiceFactory xmsf) throws Exception
{
super(xmsf);
Create c = new Create();
XWindow xWindow = UnoRuntime.queryInterface(XWindow.class, chkFTP);
xWindow.addKeyListener(c);
xWindow = UnoRuntime.queryInterface(XWindow.class, chkLocalDir);
xWindow.addKeyListener(c);
xWindow = UnoRuntime.queryInterface(XWindow.class, chkZip);
xWindow.addKeyListener(c);
}
/* *********************************************************
* *******************************************************
* EVENT and UI METHODS
* *******************************************************
* *********************************************************/
protected void leaveStep(int nOldStep, int nNewStep)
{
if (nOldStep == 1 && nNewStep == 2)
{
// 1. check if the selected session is the same as the current one.
}
}
protected void enterStep(int old, int newStep)
{
if ((old == 1))
{
String sessionToLoad = PropertyNames.EMPTY_STRING;
short[] s = (short[]) Helper.getUnoPropertyValue(getModel(lstLoadSettings), PropertyNames.SELECTED_ITEMS);
if (s.length == 0 || s[0] == 0)
{
sessionToLoad = PropertyNames.EMPTY_STRING;
}
else
{
sessionToLoad = ((CGSessionName) settings.cp_SavedSessions.getElementAt(s[0])).cp_Name;
}
if (!sessionToLoad.equals(currentSession))
{
loadSession(sessionToLoad);
}
}
if (newStep == 5)
{
}
}
/* *********************************
* STEP 1
*/
/**
* Called from the Uno event dispatcher when the
* user selects a saved session.
*/
public void sessionSelected()
{
short[] s = (short[]) Helper.getUnoPropertyValue(getModel(lstLoadSettings), PropertyNames.SELECTED_ITEMS);
setEnabled(btnDelSession, s.length > 0 && s[0] > 0);
}
/**
* Ha ! the session should be loaded :-)
*/
public void loadSession(final String sessionToLoad)
{
try
{
final StatusDialog sd = getStatusDialog();
final Task task = new Task("LoadDocs", PropertyNames.EMPTY_STRING, 10);
sd.execute(this, task, resources.resLoadingSession);
task.start();
setSelectedDoc(EMPTY_SHORT_ARRAY);
Helper.setUnoPropertyValue(getModel(lstDocuments), PropertyNames.SELECTED_ITEMS, EMPTY_SHORT_ARRAY);
Helper.setUnoPropertyValue(getModel(lstDocuments), PropertyNames.STRING_ITEM_LIST, EMPTY_STRING_ARRAY);
Object view = null;
if (sessionToLoad.equals(PropertyNames.EMPTY_STRING))
{
view = Configuration.getConfigurationRoot(xMSF, CONFIG_PATH + "/DefaultSession", false);
}
else
{
view = Configuration.getConfigurationRoot(xMSF, CONFIG_PATH + "/SavedSessions", false);
view = Configuration.getNode(sessionToLoad, view);
}
CGSession session = new CGSession();
session.setRoot(settings);
session.readConfiguration(view, CONFIG_READ_PARAM);
task.setMax(session.cp_Content.cp_Documents.getSize() * 5 + 7);
task.advance(true);
if (sessionToLoad.equals(PropertyNames.EMPTY_STRING))
{
setSaveSessionName(session);
}
mount(session, task, false, sd.xControl);
checkSteps();
currentSession = sessionToLoad;
while (task.getStatus() <= task.getMax())
{
task.advance(false);
}
task.removeTaskListener(sd);
}
catch (Exception ex)
{
unexpectedError(ex);
}
try
{
refreshStylePreview();
updateIconsetText();
}
catch (Exception e)
{
// TODO Auto-generated catch block
e.printStackTrace();
}
}
/**
* hmm. the user clicked the delete button.
*/
public void delSession()
{
short[] selected = (short[]) Helper.getUnoPropertyValue(getModel(lstLoadSettings), PropertyNames.SELECTED_ITEMS);
if (selected.length == 0)
{
return;
}
if (selected[0] == 0)
{
return;
}
boolean confirm = AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), resources.resDelSessionConfirm, ErrorHandler.ERROR_QUESTION_NO);
if (confirm)
{
try
{
String name = (String) settings.cp_SavedSessions.getKey(selected[0]);
// first delete the session from the registry/configuration.
Configuration.removeNode(xMSF, CONFIG_PATH + "/SavedSessions", name);
// then delete the session from the java-set (settings.cp_SavedSessions)
settings.cp_SavedSessions.remove(selected[0]);
settings.savedSessions.remove(selected[0] - 1);
short[] nextSelected = new short[]
{
(short) 0
};
// We try to select the same item index again, if possible
if (settings.cp_SavedSessions.getSize() > selected[0])
{
nextSelected[0] = selected[0];
}
else
// this will always be available because
// the user can not remove item 0.
{
nextSelected[0] = (short) (selected[0] - 1); // if the <none> session will be selected, disable the remove button...
}
if (nextSelected[0] == 0)
{
Helper.setUnoPropertyValue(getModel(btnDelSession), PropertyNames.PROPERTY_ENABLED, Boolean.FALSE); // select...
}
Helper.setUnoPropertyValue(getModel(lstLoadSettings), PropertyNames.SELECTED_ITEMS, nextSelected);
//ListModelBinder.fillComboBox(cbSaveSettings, settings.savedSessions.items(), null);
}
catch (Exception ex)
{
ex.printStackTrace();
unexpectedError(ex);
}
}
}
/* ********************************
* STEP 2
*/
/**
* A method used by the UnoDataAware attached
* to the Documents listbox.
* See the concept of the DataAware objects to undestand
* why it is there...
*/
public short[] getSelectedDoc()
{
return selectedDoc;
}
private static String[] EMPTY_STRING_ARRAY = new String[0];
/* public void loadSessionSelected() {
UIHelper.setEnabled(btnLoadSession,true);
UIHelper.setEnabled(btnDelSession,true);
}
*/
/**
* when the user clicks another document
* in the listbox, this method is called,
* and couses the display in
* the textboxes title,description, author and export format
* to change
*/
public void setSelectedDoc(short[] s)
{
CGDocument oldDoc = getDoc(selectedDoc);
CGDocument doc = getDoc(s);
if (doc == null)
{
fillExportList(EMPTY_STRING_ARRAY);
//I try to avoid refreshing the export list if
//the same type of document is chosen.
}
else if (oldDoc == null || (!oldDoc.appType.equals(doc.appType)))
{
fillExportList(settings.getExporters(doc.appType));
}
selectedDoc = s;
mount(doc, docAware);
disableDocUpDown();
}
/**
* The user clicks the "Add" button.
* This will open a "FileOpen" dialog,
* and, if the user chooses more than one file,
* will open a status dialog, when validating each document.
*/
public void addDocument()
{
final String[] files = getDocAddDialog().callOpenDialog(true, settings.cp_DefaultSession.cp_InDirectory);
if (files == null)
{
return;
}
final Task task = new Task(PropertyNames.EMPTY_STRING, PropertyNames.EMPTY_STRING, files.length * 5);
/*
* If more than a certain number
* of documents have been added,
* open the status dialog.
*/
if (files.length > MIN_ADD_FILES_FOR_DIALOG)
{
StatusDialog sd = getStatusDialog();
sd.setLabel(resources.resValidatingDocuments);
sd.execute(this, task, resources.prodName); // new LoadDocs( sd.xControl, files, task )
LoadDocs oLoadDocs = new LoadDocs(this.xControl, files, task);
oLoadDocs.loadDocuments();
task.removeTaskListener(sd);
}
/*
* When adding a single document, do not use a
* status dialog...
*/
else
{
LoadDocs oLoadDocs = new LoadDocs(this.xControl, files, task);
oLoadDocs.loadDocuments();
}
}
/**
* The user clicked delete.
*/
public void removeDocument()
{
if (selectedDoc.length == 0)
{
return;
}
settings.cp_DefaultSession.cp_Content.cp_Documents.remove(selectedDoc[0]);
// update the selected document
while (selectedDoc[0] >= getDocsCount())
{
selectedDoc[0]--; // if there are no documents...
}
if (selectedDoc[0] == -1)
{
selectedDoc = EMPTY_SHORT_ARRAY; // update the list to show the right selection.
}
docListDA.updateUI();
// disables all the next steps, if the list of docuemnts
// is empty.
checkSteps();
}
/**
* doc up.
*/
public void docUp()
{
Object doc = settings.cp_DefaultSession.cp_Content.cp_Documents.getElementAt(selectedDoc[0]);
settings.cp_DefaultSession.cp_Content.cp_Documents.remove(selectedDoc[0]);
settings.cp_DefaultSession.cp_Content.cp_Documents.add(--selectedDoc[0], doc);
docListDA.updateUI();
disableDocUpDown();
}
/**
* doc down
*/
public void docDown()
{
Object doc = settings.cp_DefaultSession.cp_Content.cp_Documents.getElementAt(selectedDoc[0]);
settings.cp_DefaultSession.cp_Content.cp_Documents.remove(selectedDoc[0]);
settings.cp_DefaultSession.cp_Content.cp_Documents.add(++selectedDoc[0], doc);
docListDA.updateUI();
disableDocUpDown();
}
/* ******************************
* STEP 5
*/
/**
* invoked when the user clicks "Choose backgrounds" button.
*/
private ImageListDialog bgDialog;
/**
* the user clicked the "backgrounds" button
*/
public void chooseBackground()
{
try
{
setEnabled(btnBackgrounds, false);
if (bgDialog == null)
{
bgDialog = new BackgroundsDialog(xMSF, settings.cp_BackgroundImages, resources);
bgDialog.createWindowPeer(xControl.getPeer());
}
bgDialog.setSelected(settings.cp_DefaultSession.cp_Design.cp_BackgroundImage);
short i = bgDialog.executeDialog((UnoDialog) WWD_Events.this);
if (i == 1) //ok
{
setBackground(bgDialog.getSelected());
}
}
catch (Exception ex)
{
ex.printStackTrace();
}
finally
{
setEnabled(btnBackgrounds, true);
}
}
/**
* invoked when the BackgorundsDialog is "OKed".
*/
public void setBackground(Object background)
{
if (background == null)
{
background = PropertyNames.EMPTY_STRING;
}
settings.cp_DefaultSession.cp_Design.cp_BackgroundImage = (String) background;
refreshStylePreview();
}
private IconsDialog iconsDialog;
/**
* is called when the user clicks "Icon sets" button.
*
*/
public void chooseIconset()
{
try
{
setEnabled(btnIconSets, false);
if (iconsDialog == null)
{
iconsDialog = new IconsDialog(xMSF, settings.cp_IconSets, resources);
iconsDialog.createWindowPeer(xControl.getPeer());
}
iconsDialog.setIconset(settings.cp_DefaultSession.cp_Design.cp_IconSet);
short i = iconsDialog.executeDialog((UnoDialog) WWD_Events.this);
if (i == 1) //ok
{
setIconset(iconsDialog.getIconset());
}
}
catch (Exception ex)
{
ex.printStackTrace();
}
finally
{
setEnabled(btnIconSets, true);
}
}
/**
* invoked when the Iconsets Dialog is OKed.
*/
public void setIconset(String icon)
{
settings.cp_DefaultSession.cp_Design.cp_IconSet = icon;
updateIconsetText();
}
/* ******************************
* STEP 7
*/
/**
* sets the publishing url of either a local/zip or ftp publisher.
* updates the ui....
*/
private CGPublish setPublishUrl(String publisher, String url, int number)
{
if (url == null)
{
return null;
}
CGPublish p = getPublisher(publisher);
p.cp_URL = url;
p.cp_Publish = true;
updatePublishUI(number);
p.overwriteApproved = true;
return p;
}
/**
* updates the ui of a certain publisher
* (the text box url)
* @param number
*/
private void updatePublishUI(int number)
{
((DataAware) pubAware.get(number)).updateUI();
((DataAware) pubAware.get(number + 1)).updateUI();
checkPublish();
}
/**
* The user clicks the local "..." button.
*
*/
public void setPublishLocalDir()
{
String dir = showFolderDialog("Local destination directory", PropertyNames.EMPTY_STRING, settings.cp_DefaultSession.cp_OutDirectory);
//if ok was pressed...
setPublishUrl(LOCAL_PUBLISHER, dir, 0);
}
/**
* The user clicks the "Configure" FTP button.
*
*/
public void setFTPPublish()
{
if (showFTPDialog(getPublisher(FTP_PUBLISHER)))
{
getPublisher(FTP_PUBLISHER).cp_Publish = true;
updatePublishUI(2);
}
}
/**
* show the ftp dialog
* @param pub
* @return true if OK was pressed, otherwise false.
*/
private boolean showFTPDialog(CGPublish pub)
{
try
{
return getFTPDialog(pub).execute(this) == 1;
}
catch (Exception ex)
{
ex.printStackTrace();
return false;
}
}
/**
* the user clicks the zip "..." button.
* Choose a zip file...
*/
public void setZipFilename()
{
SystemDialog sd = getZipDialog();
String zipFile = sd.callStoreDialog(settings.cp_DefaultSession.cp_OutDirectory, resources.resDefaultArchiveFilename);
setPublishUrl(ZIP_PUBLISHER, zipFile, 4);
getPublisher(ZIP_PUBLISHER).overwriteApproved = true;
}
private TOCPreview docPreview;
/**
* the user clicks the "Preview" button.
*/
public void documentPreview()
{
try
{
if (docPreview == null)
{
docPreview = new TOCPreview(xMSF, settings, resources, stylePreview.tempDir, myFrame);
}
docPreview.refresh(settings);
}
catch (Exception ex)
{
unexpectedError(ex);
}
}
/* **********************
* FINISH
*/
/**
* This method checks if the given target's path, added the pathExtension argument,
* exists, and asks the user what to do about it.
* If the user says its all fine, then the target will
* be replaced.
* @return true if "create" should continue. false if "create" should abort.
*/
private boolean publishTargetApproved()
{
boolean result = true;
// 1. check local publish target
CGPublish p = getPublisher(LOCAL_PUBLISHER);
// should publish ?
if (p.cp_Publish)
{
String path = getFileAccess().getPath(p.url, null);
// target exists?
if (getFileAccess().exists(p.url, false))
{
//if its a directory
if (getFileAccess().isDirectory(p.url))
{
//check if its empty
String[] files = getFileAccess().listFiles(p.url, true);
if (files.length > 0)
{
/* it is not empty :-(
* it either a local publisher or an ftp (zip uses no directories
* as target...)
*/
String message = JavaTools.replaceSubString(resources.resLocalTragetNotEmpty,
path, "%FILENAME");
result = AbstractErrorHandler.showMessage(
xMSF, xControl.getPeer(), message,
ErrorHandler.MESSAGE_WARNING, ErrorHandler.BUTTONS_YES_NO,
ErrorHandler.DEF_NO, ErrorHandler.RESULT_YES);
if (!result)
{
return result;
}
}
}
else
{//not a directory, but still exists
String message = JavaTools.replaceSubString(resources.resLocalTargetExistsAsfile,
path, "%FILENAME");
AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message, ErrorHandler.ERROR_PROCESS_FATAL);
return false;
}
// try to write to the path...
}
else
{
// the local target directory does not exist.
String message = JavaTools.replaceSubString(resources.resLocalTargetCreate,
path, "%FILENAME");
try
{
result = AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message,
ErrorHandler.ERROR_QUESTION_YES);
}
catch (Exception ex)
{
ex.printStackTrace();
}
if (!result)
{
return result;
// try to create the directory...
}
try
{
getFileAccess().fileAccess.createFolder(p.cp_URL);
}
catch (Exception ex)
{
message = JavaTools.replaceSubString(resources.resLocalTargetCouldNotCreate,
path, "%FILENAME");
AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message,
ErrorHandler.ERROR_PROCESS_FATAL);
return false;
}
}
}
// 2. Check ZIP
// should publish ?
p = getPublisher(ZIP_PUBLISHER);
if (p.cp_Publish)
{
String path = getFileAccess().getPath(p.cp_URL, null);
// target exists?
if (getFileAccess().exists(p.cp_URL, false))
{
//if its a directory
if (getFileAccess().isDirectory(p.cp_URL))
{
String message = JavaTools.replaceSubString(resources.resZipTargetIsDir,
path, "%FILENAME");
AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message,
ErrorHandler.ERROR_PROCESS_FATAL);
return false;
}
else
{//not a directory, but still exists ( a file...)
if (!p.overwriteApproved)
{
String message = JavaTools.replaceSubString(resources.resZipTargetExists,
path, "%FILENAME");
result = AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message,
ErrorHandler.ERROR_QUESTION_YES);
if (!result)
{
return false;
}
}
}
}
}
// 3. check FTP
p = getPublisher(FTP_PUBLISHER);
// should publish ?
if (p.cp_Publish)
{
String path = getFileAccess().getPath(p.cp_URL, null);
// target exists?
if (getFileAccess().exists(p.url, false))
{
//if its a directory
if (getFileAccess().isDirectory(p.url))
{
//check if its empty
String[] files = getFileAccess().listFiles(p.url, true);
if (files.length > 0)
{
/* it is not empty :-(
* it either a local publisher or an ftp (zip uses no directories
* as target...)
*/
String message = JavaTools.replaceSubString(resources.resFTPTargetNotEmpty,
path, "%FILENAME");
result = AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message,
ErrorHandler.ERROR_QUESTION_CANCEL);
if (!result)
{
return result;
}
}
}
else
{//not a directory, but still exists (as a file)
String message = JavaTools.replaceSubString(resources.resFTPTargetExistsAsfile,
path, "%FILENAME");
AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message,
ErrorHandler.ERROR_PROCESS_FATAL);
return false;
}
// try to write to the path...
}
else
{
// the ftp target directory does not exist.
String message = JavaTools.replaceSubString(resources.resFTPTargetCreate,
path, "%FILENAME");
result = AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message,
ErrorHandler.ERROR_QUESTION_YES);
if (!result)
{
return result;
// try to create the directory...
}
try
{
getFileAccess().fileAccess.createFolder(p.url);
}
catch (Exception ex)
{
message = JavaTools.replaceSubString(resources.resFTPTargetCouldNotCreate,
path, "%FILENAME");
AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message,
ErrorHandler.ERROR_PROCESS_FATAL);
return false;
}
}
}
return true;
}
/*
* return false if "create" should be aborted. true if everything is fine.
*/
private boolean saveSession()
{
try
{
Object node = null;
String name = getSessionSaveName();
//set documents index field.
ListModel docs = settings.cp_DefaultSession.cp_Content.cp_Documents;
for (int i = 0; i < docs.getSize(); i++)
{
((CGDocument) docs.getElementAt(i)).cp_Index = i;
}
Object conf = Configuration.getConfigurationRoot(xMSF, CONFIG_PATH + "/SavedSessions", true);
// first I check if a session with the given name exists
try
{
node = Configuration.getNode(name, conf);
if (node != null)
{
if (!AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(),
JavaTools.replaceSubString(resources.resSessionExists, name, "${NAME}"),
ErrorHandler.ERROR_NORMAL_IGNORE))
{
return false; //remove the old session
}
}
Configuration.removeNode(conf, name);
}
catch (NoSuchElementException nsex)
{
}
settings.cp_DefaultSession.cp_Index = 0;
node = Configuration.addConfigNode(conf, name);
settings.cp_DefaultSession.cp_Name = name;
settings.cp_DefaultSession.writeConfiguration(node, CONFIG_READ_PARAM);
settings.cp_SavedSessions.reindexSet(conf, name, "Index");
Configuration.commit(conf);
// now I reload the sessions to actualize the list/combo boxes load/save sessions.
settings.cp_SavedSessions.clear();
Object confView = Configuration.getConfigurationRoot(xMSF, CONFIG_PATH + "/SavedSessions", false);
settings.cp_SavedSessions.readConfiguration(confView, CONFIG_READ_PARAM);
settings.cp_LastSavedSession = name;
currentSession = name;
// now save the name of the last saved session...
settings.cp_LastSavedSession = name;
// TODO add the <none> session...
prepareSessionLists();
ListModelBinder.fillList(lstLoadSettings, settings.cp_SavedSessions.items(), null);
ListModelBinder.fillComboBox(cbSaveSettings, settings.savedSessions.items(), null);
selectSession();
currentSession = settings.cp_LastSavedSession;
return true;
}
catch (Exception ex)
{
ex.printStackTrace();
return false;
}
}
private String targetStringFor(String publisher)
{
CGPublish p = getPublisher(publisher);
if (p.cp_Publish)
{
return "\n" + getFileAccess().getPath(p.cp_URL, null);
}
else
{
return PropertyNames.EMPTY_STRING;
}
}
/**
* this method will be called when the Status Dialog
* is hidden.
* It checks if the "Process" was successfull, and if so,
* it closes the wizard dialog.
*/
public void finishWizardFinished()
{
if (process.getResult())
{
String targets =
targetStringFor(LOCAL_PUBLISHER) +
targetStringFor(ZIP_PUBLISHER) +
targetStringFor(FTP_PUBLISHER);
String message = JavaTools.replaceSubString(resources.resFinishedSuccess, targets, "%FILENAME");
AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), message, ErrorHandler.ERROR_MESSAGE);
if (exitOnCreate)
{
this.xDialog.endExecute();
}
}
else
{
AbstractErrorHandler.showMessage(xMSF, xControl.getPeer(), resources.resFinishedNoSuccess, ErrorHandler.ERROR_WARNING);
}
}
public void cancel()
{
xDialog.endExecute();
}
private Process process;
private boolean exitOnCreate = true;
/**
* the user clicks the finish/create button.
*/
public boolean finishWizard()
{
finishWizard(true);
return true;
}
/**
* finish the wizard
* @param exitOnCreate_ should the wizard close after
* a successfull create.
* Default is true,
* I have a hidden feature which enables false here
*/
public void finishWizard(boolean exitOnCreate_)
{
exitOnCreate = exitOnCreate_;
/**
* First I check if ftp password was set, if not - the ftp dialog pops up...
* This may happen when a session is loaded, since the
* session saves the ftp url and username, but not the password.
*/
final CGPublish p = getPublisher(FTP_PUBLISHER);
// if ftp is checked, and no proxies are set, and password is empty...
if (p.cp_Publish && (!proxies) && (p.password == null || p.password.equals(PropertyNames.EMPTY_STRING)))
{
if (showFTPDialog(p))
{
updatePublishUI(2);
//now continue...
finishWizard2();
}
}
else
{
finishWizard2();
}
}
/**
* this method is only called
* if ftp-password was eather set, or
* the user entered one in the FTP Dialog which
* popped up when clicking "Create".
*
*/
private void finishWizard2()
{
CGPublish p = getPublisher(LOCAL_PUBLISHER);
p.url = p.cp_URL;
/*
* zip publisher is using another url form...
*/
p = getPublisher(ZIP_PUBLISHER);
//replace the '%' with '%25'
String url1 = JavaTools.replaceSubString(p.cp_URL, "%25", "%");
//replace all '/' with '%2F'
url1 = JavaTools.replaceSubString(url1, "%2F", "/");
p.url = "vnd.sun.star.zip://" + url1 + "/";
/*
* and now ftp...
*/
p = getPublisher(FTP_PUBLISHER);
p.url = FTPDialog.getFullURL(p);
/* first we check the publishing targets. If they exist we warn and ask
* what to do. a False here means the user said "cancel" (or rather: clicked...)
*/
if (!publishTargetApproved())
{
return;
/*
* In order to save the session correctly,
* I return the value of the ftp publisher cp_Publish
* property to its original value...
*/
}
p.cp_Publish = __ftp;
//if the "save settings" checkbox is on...
if (isSaveSession())
{
// if canceled by user
if (!saveSession())
{
return;
}
}
else
{
settings.cp_LastSavedSession = PropertyNames.EMPTY_STRING;
}
try
{
Object conf = Configuration.getConfigurationRoot(xMSF, CONFIG_PATH, true);
Configuration.set(
settings.cp_LastSavedSession,
"LastSavedSession", conf);
Configuration.commit(conf);
}
catch (Exception ex)
{
ex.printStackTrace();
}
/*
* again, if proxies are on, I disable ftp before the creation process
* starts.
*/
if (proxies)
{
p.cp_Publish = false;
/*
* There is currently a bug, which crashes office when
* writing folders to an existing zip file, after deleting
* its content, so I "manually" delete it here...
*/
}
p = getPublisher(ZIP_PUBLISHER);
if (getFileAccess().exists(p.cp_URL, false))
{
getFileAccess().delete(p.cp_URL);
}
try
{
ErrorHandler eh = new ProcessErrorHandler(xMSF, xControl.getPeer(), resources);
process = new Process(settings, xMSF, eh);
StatusDialog pd = getStatusDialog();
pd.setRenderer(new ProcessStatusRenderer(resources));
pd.execute(this, process.myTask, resources.prodName); //process,
process.runProcess();
finishWizardFinished();
process.myTask.removeTaskListener(pd);
}
catch (Exception ex)
{
ex.printStackTrace();
}
}
/**
* implements a hidden feature for "finishing" without
* closing the wizard.
* press "&%" quite fast when the focus is on one
* of the last steps' checkboxes.
* @author rp143992
*/
private class Create implements XKeyListener
{
long time = 0;
int count = 0;
/* (non-Javadoc)
* @see com.sun.star.awt.XKeyListener#keyPressed(com.sun.star.awt.KeyEvent)
*/
public void keyPressed(KeyEvent ke)
{
if (ke.KeyChar == '&')
{
time = System.currentTimeMillis();
}
else if (ke.KeyChar == '%' && ((System.currentTimeMillis() - time) < 300))
{
Boolean b = (Boolean) getControlProperty("btnWizardFinish", PropertyNames.PROPERTY_ENABLED);
if (b.booleanValue())
{
finishWizard(false);
}
}
}
public void keyReleased(KeyEvent arg0)
{
}
public void disposing(EventObject arg0)
{
}
}
/**
* is called on the WindowHidden event,
* deletes the temporary directory.
*/
public void cleanup()
{
try
{
dpStylePreview.dispose();
}
catch (Exception ex)
{
ex.printStackTrace();
}
stylePreview.cleanup();
try
{
if (bgDialog != null)
{
bgDialog.xComponent.dispose();
}
}
catch (Exception ex)
{
ex.printStackTrace();
}
try
{
if (iconsDialog != null)
{
iconsDialog.xComponent.dispose();
}
}
catch (Exception ex)
{
ex.printStackTrace();
}
try
{
if (ftpDialog != null)
{
ftpDialog.xComponent.dispose();
}
}
catch (Exception ex)
{
ex.printStackTrace();
}
try
{
xComponent.dispose();
}
catch (Exception ex)
{
ex.printStackTrace();
}
try
{
//XCloseable xCloseable = (XCloseable) UnoRuntime.queryInterface(XCloseable.class, myDocument);
//if (xCloseable != null)
// xCloseable.close(false);
XCloseable xCloseable = UnoRuntime.queryInterface(XCloseable.class, myFrame);
if (xCloseable != null)
{
xCloseable.close(false);
}
}
catch (Exception ex)
{
ex.printStackTrace();
}
}
public class LoadDocs
{
private XControl xC;
String[] files;
Task task;
public LoadDocs(XControl xC_, String[] files_, Task task_)
{
xC = xC_;
files = files_;
task = task_;
}
public void loadDocuments()
{
//LogTaskListener lts = new LogTaskListener();
//task.addTaskListener(lts);
// task.start();
// where the documents are added to in the list (offset)
int offset = (getSelectedDoc().length > 0 ? selectedDoc[0] + 1 : getDocsCount());
/* if the user chose one file, the list starts at 0,
* if he chose more than one, the first entry is a directory name,
* all the others are filenames.
*/
int start = (files.length > 1 ? 1 : 0);
/*
* Number of documents failed to validate.
*/
int failed = 0;
// store the directory
settings.cp_DefaultSession.cp_InDirectory = start == 1 ? files[0] : FileAccess.getParentDir(files[0]);
/*
* Here i go through each file, and validate it.
* If its ok, I add it to the ListModel/ConfigSet
*/
for (int i = start; i < files.length; i++)
{
CGDocument doc = new CGDocument();
doc.setRoot(settings);
doc.cp_URL = (start == 0) ? files[i] : FileAccess.connectURLs(files[0], files[i]);
/* so - i check each document and if it is ok I add it.
* The failed variable is used only to calculate the place to add -
* Error reporting to the user is (or should (-: )done in the checkDocument(...) method
*/
if (checkDocument(doc, task, xC))
{
settings.cp_DefaultSession.cp_Content.cp_Documents.add(offset + i - failed - start, doc);
}
else
{
failed++;
}
}
// if any documents where added,
// set the first one to be the current-selected document.
if (files.length > start + failed)
{
setSelectedDoc(new short[]
{
(short) offset
});
}
// update the ui...
docListDA.updateUI();
// this enables/disables the next steps.
// when no documents in the list, all next steps are disabled
checkSteps();
/* a small insurance that the status dialog will
* really close...
*/
while (task.getStatus() < task.getMax())
{
task.advance(false);
}
}
}
}
| 20,101 |
package com.lody.legend.dalvik;
import static com.lody.legend.dalvik.DalvikConstants.*;
/**
* @author Lody
* @version 1.0
*/
public class DalvikHelper {
public static int dvmComputeJniArgInfo(String shorty) {
int returnType, jniArgInfo;
int hints;
/* The first shorty character is the return type. */
switch (shorty.charAt(0)) {
case 'V':
returnType = DALVIK_JNI_RETURN_VOID;
break;
case 'F':
returnType = DALVIK_JNI_RETURN_FLOAT;
break;
case 'D':
returnType = DALVIK_JNI_RETURN_DOUBLE;
break;
case 'J':
returnType = DALVIK_JNI_RETURN_S8;
break;
case 'Z':
case 'B':
returnType = DALVIK_JNI_RETURN_S1;
break;
case 'C':
returnType = DALVIK_JNI_RETURN_U2;
break;
case 'S':
returnType = DALVIK_JNI_RETURN_S2;
break;
default:
returnType = DALVIK_JNI_RETURN_S4;
break;
}//end switch
jniArgInfo = returnType << DALVIK_JNI_RETURN_SHIFT;
hints = dvmPlatformInvokeHints(shorty);
if ((hints & DALVIK_JNI_NO_ARG_INFO) != 0) {
jniArgInfo |= DALVIK_JNI_NO_ARG_INFO;
} else {
jniArgInfo |= hints;
}
return jniArgInfo;
}
public static int dvmPlatformInvokeHints(String shorty) {
int padFlags, jniHints;
char sigByte;
int stackOffset, padMask;
stackOffset = padFlags = 0;
padMask = 0x00000001;
/* Skip past the return type */
int index = 1;
while (index < shorty.length()) {
sigByte = shorty.charAt(index);
index++;
if (sigByte == 'D' || sigByte == 'J') {
if ((stackOffset & 1) != 0) {
padFlags |= padMask;
stackOffset++;
padMask <<= 1;
}
stackOffset += 2;
padMask <<= 2;
} else {
stackOffset++;
padMask <<= 1;
}
}
jniHints = 0;
if (stackOffset > DALVIK_JNI_COUNT_SHIFT) {
/* too big for "fast" version */
jniHints = DALVIK_JNI_NO_ARG_INFO;
} else {
stackOffset -= 2; // r2/r3 holds first two items
if (stackOffset < 0)
stackOffset = 0;
jniHints |= ((stackOffset + 1) / 2) << DALVIK_JNI_COUNT_SHIFT;
jniHints |= padFlags;
}
return jniHints;
}
public static int dvmCalcMethodArgsSize(String shorty) {
int count = 0;
/* Skip the return type. */
int index = 1;
char currentChar;
while (index < shorty.length()) {
currentChar = shorty.charAt(index);
index++;
switch (currentChar) {
case 'D':
case 'J':{
count += 2;
break;
}
default:
count++;
break;
}//end switch
}//end while
return count;
}
}
| 1,961 |
#ifndef ntt_H
#define ntt_H
#include <stdint.h>
extern void PQCLEAN_NTRULPR857_AVX2_ntt512_7681(int16_t *f, int reps);
extern void PQCLEAN_NTRULPR857_AVX2_ntt512_10753(int16_t *f, int reps);
extern void PQCLEAN_NTRULPR857_AVX2_invntt512_7681(int16_t *f, int reps);
extern void PQCLEAN_NTRULPR857_AVX2_invntt512_10753(int16_t *f, int reps);
#endif
| 182 |
3,100 | <reponame>andruuuuush/xenia
package jp.xenia.emulator;
public class WindowDemoActivity extends WindowedAppActivity {
@Override
protected String getWindowedAppIdentifier() {
return "xenia_ui_window_vulkan_demo";
}
}
| 92 |
/**
* Copyright (c) 2017-present, Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <chrono>
#include <boost/python.hpp>
#include <boost/python/suite/indexing/map_indexing_suite.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
#include "logdevice/clients/python/util/util.h"
#include "logdevice/common/debug.h"
#include "logdevice/ops/ldquery/Errors.h"
#include "logdevice/ops/ldquery/LDQuery.h"
#include "logdevice/ops/ldquery/TableRegistry.h"
#include "logdevice/ops/ldquery/Utils.h"
using namespace boost::python;
using namespace facebook::logdevice;
using namespace facebook::logdevice::ldquery;
using namespace facebook::logdevice::dbg;
namespace facebook { namespace logdevice { namespace ldquery {
object ldQueryErrorClass;
object ldStatementErrorClass;
template <typename T>
boost::shared_ptr<T>
make_boost_shared_ptr_from_std_shared_ptr(std::shared_ptr<T> ptr) {
return boost::shared_ptr<T>(ptr.get(), [ptr](T*) mutable { ptr.reset(); });
}
// Not to be confused with extract_string() in logdevice_python.h/python.cpp
static std::string extract_string(const object& from, const char* /* name */) {
if (PyString_Check(from.ptr())) {
return extract<std::string>(from);
}
return extract<std::string>(from.attr("encode")("utf-8"));
}
boost::shared_ptr<LDQuery> ldquery_make_ldquery(object config,
int command_timeout_sec,
bool use_ssl = false) {
std::chrono::milliseconds cmd_timeout = std::chrono::milliseconds::max();
if (command_timeout_sec > 0) {
cmd_timeout = std::chrono::milliseconds{command_timeout_sec * 1000};
}
std::shared_ptr<LDQuery> ldquery = std::make_shared<LDQuery>(
extract_string(config, "config"), cmd_timeout, use_ssl);
return make_boost_shared_ptr_from_std_shared_ptr(ldquery);
}
LDQuery::QueryResults* ldquery_query(LDQuery& self, object query) {
auto object = extract_string(query, "query");
LDQuery::QueryResults res;
{
gil_release_and_guard guard;
res = self.query(std::move(object));
}
std::unique_ptr<LDQuery::QueryResults> res_ptr(
new LDQuery::QueryResults(std::move(res)));
return res_ptr.release();
}
template <class T>
void translateException(T err, object pythonError) {
PyErr_SetString(pythonError.ptr(), err.what());
}
bool operator==(const TableColumn& lhs, const TableColumn& rhs) {
return (lhs.name == rhs.name) && (lhs.type == rhs.type);
}
bool operator==(const TableMetadata& lhs, const TableMetadata& rhs) {
return (lhs.name == rhs.name) && (lhs.description == rhs.description) &&
(lhs.columns == rhs.columns);
}
}}} // namespace facebook::logdevice::ldquery
BOOST_PYTHON_MODULE(ext) {
ldQueryErrorClass = createExceptionClass(
"LDQueryError",
"A LDQueryError is a general super type for all ldquery errors");
ldStatementErrorClass =
createExceptionClass("StatementError",
"An exception in the query statement",
ldQueryErrorClass.ptr());
class_<LDQuery, boost::shared_ptr<LDQuery>, boost::noncopyable>(
"LDQueryBinding", no_init)
.def("__init__",
make_constructor(&ldquery_make_ldquery,
default_call_policies(),
(arg("config"),
arg("command_timeout_sec"),
arg("use_ssl") = false)))
.def("set_pretty_output",
+[](LDQuery& self, object val) {
self.setPrettyOutput(extract<bool>(val));
},
args("val"))
.def("get_pretty_output",
+[](LDQuery& self) { return self.getPrettyOutput(); },
args("val"))
.def("set_cache_ttl",
+[](LDQuery& self, object val) {
self.setCacheTTL(std::chrono::seconds(extract<long>(val)));
},
args("seconds"))
.def("get_cache_ttl",
+[](LDQuery& self) {
return std::chrono::duration_cast<std::chrono::seconds>(
self.getCacheTTL())
.count();
})
.def("enable_server_side_filtering",
+[](LDQuery& self, object val) {
self.enableServerSideFiltering(extract<bool>(val));
},
args("val"))
.def("server_side_filtering_enabled",
+[](LDQuery& self) { return self.serverSideFilteringEnabled(); })
.def("query",
&ldquery_query,
return_value_policy<manage_new_object>(),
args("query"))
.def("get_tables",
+[](LDQuery& self) { return self.getTables(); },
default_call_policies(),
return_value_policy<copy_const_reference>());
class_<std::vector<std::string>>("StringVec")
.def(vector_indexing_suite<std::vector<std::string>>());
class_<std::vector<size_t>>("SizetVec")
.def(vector_indexing_suite<std::vector<size_t>>());
class_<LDQuery::Rows>("Rows")
.def(vector_indexing_suite<LDQuery::Rows>())
.def_readonly("size", &LDQuery::Rows::size);
class_<LDQuery::QueryResult, boost::noncopyable>("QueryResult", no_init)
.def_readonly("headers", &LDQuery::QueryResult::headers)
.def_readonly("rows", &LDQuery::QueryResult::rows)
.def_readonly("cols_max_size", &LDQuery::QueryResult::cols_max_size)
.def_readonly("metadata", &LDQuery::QueryResult::metadata);
class_<LDQuery::QueryResults>("QueryResults")
.def(vector_indexing_suite<LDQuery::QueryResults>());
class_<FailedNodeDetails>("Details")
.def_readonly("address", &FailedNodeDetails::address)
.def_readonly("failure_reason", &FailedNodeDetails::failure_reason)
.def("__str__", &FailedNodeDetails::toString)
.def("__repr__", &FailedNodeDetails::toString);
class_<std::map<int, FailedNodeDetails>>("FailedNodes")
.def(map_indexing_suite<std::map<int, FailedNodeDetails>>())
.def_readonly("size", &std::map<int, FailedNodeDetails>::size);
class_<ActiveQueryMetadata>("Metadata")
.def_readonly("failures", &ActiveQueryMetadata::failures)
.def_readonly("contacted_nodes", &ActiveQueryMetadata::contacted_nodes)
.def_readonly("latency", &ActiveQueryMetadata::latency)
.def_readonly("success", &ActiveQueryMetadata::success);
class_<TableColumn>("Column")
.def_readonly("name", &TableColumn::name)
.def_readonly("type", &TableColumn::type_as_string)
.def_readonly("description", &TableColumn::description);
class_<std::vector<TableColumn>>("Columns")
.def(vector_indexing_suite<std::vector<TableColumn>>())
.def("__len__", &std::vector<TableColumn>::size);
class_<TableMetadata>("Table")
.def_readonly("name", &TableMetadata::name)
.def_readonly("description", &TableMetadata::description)
.def_readonly("columns", &TableMetadata::columns);
class_<std::vector<TableMetadata>>("Tables")
.def(vector_indexing_suite<std::vector<TableMetadata>>())
.def("__len__", &std::vector<TableMetadata>::size);
register_exception_translator<LDQueryError>([](LDQueryError err) {
return translateException(err, ldQueryErrorClass);
});
register_exception_translator<StatementError>([](StatementError err) {
return translateException(err, ldStatementErrorClass);
});
}
| 3,085 |
1,412 | <reponame>IMCG/CPPCon14
//
// Wrapper.h
// PlusPlus
//
// Created by <NAME> on 7/11/14.
// Released into the public domain by <NAME>, 2014.
//
#ifndef PLUSPLUS_WRAPPER_H
#define PLUSPLUS_WRAPPER_H
#include "Boxed.h"
#include "EnumWrapper.h"
#include "Inverted.h"
#include <utility>
#include <type_traits>
/*
A wrapper family is an invertible functor family (see Inverted.h) that establishes a correspondence
between a strong type (often a class or enum class) and a weaker underlying type.
It's expected that each library relying on PlusPlus will have a single wrapper family
to handle its wrapping uniformly.
Unwrapping is expected to be idempotent: if an object is double-unwrapped using the same wrapper family,
the second unwrap must be trivial, and must not throw an exception.
This file provides three starting points, intended to be used as base classes for a library's
wrapper, like this:
namespace UnderlyingLibrary_PlusPlus
{
template < class W > struct Wrapper: PlusPlus::DefaultWrapper<W> {};
}
The Wrapper template can then be specialized to handle the particular wrapping needs of UnderlyingLibrary_PlusPlus;
Boxed.h and EnumWrapper.h provide wrapper implementations for PlusPlus::Boxed and enum types, which
can also be used by inheritance:
namespace UnderlyingLibrary_PlusPlus
{
struct SomeBoxTag
{
using ContentType = int;
static ContentType DefaultContent() { return 0; }
};
using SomeBoxType = PlusPlus::Boxed< SomeBoxTag >;
template <> struct Wrapper< SomeBoxType >: PlusPlus::BoxedWrapper< SomeBoxType > {};
enum class SomeEnumType: int;
template <> struct Wrapper< SomeEnumType >: PlusPlus::EnumWrapper< SomeEnumType > {};
}
The three starting points are:
DefaultWrapper Trivial for all types.
AllBoxedTypesAreWrapped This unwrapper assumes all PlusPlus::Boxed types may be unwrapped by
UnderlyingLibrary_PlusPlus. Don't use this if the underlying library uses PlusPlus::Boxed.
(If the underlying library does use PlusPlus, something must have gone wrong.)
AllBoxedAndEnumTypesAreWrapped This unwrapper assumes all enums and PlusPlus::Boxed types may be unwrapped by
SomeLibrary. Don't use this if the underlying library uses enums or PlusPlus::Boxed.
*/
namespace PlusPlus
{
template < class W > using DefaultWrapper = InvertibleIdentityFunctor<W>;
template < class W > struct AllBoxedTypesAreWrapped: DefaultWrapper< W > {};
template < class T > struct AllBoxedTypesAreWrapped< Boxed<T> >: BoxedWrapper< Boxed<T> > {};
template < class W, bool isEnum = std::is_enum<W>::value > struct AllBoxedAndEnumTypesAreWrapped;
template < class W > struct AllBoxedAndEnumTypesAreWrapped< W, false >: AllBoxedTypesAreWrapped<W> {};
template < class W > struct AllBoxedAndEnumTypesAreWrapped< W, true >: EnumWrapper<W> {};
template < template <class> class WrapperFamily, class W, class U >
auto Wrap( U&& u )
-> decltype( WrapperFamily<W>()( std::forward<U>(u) ) )
{ return WrapperFamily<W>()( std::forward<U>(u) ); }
template < template <class> class WrapperFamily, class W >
auto Unwrap( W&& w )
-> decltype( WrapperFamily< typename std::decay<W>::type >().Inverse( std::forward<W>(w) ) )
{ return WrapperFamily< typename std::decay<W>::type >().Inverse( std::forward<W>(w) ); }
template < template <class W> class WrapperFamily >
using Unwrapper = Inverted< WrapperFamily >;
}
#endif
| 1,609 |
1,433 | <gh_stars>1000+
/*
* Copyright (c) 2016, 2017, 2018, 2019 FabricMC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fabricmc.fabric.mixin.tag.extension;
import com.mojang.serialization.DynamicOps;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Inject;
import org.spongepowered.asm.mixin.injection.callback.CallbackInfoReturnable;
import net.minecraft.resource.ResourceManager;
import net.minecraft.util.dynamic.RegistryOps;
import net.minecraft.util.registry.DynamicRegistryManager;
import net.fabricmc.fabric.impl.tag.extension.TagFactoryImpl;
/**
* This mixin loads dynamic registry tags right after datapack entries loaded.
* Needs a higher priority so it will be called before biome modifications.
*/
@Mixin(value = RegistryOps.class, priority = 900)
public class MixinRegistryOps {
@Inject(method = "method_36574", at = @At("RETURN"))
private static <T> void afterDynamicRegistryLoaded(DynamicOps<T> dynamicOps, ResourceManager resourceManager, DynamicRegistryManager registryManager, CallbackInfoReturnable<RegistryOps<T>> cir) {
TagFactoryImpl.loadDynamicRegistryTags(registryManager, resourceManager);
}
}
| 505 |
534 | package mekanism.common.recipe.lookup.monitor;
import mekanism.api.math.FloatingLong;
import mekanism.api.recipes.NucleosynthesizingRecipe;
import mekanism.common.capabilities.energy.MachineEnergyContainer;
import mekanism.common.recipe.lookup.IRecipeLookupHandler;
public class NucleosynthesizerRecipeCacheLookupMonitor extends RecipeCacheLookupMonitor<NucleosynthesizingRecipe> {
public NucleosynthesizerRecipeCacheLookupMonitor(IRecipeLookupHandler<NucleosynthesizingRecipe> handler) {
super(handler);
}
public FloatingLong updateAndProcess(MachineEnergyContainer<?> energyContainer) {
FloatingLong prev = energyContainer.getEnergy().copyAsConst();
if (updateAndProcess()) {
//TODO: Re-evaluate this at some point
int toProcess = (int) Math.sqrt(prev.divide(energyContainer.getEnergyPerTick()).doubleValue());
for (int i = 0; i < toProcess - 1; i++) {
cachedRecipe.process();
}
//Update amount of energy that actually got used, as if we are "near" full we may not have performed our max number of operations
return prev.subtract(energyContainer.getEnergy());
}
//If we don't have a cached recipe so didn't process anything at all just return zero
return FloatingLong.ZERO;
}
} | 479 |
1,561 | <reponame>radetsky/themis<filename>src/themis/secure_cell.c<gh_stars>1000+
/*
* Copyright (c) 2015 Cossack Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "themis/secure_cell.h"
#include "themis/secure_cell_seal_passphrase.h"
#include "themis/sym_enc_message.h"
themis_status_t themis_secure_cell_encrypt_seal(const uint8_t* master_key,
const size_t master_key_length,
const uint8_t* user_context,
const size_t user_context_length,
const uint8_t* message,
const size_t message_length,
uint8_t* encrypted_message,
size_t* encrypted_message_length)
{
size_t ctx_length_;
size_t msg_length_;
size_t total_length;
THEMIS_CHECK_PARAM(encrypted_message_length != NULL);
THEMIS_STATUS_CHECK(themis_auth_sym_encrypt_message(master_key,
master_key_length,
message,
message_length,
user_context,
user_context_length,
NULL,
&ctx_length_,
NULL,
&msg_length_),
THEMIS_BUFFER_TOO_SMALL);
total_length = ctx_length_ + msg_length_;
if (!encrypted_message || *encrypted_message_length < total_length) {
*encrypted_message_length = total_length;
return THEMIS_BUFFER_TOO_SMALL;
}
*encrypted_message_length = total_length;
return themis_auth_sym_encrypt_message(master_key,
master_key_length,
message,
message_length,
user_context,
user_context_length,
encrypted_message,
&ctx_length_,
encrypted_message + ctx_length_,
&msg_length_);
}
themis_status_t themis_secure_cell_decrypt_seal(const uint8_t* master_key,
const size_t master_key_length,
const uint8_t* user_context,
const size_t user_context_length,
const uint8_t* encrypted_message,
const size_t encrypted_message_length,
uint8_t* plain_message,
size_t* plain_message_length)
{
size_t ctx_length_ = 0;
size_t msg_length_ = 0;
THEMIS_STATUS_CHECK(themis_auth_sym_decrypt_message(master_key,
master_key_length,
user_context,
user_context_length,
encrypted_message,
encrypted_message_length,
NULL,
0,
NULL,
&msg_length_),
THEMIS_BUFFER_TOO_SMALL);
if (encrypted_message_length < msg_length_) {
return THEMIS_INVALID_PARAMETER;
}
ctx_length_ = encrypted_message_length - msg_length_;
return themis_auth_sym_decrypt_message(master_key,
master_key_length,
user_context,
user_context_length,
encrypted_message,
ctx_length_,
encrypted_message + ctx_length_,
msg_length_,
plain_message,
plain_message_length);
}
themis_status_t themis_secure_cell_encrypt_seal_with_passphrase(const uint8_t* passphrase,
size_t passphrase_length,
const uint8_t* user_context,
size_t user_context_length,
const uint8_t* message,
size_t message_length,
uint8_t* encrypted_message,
size_t* encrypted_message_length)
{
themis_status_t res = THEMIS_FAIL;
size_t auth_token_length = 0;
size_t ciphertext_length = 0;
size_t total_length = 0;
THEMIS_CHECK_PARAM(encrypted_message_length != NULL);
/*
* Since Seal mode prepends authentication token to the message
* we need to get the size of this token at first.
*/
res = themis_auth_sym_encrypt_message_with_passphrase(passphrase,
passphrase_length,
message,
message_length,
user_context,
user_context_length,
NULL,
&auth_token_length,
NULL,
&ciphertext_length);
if (res != THEMIS_BUFFER_TOO_SMALL) {
return res;
}
total_length = auth_token_length + ciphertext_length;
if (!encrypted_message || *encrypted_message_length < total_length) {
*encrypted_message_length = total_length;
return THEMIS_BUFFER_TOO_SMALL;
}
res = themis_auth_sym_encrypt_message_with_passphrase(passphrase,
passphrase_length,
message,
message_length,
user_context,
user_context_length,
encrypted_message,
&auth_token_length,
encrypted_message + auth_token_length,
&ciphertext_length);
if (res == THEMIS_SUCCESS || res == THEMIS_BUFFER_TOO_SMALL) {
*encrypted_message_length = auth_token_length + ciphertext_length;
}
return res;
}
themis_status_t themis_secure_cell_decrypt_seal_with_passphrase(const uint8_t* passphrase,
size_t passphrase_length,
const uint8_t* user_context,
size_t user_context_length,
const uint8_t* encrypted_message,
size_t encrypted_message_length,
uint8_t* plain_message,
size_t* plain_message_length)
{
themis_status_t res = THEMIS_FAIL;
size_t auth_token_length = 0;
size_t message_length = 0;
THEMIS_CHECK_PARAM(plain_message_length != NULL);
/*
* Since Seal mode prepends authentication token to the message we need
* to get the size of this token at first. Token size is not available
* directly so we infer it from the size of encrypted message and
* plaintext message length embedded in the token.
*
* Note that this might fail if the encrypted message does not start with
* a valid token. It also might produce a false positive if the data looks
* like a token but does not contain valid measurements. This will lead to
* a decryption failure later.
*/
res = themis_auth_sym_decrypt_message_with_passphrase(passphrase,
passphrase_length,
user_context,
user_context_length,
encrypted_message,
encrypted_message_length,
NULL,
0,
NULL,
&message_length);
if (res != THEMIS_BUFFER_TOO_SMALL) {
return res;
}
/* We should not overflow here. If we do then the message is corrupted. */
if (encrypted_message_length < message_length) {
return THEMIS_FAIL;
}
auth_token_length = encrypted_message_length - message_length;
res = themis_auth_sym_decrypt_message_with_passphrase(passphrase,
passphrase_length,
user_context,
user_context_length,
encrypted_message,
auth_token_length,
encrypted_message + auth_token_length,
message_length,
plain_message,
plain_message_length);
return res;
}
themis_status_t themis_secure_cell_encrypt_token_protect(const uint8_t* master_key,
const size_t master_key_length,
const uint8_t* user_context,
const size_t user_context_length,
const uint8_t* message,
const size_t message_length,
uint8_t* context,
size_t* context_length,
uint8_t* encrypted_message,
size_t* encrypted_message_length)
{
return themis_auth_sym_encrypt_message(master_key,
master_key_length,
message,
message_length,
user_context,
user_context_length,
context,
context_length,
encrypted_message,
encrypted_message_length);
}
themis_status_t themis_secure_cell_decrypt_token_protect(const uint8_t* master_key,
const size_t master_key_length,
const uint8_t* user_context,
const size_t user_context_length,
const uint8_t* encrypted_message,
const size_t encrypted_message_length,
const uint8_t* context,
const size_t context_length,
uint8_t* plain_message,
size_t* plain_message_length)
{
return themis_auth_sym_decrypt_message(master_key,
master_key_length,
user_context,
user_context_length,
context,
context_length,
encrypted_message,
encrypted_message_length,
plain_message,
plain_message_length);
}
themis_status_t themis_secure_cell_encrypt_context_imprint(const uint8_t* master_key,
const size_t master_key_length,
const uint8_t* message,
const size_t message_length,
const uint8_t* context,
const size_t context_length,
uint8_t* encrypted_message,
size_t* encrypted_message_length)
{
return themis_sym_encrypt_message_u(master_key,
master_key_length,
context,
context_length,
message,
message_length,
encrypted_message,
encrypted_message_length);
}
themis_status_t themis_secure_cell_decrypt_context_imprint(const uint8_t* master_key,
const size_t master_key_length,
const uint8_t* encrypted_message,
const size_t encrypted_message_length,
const uint8_t* context,
const size_t context_length,
uint8_t* plain_message,
size_t* plain_message_length)
{
return themis_sym_decrypt_message_u(master_key,
master_key_length,
context,
context_length,
encrypted_message,
encrypted_message_length,
plain_message,
plain_message_length);
}
| 11,466 |
3,444 | <filename>blade-websocket/src/test/java/com/hellokaton/blade/websocket/BaseWebSocketHandler.java
package com.hellokaton.blade.websocket;
import com.hellokaton.blade.websocket.annotaion.OnClose;
import com.hellokaton.blade.websocket.annotaion.OnOpen;
/**
* @author darren
* @description
* @date 2018/12/18 13:29
*/
public abstract class BaseWebSocketHandler {
@OnOpen
public void OnOpen(WebSocketContext ctx) {
System.out.println("ws from annotation @OnOpen:" + ctx.session().uuid());
}
@OnClose
public void OnClose(WebSocketContext ctx) {
System.out.println("ws from annotation @OnClose:" + ctx.session().uuid() + " disconnect");
}
}
| 251 |
1,351 | <filename>example/plugins/c-api/disable_http2/disable_http2.cc
/** @file
An example plugin for accept object protocol set API.
This clones the protocol sets attached to all the accept objects and unregisters HTTP/2 from those
copies. The protocol set for incoming connections that match a list of domains are replaced with
the copy, effectively disabling HTTP/2 for those domains.
@section license License
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <ts/ts.h>
#include <unordered_set>
#include <string>
#include <cstring>
#include <openssl/ssl.h>
#define PLUGIN_NAME "disable_http2"
// Map of domains to tweak.
using DomainSet = std::unordered_set<std::string>;
DomainSet Domains;
int
CB_SNI(TSCont contp, TSEvent, void *cb_data)
{
auto vc = static_cast<TSVConn>(cb_data);
TSSslConnection ssl_conn = TSVConnSslConnectionGet(vc);
auto *ssl = reinterpret_cast<SSL *>(ssl_conn);
char const *sni = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
if (sni) {
if (Domains.find(sni) != Domains.end()) {
TSDebug(PLUGIN_NAME, "Disable H2 for SNI=%s", sni);
TSVConnProtocolDisable(vc, TS_ALPN_PROTOCOL_HTTP_2_0);
}
}
TSVConnReenable(vc);
return TS_SUCCESS;
}
void
TSPluginInit(int argc, char const *argv[])
{
int ret;
TSPluginRegistrationInfo info;
info.plugin_name = PLUGIN_NAME;
info.vendor_name = "Apache Software Foundation";
info.support_email = "<EMAIL>";
ret = TSPluginRegister(&info);
if (ret != TS_SUCCESS) {
TSError("[%s] registration failed", PLUGIN_NAME);
return;
} else if (argc < 2) {
TSError("[%s] Usage %s.so servername1 servername2 ... ", PLUGIN_NAME, PLUGIN_NAME);
return;
} else {
TSDebug(PLUGIN_NAME, "registration succeeded");
}
for (int i = 1; i < argc; i++) {
TSDebug(PLUGIN_NAME, "%s added to the No-H2 list", argv[i]);
Domains.emplace(std::string(argv[i], strlen(argv[i])));
}
// These callbacks do not modify any state so no lock is needed.
TSCont cb_sni = TSContCreate(&CB_SNI, nullptr);
TSHttpHookAdd(TS_SSL_SERVERNAME_HOOK, cb_sni);
}
| 1,037 |
884 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package com.microsoft.commondatamodel.objectmodel.storage;
import com.microsoft.commondatamodel.objectmodel.storage.GithubAdapter;
import java.time.Duration;
import java.util.concurrent.ExecutionException;
import org.testng.Assert;
import org.testng.annotations.Test;
public class GithubAdapterTest {
@Test
public void checkSuccessfulRead() throws ExecutionException, InterruptedException {
final GithubAdapter githubAdapter = new GithubAdapter();
githubAdapter.setTimeout(Duration.ofMillis(5000));
githubAdapter.setMaximumTimeout(Duration.ofMillis(10000));
githubAdapter.setNumberOfRetries(2);
final String s = githubAdapter.readAsync("/foundations.cdm.json").get();
Assert.assertNotNull(s);
}
}
| 286 |
381 | package com.tngtech.jgiven.example.springboot;
import java.util.Arrays;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.ApplicationContext;
@SpringBootApplication
public class Application {
public static void main( String[] args ) {
ApplicationContext ctx = SpringApplication.run( Application.class, args );
System.out.println( "Let's inspect the beans provided by Spring Boot:" );
String[] beanNames = ctx.getBeanDefinitionNames();
Arrays.sort( beanNames );
for( String beanName : beanNames ) {
System.out.println( beanName );
}
}
} | 236 |
743 | <reponame>riag23/AdaptiveCards<gh_stars>100-1000
{"actions":[],"body":[{"columns":[{"backgroundImage":"https://adaptivecards.io/content/AlkiBeach.jpg","items":[],"minHeight":"50px","type":"Column","width":"auto"},{"backgroundImage":{"url":"https://adaptivecards.io/content/GoldenGardensPark.jpg","verticalAlignment":"center"},"items":[],"minHeight":"50px","type":"Column","width":"stretch"},{"backgroundImage":"https://adaptivecards.io/content/BainbridgeIsland.jpg","items":[],"minHeight":"50px","type":"Column","width":"auto"}],"type":"ColumnSet"},{"text":"You can even repeat the background image...","type":"TextBlock"},{"columns":[{"backgroundImage":{"fillMode":"repeat","url":"https://adaptivecards.io/content/uparrow.png"},"items":[],"minHeight":"50px","type":"Column","width":"stretch"},{"horizontalAlignment":"Center","items":[{"horizontalAlignment":"center","text":"Those are some neat arrows","type":"TextBlock","wrap":true}],"type":"Column","verticalContentAlignment":"Center","width":"stretch"}],"type":"ColumnSet"},{"text":"Horizontal repeat...","type":"TextBlock"},{"columns":[{"backgroundImage":{"fillMode":"repeatHorizontally","url":"https://adaptivecards.io/content/downarrow.png"},"items":[],"minHeight":"50px","type":"Column","width":"stretch"},{"backgroundImage":{"fillMode":"repeatHorizontally","url":"https://adaptivecards.io/content/uparrow.png","verticalAlignment":"center"},"items":[],"minHeight":"50px","type":"Column","width":"stretch"},{"backgroundImage":{"fillMode":"repeatHorizontally","url":"https://adaptivecards.io/content/uparrow.png","verticalAlignment":"bottom"},"items":[],"minHeight":"50px","type":"Column","width":"stretch"}],"type":"ColumnSet"},{"text":"Vertical 
repeat...","type":"TextBlock"},{"columns":[{"backgroundImage":{"fillMode":"repeatVertically","url":"https://adaptivecards.io/content/uparrow.png"},"items":[],"minHeight":"50px","type":"Column","width":"stretch"},{"backgroundImage":{"fillMode":"repeatVertically","horizontalAlignment":"center","url":"https://adaptivecards.io/content/downarrow.png"},"items":[],"minHeight":"50px","type":"Column","width":"stretch"},{"backgroundImage":{"fillMode":"repeatVertically","horizontalAlignment":"right","url":"https://adaptivecards.io/content/uparrow.png"},"items":[],"minHeight":"50px","type":"Column","width":"stretch"}],"type":"ColumnSet"}],"type":"AdaptiveCard","version":"1.2"} | 630 |
1,072 | from .ops import (
s_add_e,
s_mul_e,
s_sub_e,
s_add_e_sum,
s_mul_e_sum,
s_sub_e_sum,
s_add_e_mean,
s_mul_e_mean,
s_sub_e_mean,
s_add_t,
s_mul_t,
s_dot_t,
s_sub_t,
s_div_t,
)
| 164 |
348 | <filename>docs/data/leg-t2/011/01101222.json
{"nom":"Mas-Cabardès","circ":"1ère circonscription","dpt":"Aude","inscrits":145,"abs":54,"votants":91,"blancs":15,"nuls":2,"exp":74,"res":[{"nuance":"REM","nom":"<NAME>","voix":45},{"nuance":"FN","nom":"<NAME>","voix":29}]} | 111 |
340 | <filename>src/main/java/com/zblog/web/backend/controller/UploadController.java
package com.zblog.web.backend.controller;
import java.io.InputStream;
import java.util.Date;
import org.apache.shiro.authz.annotation.Logical;
import org.apache.shiro.authz.annotation.RequiresRoles;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.InputStreamResource;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.multipart.MultipartFile;
import com.zblog.biz.UploadManager;
import com.zblog.biz.editor.Ueditor;
import com.zblog.core.dal.entity.Upload;
import com.zblog.core.plugin.JMap;
import com.zblog.web.support.ServletRequestReader;
import com.zblog.web.support.WebContextFactory;
@Controller
@RequestMapping("/backend/uploads")
@RequiresRoles(value = { "admin", "editor" }, logical = Logical.OR)
public class UploadController{
@Autowired
private Ueditor ueditor;
@Autowired
private UploadManager uploadManager;
@RequestMapping(method = RequestMethod.GET)
public String index(@RequestParam(value = "page", defaultValue = "1") int page, Model model){
model.addAttribute("page", uploadManager.list(page, 15));
return "backend/upload/list";
}
@ResponseBody
@RequestMapping(method = RequestMethod.POST)
public Object insert(MultipartFile file){
Upload upload = null;
try(InputStream in = file.getInputStream()){
upload = uploadManager.insertUpload(new InputStreamResource(in), new Date(), file.getOriginalFilename(),
WebContextFactory.get().getUser().getId());
}catch(Exception e){
e.printStackTrace();
}
return JMap.create("success", upload != null);
}
@RequestMapping(value = "/edit", method = RequestMethod.GET)
public String edit(){
return "backend/upload/edit";
}
@ResponseBody
@RequestMapping(value = "/{uploadid}", method = RequestMethod.DELETE)
public Object remove(@PathVariable("uploadid") String uploadid){
uploadManager.removeUpload(uploadid);
return JMap.create("success", true);
}
@ResponseBody
@RequestMapping(value = "/ueditor")
public Object ueditor(ServletRequestReader reader){
return ueditor.server(reader);
}
}
| 896 |
653 | <reponame>mkinsner/llvm<filename>clang/test/SemaSYCL/esimd-private-glob.cpp
// RUN: %clang_cc1 -fsycl-is-device -fsyntax-only -verify %s
// expected-no-diagnostics
int x = 0;
| 78 |
3,428 | {"id":"00403","group":"spam-1","checksum":{"type":"MD5","value":"46d0face754b6bb7dce8b3ea560f75fb"},"text":"From <EMAIL> Sat Sep 21 10:48:50 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: zzzz@<EMAIL>assass<EMAIL>int.org\nReceived: from localhost (jalapeno [127.0.0.1])\n\tby zzzzason.org (Postfix) with ESMTP id 899FA16F16\n\tfor <zzzz@localhost>; Sat, 21 Sep 2002 10:48:49 +0100 (IST)\nReceived: from jalapeno [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor zzzz@localhost (single-drop); Sat, 21 Sep 2002 10:48:49 +0100 (IST)\nReceived: from webnote.net (mail.webnote.net [172.16.58.3]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g8L42hC07532 for\n <<EMAIL>>; Sat, 21 Sep 2002 05:02:43 +0100\nReceived: from rack3.easydns.com (rack3.easydns.com [205.210.42.50]) by\n webnote.net (8.9.3/8.9.3) with ESMTP id FAA08333 for <<EMAIL>>;\n Sat, 21 Sep 2002 05:03:15 +0100\nReceived: from winxp1 (unknown [195.166.226.132]) by rack3.easydns.com\n (Postfix) with SMTP id 7300A4B243 for <<EMAIL>>; Sat,\n 21 Sep 2002 00:03:07 -0400 (EDT)\nFrom: \"davidsavimbi\" <<EMAIL>>\nTo: \"zzzz\" <<EMAIL>>\nSubject: private\nDate: Sat, 21 Sep 02 05:01:06 Greenwich Standard Time\nMIME-Version: 1.0\nX-Priority: 3\nX-Msmail-Priority: Normal\nX-Mailer: Microsoft Outlook Express 6.00.2462.0000\nX-Mimeole: Produced By Microsoft MimeOLE V6.00.2462.0000\nMessage-Id: <<EMAIL>.<EMAIL>>\nContent-Type: multipart/mixed;boundary= \"----=_NextPart_000_002B_83A1E322.4D8B344B\"\n\n------=_NextPart_000_002B_83A1E322.4D8B344B\nContent-Type: text/plain\nContent-Transfer-Encoding: 
base64\n\nLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0NClRoaXMgbWVzc2FnZSB3\nYXMgc2VudCBieSBFeHByZXNzIERpcmVjdCBFbWFpbCBCbGFzdGVyIFY1LjEsIA0KeW91IGNh\nbiBkb3dubG9hZCBpdCBmcm9tOiBodHRwOi8vd3d3LmZhc3RidWxrZW1haWwuY29tIA0KRXhw\ncmVzcyBEaXJlY3QgRW1haWwgQmxhc3RlciBpcyBhIHBvd2VyZnVsIGVtYWlsIG1hcmtldGlu\nZyB0b29sISENCi0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0NCg0KDQpE\nZWFyIHNpci9tYWRhbQ0KDQpXaXRoIGdvb2QgcmVjb21tZW5kYXRpb24sSSAgZGVjaWRlZCB0\nbyBjb250YWN0IHlvdS4NCkkgYW0gUFJJTkNFIERBVklEICBTQVZJTUJJLCB0aGUgc29uIG9m\nIEpPTkFTIFNBVklNQkkgLHRoZSBsZWFkZXIgb2YgdGhlDQpVTklUQSByZWJlbCBpbiBBTkdP\nTEEsIHdobyAgd2FzIGtpbGxlZCBpbiBmZWJydWFyeSBieSB0aGUgdHJvb3AgbG95YWwgdG8N\nCnRoZSBnb3Zlcm5tZW50IC5BZnRlciBoaXMgZnVuZXJhbCAgYW5kIGJ1cmlhbCx3ZSBoYWQg\nYSBmYW1pbHkgbWVldGluZyBhbmQNCnJlc29sdmVkIHRvIGludmVzdCB0aGUgbW9uZXkgaGUg\nbGVmdCAgYmVoaW5kIGluIHlvdXIgY291bnRyeSwgaGVuY2UgdGhpcw0KbGV0dGVyIHRvIHlv\ndS5IZSBsZWZ0IHRoZSBzdW0gb2YgJDgwIG1pbGxpb24gIHVzIGRvbGxhcnMgaW4gY2FzaCwg\nd2hpY2ggd2FzDQphIHByb2NlZWQgZnJvbSB0aGUgc2FsZSBvZiBkaWFtb25kcyx3aGljaCB3\nYXMgIHVuZGVyIGhpcyBjb250cm9sLg0KDQpIb3dldmVyLHRoZSBlbnRpcmUgIGRldmVsb3Bt\nZW50IG1hZGUgdXMgdG8gZmxlZSBBTkdPTEEgdG8gSk9IQU5FU0JVUkcsU09VVEgNCkFGUklD\nQSwgZm9yIGZlYXIgb2YgIGJlaW5nIGtpbGxlZC4NClRoZSBhbW91bnQgb2YgJDgwIG1pbGxp\nb24gIHVuaXRlZCBzdGF0ZXMgZG9sbGFycyBpbiBjYXNoIGlzIHByZXNlbnRseSBpbiBvdXIN\nCnBvc3Nlc3Npb24sIGFuZCBJIHdvdWxkIHdhbnQgaXQgIHRvIGJlIGludmVzdGVkIGluIHlv\ndXIgY291bnRyeSxkdWUgdG8gdGhlDQpzdGFiaWxpdHkgb2YgeW91ciBlY29ub215LlRoZSBt\nb25leSBpcyAgcHJlc2VudGx5IHdpdGggYW4gZW1lcmdlbmN5DQpkaXBsb21hdGljIHNlY3Vy\naXR5IGNvdXJpZXIgY29tcGFueSx3aGljaCBteSBtb3RoZXIgYW5kIEkgaHVycmllZGx5IHVz\nZWQgdG8gbW92ZSB0aGUNCmNhcmdvIG91dCBmcm9tIG91ciBjb3VudHJ5IHRvIFNPVVRIIEFG\nUklDQS5XZSAgcmVnaXN0ZXJlZCBpdCB1bmRlciBhIHNlY3VyaXR5DQpjb2RlIHdpdGggbm8g\nbmFtZSwgYW5kIHdhcyByZWdpc3RlcmVkIGFzICBjb250YWluaW5nIHBob3RvZ3JhcGhpYyBw\nYXBlcg0KbWF0ZXJpYWwuDQoNCk15IG1vdGhlciBhbmQgSSBoYXZlIHJlc29sdmVkIHRvIGdp\ndmUgeW91ICAxNSUgb2YgdGhlIHRvdGFsIHN1bSBmb3IgeW91ciBhc3Npc3RhbmNlIG
luDQpn\nZXR0aW5nIHRoaXMgZnVuZHMgdHJhbnNmZXJyZWQgdG8gIHlvdXIgcmVsaWFibGUgY29tcGFu\neSdzIGFjY291bnQgaW4geW91cg0KY291bnRyeSxmb3IgaW52ZXN0bWVudC5XZSBoYXZlIGVx\ndWFsbHkgIHJlc29sdmVkIHRvIHNwZW5kIHRoZSByZXN0IG9mIG91cg0KbGl2ZXMgaW4geW91\nciBjb3VudHJ5LllvdSBoYXZlIHRvICBtYWtlIHRyYXZlbGluZyBhcnJhbmdlbWVudA0Kb2Yg\nY29taW5nIHRvIEpPSEFORVNCVVJHLFNPVVRIIEFGUklDQSxpbW1lZGlhdGVseSB0byAgbWVl\ndCB3aXRoIG1lIGZhY2UgdG8NCmZhY2UgYW5kIHNlZSB0aGUgZnVuZHMgcGh5c2ljYWxseSx0\naGVuIGhhdmUgaXQgdHJhbnNmZXJyZWQgIHRvIHlvdXINCmFjY291bnQuQWZ0ZXJ3aGljaCBi\nb3RoIG9mIHVzIHdvdWxkIHRyYXZlbCB0b2dldGhlciB0byB5b3VyIGNvdW50cnkgZm9yDQpz\naGFyaW5nIGFuZCBpbW1lZGlhdGUgaW52ZXN0bWVudCx0aGVuIG15IGZhbWlseSB3aWxsIGpv\naW4gbWUgbGF0ZXIuDQoNCkFzIHdlIGF3YWl0IHlvdXIgdXJnZW50ICByZXBseSwgc3RyaWN0\nbHkgdGhyb3VnaCBteSBlLW1haWwgZGF2c2F2QG1ha3Rvb2IuY29tIG9yDQogZGF2aWRzYXZp\nbWJpQG1ha3Rvb2IuY29tICBvciBteSBwcml2YXRlIHBob25lIG51bWJlciArODgyIDEgNjQ2\nNjg1MjI5LiAgeW91cg0KYWJzb2x1dGUgdHJ1c3QgYW5kIGNvbmZpZGVudGlhbGl0eSBpcyBo\naWdobHkgc29saWNpdGVkIHRvIHNlcnZlIHVzIGJldHRlci4NCg0KQmVzdCByZWdhcmQNClBS\nSU5DRSBEQVZJRCBTQVZJTUJJICAgIA==\n------=_NextPart_000_002B_83A1E322.4D8B344B--\n\n\n"} | 3,037 |
5,169 | <filename>Specs/e/4/7/SpotX-SDK-AVPlayer/1.5.1/SpotX-SDK-AVPlayer.podspec.json
{
"name": "SpotX-SDK-AVPlayer",
"version": "1.5.1",
"summary": "SpotX SDK for AVPlayer",
"authors": "SpotX, Inc.",
"homepage": "http://www.spotx.tv",
"license": {
"type": "Commercial",
"text": "https://www.spotx.tv/contact/"
},
"source": {
"http": "http://bintray.com/artifact/download/spotxmobile/pods/spotx-sdk-avplayer-1.5.1.zip",
"flatten": false
},
"requires_arc": true,
"module_name": "SpotX",
"header_dir": "SpotX",
"ios": {
"preserve_paths": [
"iphone/SpotX.framework"
],
"vendored_frameworks": "iphone/SpotX.framework",
"public_header_files": "iphone/SpotX.framework/Headers/*.h",
"source_files": "iphone/SpotX.framework/Headers/*.h",
"module_map": "iphone/SpotX.framework/Modules/module.modulemap",
"frameworks": [
"AdSupport",
"Foundation",
"UIKit",
"AVKit",
"AVFoundation"
]
},
"platforms": {
"ios": "8.0",
"tvos": "9.0"
},
"tvos": {
"preserve_paths": [
"appletv/SpotX.framework"
],
"vendored_frameworks": "appletv/SpotX.framework",
"public_header_files": "appletv/SpotX.framework/Headers/*.h",
"source_files": "appletv/SpotX.framework/Headers/*.h",
"module_map": "appletv/SpotX.framework/Modules/module.modulemap",
"frameworks": [
"AdSupport",
"Foundation",
"UIKit",
"AVKit",
"AVFoundation",
"TVMLKit"
]
}
}
| 702 |
3,102 | <reponame>medismailben/llvm-project
// RUN: %clang_cc1 -I %S -emit-pch \
// RUN: -include Inputs/pch-through3c.h \
// RUN: -pch-through-header=Inputs/pch-through3c.h -o %t.3c %s
// Checks that no warnings appear for this successful use.
// RUN: %clang_cc1 -verify -I %S -include-pch %t.3c \
// RUN: -include Inputs/pch-through3c.h \
// RUN: -pch-through-header=Inputs/pch-through3c.h \
// RUN: %S/Inputs/pch-through-use3c.cpp 2>&1
| 203 |
1,133 | <filename>components/stdproc/model/addsubmodel/include/addsubmodelmodule.h
//#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//#
//# Author: <NAME>
//# Copyright 2013, by the California Institute of Technology. ALL RIGHTS RESERVED.
//# United States Government Sponsorship acknowledged.
//# Any commercial use must be negotiated with the Office of Technology Transfer at
//# the California Institute of Technology.
//# This software may be subject to U.S. export control laws.
//# By accepting this software, the user agrees to comply with all applicable U.S.
//# export laws and regulations. User has the responsibility to obtain export licenses,
//# or other export authority as may be required before exporting such information to
//# foreign countries or providing access to foreign persons.
//#
//#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#ifndef addsubmodelmodule_h
#define addsubmodelmodule_h

#include <Python.h>

// C entry points of the addsubmodel CPython extension.  Each follows the
// METH_VARARGS convention: (self, args) in, a new PyObject reference out.
// The implementations live elsewhere in the addsubmodel sources.
extern "C"
{
  PyObject* createaddsubmodel_C(PyObject *, PyObject *);
  PyObject* destroyaddsubmodel_C(PyObject *, PyObject *);
  PyObject* cpxCpxProcess_C(PyObject *, PyObject *);
  PyObject* unwUnwProcess_C(PyObject *, PyObject *);
  PyObject* cpxUnwProcess_C(PyObject *, PyObject *);
  PyObject* setDims_C(PyObject *, PyObject *);
  PyObject* setScaleFactor_C(PyObject *, PyObject *);
  PyObject* setFlip_C(PyObject *, PyObject *);
}

// Module method table mapping the Python-visible names onto the C entry points
// declared above.  The {NULL, NULL, 0, NULL} row is the mandatory sentinel that
// terminates the table.
static PyMethodDef addsubmodel_methods[] =
{
  {"createaddsubmodel", createaddsubmodel_C, METH_VARARGS, " "},
  {"destroyaddsubmodel", destroyaddsubmodel_C, METH_VARARGS, " "},
  {"cpxCpxProcess", cpxCpxProcess_C, METH_VARARGS, " "},
  {"cpxUnwProcess", cpxUnwProcess_C, METH_VARARGS, " "},
  {"unwUnwProcess", unwUnwProcess_C, METH_VARARGS, " "},
  {"setDims", setDims_C, METH_VARARGS, " "},
  {"setFlip", setFlip_C, METH_VARARGS, " "},
  {"setScaleFactor", setScaleFactor_C, METH_VARARGS, " "},
  {NULL, NULL, 0 , NULL}
};

#endif //addsubmodelmodule_h
6,989 | //===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCPP___CONCEPTS_DESTRUCTIBLE_H
#define _LIBCPP___CONCEPTS_DESTRUCTIBLE_H
#include <__config>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_CONCEPTS)

// [concept.destructible]

// Note: the standard requires the destructor to be *non-throwing*, hence
// is_nothrow_destructible_v rather than plain is_destructible_v.
template<class _Tp>
concept destructible = is_nothrow_destructible_v<_Tp>;

#endif // _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_CONCEPTS)

_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___CONCEPTS_DESTRUCTIBLE_H
| 333 |
1,467 | <gh_stars>1000+
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.awssdk.codegen.emitters.tasks;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import software.amazon.awssdk.codegen.emitters.GeneratorTask;
import software.amazon.awssdk.codegen.emitters.GeneratorTaskParams;
import software.amazon.awssdk.codegen.poet.builder.BaseClientBuilderClass;
import software.amazon.awssdk.codegen.poet.builder.BaseClientBuilderInterface;
/**
* Task for classes shared by {@link AsyncClientGeneratorTasks} and {@link SyncClientGeneratorTasks}.
*/
public class CommonClientGeneratorTasks extends BaseGeneratorTasks {

    public CommonClientGeneratorTasks(GeneratorTaskParams dependencies) {
        super(dependencies);
    }

    /**
     * Emits the artifacts shared by the sync and async client generators: the
     * base client builder class and its builder interface.
     */
    @Override
    protected List<GeneratorTask> createTasks() throws Exception {
        GeneratorTask baseBuilderClassTask = createPoetGeneratorTask(new BaseClientBuilderClass(model));
        GeneratorTask baseBuilderInterfaceTask = createPoetGeneratorTask(new BaseClientBuilderInterface(model));
        return Arrays.asList(baseBuilderClassTask, baseBuilderInterfaceTask);
    }
}
| 556 |
3,200 | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_FUNC_GRAPH_SUBGRAPH_H
#define MINDSPORE_LITE_TOOLS_FUNC_GRAPH_SUBGRAPH_H
#include <memory>
#include <string>
#include <vector>
#include <map>
#include <set>
#include <utility>
#include "src/common/log_adapter.h"
#include "include/errorcode.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
namespace mindspore::lite {
class SubGraph;
using SubGraphPtr = std::shared_ptr<SubGraph>;
// A SubGraph is a mutable view over a subset of the CNodes of one FuncGraph
// (`belong_anf_`).  It tracks the member nodes plus the boundary nodes feeding
// into / out of the set, supports merging with neighbouring subgraphs, and can
// finally be split out of the belonging graph as a partial call
// (ApplySubGraph).  Node ownership stays with the FuncGraph.
class SubGraph {
 public:
  explicit SubGraph(FuncGraphPtr belong_anf, std::string graph_name = "")
      : belong_anf_(std::move(belong_anf)), name_(std::move(graph_name)) {}
  virtual ~SubGraph() = default;

  // Builds the node / in-node / out-node sets, optionally seeded with
  // head_nodes.  Returns an error code (see include/errorcode.h).
  int Init(const std::set<CNodePtr> &head_nodes = {});
  // Re-initializes this subgraph from the given node set.
  int Reset(const std::set<CNodePtr> &nodes, const std::set<CNodePtr> &head_nodes = {});
  // Attempts to absorb `subgraph` into this one; true when a merge happened.
  bool MergeSubGraph(const SubGraphPtr &subgraph);

  std::set<CNodePtr> GetNodes() const;
  std::set<CNodePtr> GetInCNodes() const;
  std::set<CNodePtr> GetOutCNodes() const;

  // Extracts this subgraph from belong_anf_ into its own FuncGraph invoked via
  // a partial call node.  Returns an error code (see include/errorcode.h).
  int ApplySubGraph();

 private:
  std::set<CNodePtr> GetInputCNodes() const;
  std::set<CNodePtr> GetOutputCNodes() const;
  // init subgraph methods
  int InitSubGraphNode(const std::set<CNodePtr> &head_nodes);
  int InitSubGraphInNode();
  int InitSubGraphOutNode();
  // merge subgraph methods
  std::set<CNodePtr> FindCommonOutputs(const SubGraphPtr &subgraph) const;
  bool IfDependOnSameNode(const SubGraphPtr &subgraph) const;
  // apply subgraph methods
  SubGraphPtr FindBeforeSubGraphInBelongAnf() const;
  SubGraphPtr FindAfterSubGraphInBelongAnf() const;
  int CreateParameterForPartialSubGraph(const FuncGraphPtr &sub_graph, std::vector<AnfNodePtr> *partial_inputs,
                                        std::map<AnfNodePtr, AnfNodePtr> *partial_inputs_and_subgraph_input_map);
  int CreateCNodeForPartialSubGraph(const FuncGraphPtr &sub_graph,
                                    const std::map<AnfNodePtr, AnfNodePtr> &partial_inputs_and_subgraph_input_map);
  int CreatePartialInBelongAnf();
  static int SetFuncGraphOutput(const FuncGraphPtr &graph, const std::set<CNodePtr> &outputs);

 private:
  std::set<CNodePtr> nodes_;      // nodes belonging to this subgraph
  std::set<CNodePtr> in_nodes_;   // boundary nodes feeding into the subgraph
  std::set<CNodePtr> out_nodes_;  // boundary nodes consuming subgraph outputs
  const FuncGraphPtr belong_anf_ = nullptr;  // graph the nodes are owned by
  const std::string name_;
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_TOOLS_FUNC_GRAPH_SUBGRAPH_H
| 1,058 |
1,144 | <filename>backend/de.metas.handlingunits.base/src/test/java/de/metas/handlingunits/shipmentschedule/util/ShipmentScheduleHelper.java
package de.metas.handlingunits.shipmentschedule.util;
import static org.adempiere.model.InterfaceWrapperHelper.getModelTableId;
import static org.adempiere.model.InterfaceWrapperHelper.loadOutOfTrx;
import static org.adempiere.model.InterfaceWrapperHelper.newInstance;
import static org.adempiere.model.InterfaceWrapperHelper.newInstanceOutOfTrx;
import static org.adempiere.model.InterfaceWrapperHelper.saveRecord;
/*
* #%L
* de.metas.handlingunits.base
* %%
* Copyright (C) 2015 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;
import org.adempiere.warehouse.WarehouseId;
import org.compiere.model.I_C_BPartner;
import org.compiere.model.I_C_BPartner_Location;
import org.compiere.model.I_C_Order;
import org.compiere.model.I_C_UOM;
import org.compiere.model.I_M_Product;
import org.compiere.model.I_M_Warehouse;
import de.metas.bpartner.BPartnerId;
import de.metas.bpartner.BPartnerLocationId;
import de.metas.business.BusinessTestHelper;
import de.metas.handlingunits.HUTestHelper;
import de.metas.handlingunits.expectations.HUTransactionExpectation;
import de.metas.handlingunits.expectations.ShipmentScheduleQtyPickedExpectations;
import de.metas.handlingunits.hutransaction.IHUTransactionCandidate;
import de.metas.handlingunits.hutransaction.impl.HUTransactionCandidate;
import de.metas.handlingunits.model.I_C_OrderLine;
import de.metas.handlingunits.model.I_M_HU;
import de.metas.handlingunits.model.I_M_HU_Item;
import de.metas.handlingunits.model.I_M_ShipmentSchedule_QtyPicked;
import de.metas.inoutcandidate.api.IShipmentScheduleAllocBL;
import de.metas.inoutcandidate.api.IShipmentScheduleBL;
import de.metas.inoutcandidate.api.IShipmentSchedulePA;
import de.metas.inoutcandidate.model.I_M_ShipmentSchedule;
import de.metas.order.DeliveryRule;
import de.metas.product.ProductId;
import de.metas.quantity.StockQtyAndUOMQtys;
import de.metas.uom.UomId;
import de.metas.util.Services;
/**
* Miscellaneous helpers for writing tests which involve {@link I_M_HU} and {@link I_M_ShipmentSchedule}.
*
* @author tsa
*
*/
public class ShipmentScheduleHelper
{
    private final HUTestHelper helper;

    public final IShipmentScheduleBL shipmentScheduleBL = Services.get(IShipmentScheduleBL.class);
    public final IShipmentSchedulePA shipmentSchedulesRepo = Services.get(IShipmentSchedulePA.class);
    public final IShipmentScheduleAllocBL shipmentScheduleAllocBL = Services.get(IShipmentScheduleAllocBL.class);

    // Masterdata created once in the constructor and reused by every shipment
    // schedule this helper creates.
    private final WarehouseId defaultWarehouseId;
    private final BPartnerId defaultCustomerId;
    private final BPartnerLocationId defaultCustomerLocationId;

    /**
     * Creates the helper and sets up the default warehouse, customer and
     * customer location used for all shipment schedules created later.
     *
     * @param helper the HU test helper that provides the test context
     */
    public ShipmentScheduleHelper(final HUTestHelper helper)
    {
        this.helper = helper;

        final I_M_Warehouse warehouse = newInstanceOutOfTrx(I_M_Warehouse.class);
        saveRecord(warehouse);
        defaultWarehouseId = WarehouseId.ofRepoId(warehouse.getM_Warehouse_ID());

        final I_C_BPartner defaultCustomer = BusinessTestHelper.createBPartner("test customer");
        defaultCustomerId = BPartnerId.ofRepoId(defaultCustomer.getC_BPartner_ID());

        final I_C_BPartner_Location defaultCustomerLocation = BusinessTestHelper.createBPartnerLocation(defaultCustomer);
        defaultCustomerLocationId = BPartnerLocationId.ofRepoId(defaultCustomerId, defaultCustomerLocation.getC_BPartner_Location_ID());
    }

    /**
     * Asserts that the given HU transaction candidate matches the schedule's
     * product/UOM and the expected quantity, and that the schedule's QtyPicked
     * equals {@code scheduleQtyPickedExpected}.
     *
     * @param trx the transaction candidate under test
     * @param schedule the shipment schedule the transaction refers to
     * @param trxQtyExpected expected quantity of the transaction candidate
     * @param scheduleQtyPickedExpected expected QtyPicked on the schedule
     */
    public void assertValidTransaction(final IHUTransactionCandidate trx,
            final I_M_ShipmentSchedule schedule,
            final BigDecimal trxQtyExpected,
            final BigDecimal scheduleQtyPickedExpected)
    {
        new HUTransactionExpectation<>()
                .product(loadOutOfTrx(schedule.getM_Product_ID(), I_M_Product.class))
                .qty(trxQtyExpected)
                .uom(shipmentScheduleBL.getUomOfProduct(schedule))
                .referencedModel(schedule)
                .assertExpected(trx);

        new ShipmentScheduleQtyPickedExpectations()
                .shipmentSchedule(schedule)
                .qtyPicked(scheduleQtyPickedExpected)
                .assertExpected_ShipmentSchedule("shipment schedule");
    }

    /**
     * Creates an order, an order line and a shipment schedule referencing them.
     *
     * @param product the product to deliver
     * @param uom UOM of the order line and of {@code qtyPickedInitial}
     * @param qtyToDeliver both QtyOrdered_Calculated and QtyToDeliver of the new schedule
     * @param qtyPickedInitial if not {@code null} and not zero, this method also creates a
     *            {@link I_M_ShipmentSchedule_QtyPicked} record with that quantity
     * @return the newly created and saved shipment schedule
     */
    public I_M_ShipmentSchedule createShipmentSchedule(
            final org.compiere.model.I_M_Product product,
            final I_C_UOM uom,
            final BigDecimal qtyToDeliver,
            final BigDecimal qtyPickedInitial)
    {
        //
        // Create Order and Order Line
        final I_C_Order order = newInstance(I_C_Order.class);
        saveRecord(order);

        final I_C_OrderLine orderLine = newInstance(I_C_OrderLine.class, helper.getContextProvider());
        orderLine.setC_Order_ID(order.getC_Order_ID());
        orderLine.setC_UOM_ID(uom.getC_UOM_ID());
        saveRecord(orderLine);

        //
        // Create shipment schedule
        final I_M_ShipmentSchedule shipmentSchedule = newInstance(I_M_ShipmentSchedule.class, helper.getContextProvider());
        shipmentSchedule.setM_Warehouse_ID(defaultWarehouseId.getRepoId());
        shipmentSchedule.setM_Product_ID(product.getM_Product_ID());
        shipmentSchedule.setC_BPartner_ID(defaultCustomerId.getRepoId());
        shipmentSchedule.setC_BPartner_Location_ID(defaultCustomerLocationId.getRepoId());
        // task 09005: set QtyOrdered_calculated because it's the initial value for the newly created shipment schedule
        shipmentSchedule.setQtyOrdered_Calculated(qtyToDeliver);
        shipmentSchedule.setQtyToDeliver(qtyToDeliver);
        shipmentSchedule.setC_Order_ID(order.getC_Order_ID());
        shipmentSchedule.setC_OrderLine(orderLine);
        // the schedule's referenced record is the order line
        shipmentSchedule.setAD_Table_ID(getModelTableId(orderLine));
        shipmentSchedule.setRecord_ID(orderLine.getC_OrderLine_ID());
        shipmentSchedule.setDeliveryRule(DeliveryRule.AVAILABILITY.getCode());
        saveRecord(shipmentSchedule);

        //
        // Set initial QtyPicked and validate
        final ShipmentScheduleQtyPickedExpectations shipmentScheduleExpectations = new ShipmentScheduleQtyPickedExpectations()
                .shipmentSchedule(shipmentSchedule)
                .qtyPicked("0")
                .assertExpected_ShipmentSchedule("create shipment schedule");

        if (qtyPickedInitial != null && qtyPickedInitial.signum() != 0)
        {
            shipmentScheduleAllocBL.createNewQtyPickedRecord(
                    shipmentSchedule,
                    StockQtyAndUOMQtys.createConvert(
                            qtyPickedInitial,
                            ProductId.ofRepoId(product.getM_Product_ID()),
                            UomId.ofRepoId(uom.getC_UOM_ID())));

            shipmentScheduleExpectations
                    .qtyPicked(qtyPickedInitial)
                    .assertExpected_ShipmentSchedule("create shipment schedule, after setting inital qtyPicked");
        }

        return shipmentSchedule;
    }

    /**
     * Create dummy counterpart HU Transactions: for each given candidate, a
     * counterpart with the negated quantity on {@code dummyItem} is created and
     * paired with it.
     *
     * @param trxs the transaction candidates to pair
     * @param dummyItem the HU item the counterparts are booked on
     * @return the given transactions interleaved with their created counterparts
     */
    public List<IHUTransactionCandidate> createHUTransactionDummyCounterparts(
            final List<IHUTransactionCandidate> trxs,
            final I_M_HU_Item dummyItem)
    {
        final List<IHUTransactionCandidate> trxsFinal = new ArrayList<>();
        for (final IHUTransactionCandidate trx : trxs)
        {
            // Create counterpart
            final HUTransactionCandidate trxCounterpart = new HUTransactionCandidate(
                    trx.getReferencedModel(), // referenced object
                    dummyItem, // M_HU_Item
                    dummyItem, // VHU M_HU_Item
                    trx.getProductId(),
                    trx.getQuantity().negate(),
                    trx.getDate());
            trx.pair(trxCounterpart);

            trxsFinal.add(trx);
            trxsFinal.add(trxCounterpart);
        }
        return trxsFinal;
    }
}
| 2,878 |
335 | {
"word": "Premonition",
"definitions": [
"a strong feeling that something is about to happen, especially something unpleasant"
],
"parts-of-speech": "noun"
}
| 68 |
304 | /*
* Copyright 2019-2021 CloudNetService team & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cloudnetservice.cloudnet.ext.npcs.bukkit.listener;
import com.github.juliarn.npc.event.PlayerNPCInteractEvent;
import de.dytanic.cloudnet.common.collection.Pair;
import de.dytanic.cloudnet.driver.CloudNetDriver;
import de.dytanic.cloudnet.driver.service.ServiceInfoSnapshot;
import de.dytanic.cloudnet.ext.bridge.BridgeServiceProperty;
import de.dytanic.cloudnet.ext.bridge.player.IPlayerManager;
import eu.cloudnetservice.cloudnet.ext.npcs.CloudNPC;
import eu.cloudnetservice.cloudnet.ext.npcs.NPCAction;
import eu.cloudnetservice.cloudnet.ext.npcs.bukkit.BukkitNPCManagement;
import eu.cloudnetservice.cloudnet.ext.npcs.bukkit.BukkitNPCProperties;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.stream.Collectors;
import org.bukkit.entity.Player;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.inventory.InventoryClickEvent;
import org.bukkit.inventory.Inventory;
import org.bukkit.inventory.ItemStack;
/**
 * Bukkit listener that handles player interactions with CloudNet NPCs: left/right
 * clicks on the NPC entity itself, and clicks inside the NPC's server-selection
 * inventory.
 */
public class NPCInventoryListener implements Listener {

  private static final Random RANDOM = new Random();

  private final BukkitNPCManagement npcManagement;
  private final IPlayerManager playerManager = CloudNetDriver.getInstance().getServicesRegistry()
    .getFirstService(IPlayerManager.class);
  // Cache from NPC entity id -> its properties.  NOTE: the mapping function below
  // may return null for unknown entity ids; HashMap.computeIfAbsent does not
  // store null, so such misses are re-resolved on every interaction.
  private final Map<Integer, BukkitNPCProperties> propertiesCache = new HashMap<>();

  public NPCInventoryListener(BukkitNPCManagement npcManagement) {
    this.npcManagement = npcManagement;
  }

  /**
   * Handles a player clicking an NPC: opens the server inventory or directly
   * connects the player, depending on the NPC's configured left/right click action.
   */
  @EventHandler
  public void handleNPCInteract(PlayerNPCInteractEvent event) {
    // interact events fire for both hands; only react to the main hand
    if (event.getHand() != PlayerNPCInteractEvent.Hand.MAIN_HAND) {
      return;
    }

    Player player = event.getPlayer();
    int entityId = event.getNPC().getEntityId();

    // resolve (and cache) the NPC properties whose entity id matches
    BukkitNPCProperties properties = this.propertiesCache
      .computeIfAbsent(entityId, key -> this.npcManagement.getNPCProperties().values().stream()
        .filter(npcProperty -> npcProperty.getEntityId() == key)
        .findFirst()
        .orElse(null));

    if (properties != null) {
      CloudNPC cloudNPC = properties.getHolder();
      PlayerNPCInteractEvent.EntityUseAction action = event.getUseAction();

      if (action == PlayerNPCInteractEvent.EntityUseAction.INTERACT_AT
        || action == PlayerNPCInteractEvent.EntityUseAction.ATTACK) {
        // right click -> configured right-click action, left click (attack) -> left-click action
        NPCAction npcAction = action == PlayerNPCInteractEvent.EntityUseAction.INTERACT_AT
          ? cloudNPC.getRightClickAction()
          : cloudNPC.getLeftClickAction();

        if (npcAction == NPCAction.OPEN_INVENTORY) {
          player.openInventory(properties.getInventory());
        } else if (npcAction.name().startsWith("DIRECT")) {
          // pick a target service among those currently backing this NPC
          List<ServiceInfoSnapshot> services = this.npcManagement.filterNPCServices(cloudNPC).stream()
            .map(Pair::getFirst)
            .collect(Collectors.toList());

          if (services.size() > 0) {
            String targetServiceName = null;

            switch (npcAction) {
              case DIRECT_CONNECT_RANDOM:
                targetServiceName = services.get(RANDOM.nextInt(services.size())).getName();
                break;
              case DIRECT_CONNECT_LOWEST_PLAYERS:
                // service with the fewest online players (missing property counts as 0)
                targetServiceName = services.stream()
                  .min(Comparator.comparingInt(
                    service -> service.getProperty(BridgeServiceProperty.ONLINE_COUNT).orElse(0)
                  ))
                  .map(ServiceInfoSnapshot::getName)
                  .orElse(null);
                break;
              case DIRECT_CONNECT_HIGHEST_PLAYERS:
                // service with the most online players (missing property counts as 0)
                targetServiceName = services.stream()
                  .max(Comparator.comparingInt(
                    service -> service.getProperty(BridgeServiceProperty.ONLINE_COUNT).orElse(0)
                  ))
                  .map(ServiceInfoSnapshot::getName)
                  .orElse(null);
                break;
              default:
                break;
            }

            if (targetServiceName != null) {
              this.playerManager.getPlayerExecutor(player.getUniqueId()).connect(targetServiceName);
            }
          }
        }
      }
    }
  }

  /**
   * Handles clicks inside an NPC server-selection inventory: cancels the click
   * and, if the clicked slot is mapped to a server, connects the player to it.
   */
  @EventHandler
  public void handleInventoryClick(InventoryClickEvent event) {
    Inventory inventory = event.getClickedInventory();
    ItemStack currentItem = event.getCurrentItem();

    // holder == null distinguishes the NPC's custom inventory from regular ones
    if (inventory != null && currentItem != null && inventory.getHolder() == null && event
      .getWhoClicked() instanceof Player) {
      this.npcManagement.getNPCProperties().values().stream()
        .filter(properties -> properties.getInventory().equals(inventory))
        .findFirst()
        .ifPresent(properties -> {
          event.setCancelled(true);

          int slot = event.getSlot();
          if (properties.getServerSlots().containsKey(slot)) {
            Player player = (Player) event.getWhoClicked();
            String serverName = properties.getServerSlots().get(slot);

            this.playerManager.getPlayerExecutor(player.getUniqueId()).connect(serverName);
          }
        });
    }
  }
}
| 2,258 |
335 | {
"word": "Cavity",
"definitions": [
"An empty space within a solid object.",
"A decayed part of a tooth."
],
"parts-of-speech": "Noun"
} | 78 |
669 | <gh_stars>100-1000
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import os
import re
import CloudFlare
import boto3
def generate_batch_id():
    """Return a batch id for a HITL run: the current local time encoded as the
    14-digit integer YYYYMMDDHHMMSS."""
    import datetime

    return int("{:%Y%m%d%H%M%S}".format(datetime.datetime.now()))
def deregister_dashboard_subdomain(batch_id):
    """
    Delete the Cloudflare DNS records of every dashboard subdomain belonging to
    the given HITL batch, i.e. records named dashboard-<batch_id>-<n>.craftassist.io.

    Requires CLOUDFLARE_TOKEN, CLOUDFLARE_ZONE_ID and CLOUDFLARE_EMAIL to be set
    in the environment; silently does nothing otherwise.
    """
    if (
        os.getenv("CLOUDFLARE_TOKEN")
        and os.getenv("CLOUDFLARE_ZONE_ID")
        and os.getenv("CLOUDFLARE_EMAIL")
    ):
        logging.info("Deregister subdomains on craftassist.io")
        cf_token = os.getenv("CLOUDFLARE_TOKEN")
        zone_id = os.getenv("CLOUDFLARE_ZONE_ID")
        cf_email = os.getenv("CLOUDFLARE_EMAIL")
        cf = CloudFlare.CloudFlare(email=cf_email, token=cf_token)
        dns_records = cf.zones.dns_records.get(zone_id)
        # Escape the literal dots: previously "." matched any character, so e.g.
        # "dashboard-<id>-1XcraftassistYio" would also have matched.
        pattern = re.compile(fr"dashboard-{batch_id}-\d+\.craftassist\.io")
        for record in dns_records:
            print(f'{record["name"]} pattern : {batch_id}')
            if pattern.match(record["name"]):
                print(f"matched cf record to be deleted: {record['name']}")
                cf.zones.dns_records.delete(zone_id, record["id"])
                logging.debug(f'Deleted cf dns record: {record["name"]}')
                print(f'Deleted cf dns record: {record["name"]}')
def dedup_commands(command_list):
    """Drop case-insensitive duplicates from command_list.

    The first occurrence of each command wins; its original casing and the
    overall order are preserved.
    """
    seen = set()
    deduped = []
    for command in command_list:
        key = command.lower()
        if key in seen:
            continue
        seen.add(key)
        deduped.append(command)
    return deduped
def examine_hit(hit_id):
    """
    Print the number and contents of the *submitted* (not yet approved/rejected)
    assignments of a given MTurk HIT.

    Credentials come from MTURK_AWS_ACCESS_KEY_ID / MTURK_AWS_SECRET_ACCESS_KEY,
    the region from MTURK_AWS_REGION (default us-east-1).
    """
    access_key = os.getenv("MTURK_AWS_ACCESS_KEY_ID")
    secret_key = os.getenv("MTURK_AWS_SECRET_ACCESS_KEY")
    aws_region = os.getenv("MTURK_AWS_REGION", default="us-east-1")
    # NOTE(review): dev_flag is hard-coded to None, so the sandbox branch below
    # is dead and the production endpoint is always used -- confirm intended.
    dev_flag = None
    if dev_flag:
        # MTurk sandbox endpoint (currently unreachable, see note above)
        MTURK_URL = "https://mturk-requester-sandbox.{}.amazonaws.com".format(aws_region)
    else:
        # production MTurk endpoint
        MTURK_URL = "https://mturk-requester.{}.amazonaws.com".format(aws_region)
    mturk = boto3.client(
        "mturk",
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=aws_region,
        endpoint_url=MTURK_URL,
    )
    worker_results = mturk.list_assignments_for_hit(
        HITId=hit_id, AssignmentStatuses=["Submitted"]
    )
    print(worker_results["NumResults"])
    print(worker_results["Assignments"])
def delete_all_mturk_hits():
    """
    Delete all HITs of a given account.
    Please use it with caution.

    NOTE(review): this function talks to the MTurk *sandbox* endpoint, so
    production HITs are not touched -- confirm before relying on it for cleanup.
    """
    import os
    import boto3
    from datetime import datetime

    access_key = os.getenv("MTURK_AWS_ACCESS_KEY_ID")
    secret_key = os.getenv("MTURK_AWS_SECRET_ACCESS_KEY")
    aws_region = os.getenv("MTURK_AWS_REGION", default="us-east-1")
    MTURK_URL = "https://mturk-requester-sandbox.{}.amazonaws.com".format(aws_region)
    mturk = boto3.client(
        "mturk",
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=aws_region,
        endpoint_url=MTURK_URL,
    )
    all_hits = mturk.list_hits()["HITs"]
    hit_ids = [item["HITId"] for item in all_hits]
    # This is slow but there's no better way to get the status of pending HITs
    for hit_id in hit_ids:
        # Get HIT status
        status = mturk.get_hit(HITId=hit_id)["HIT"]["HITStatus"]
        try:
            # Expire the HIT in the past so it becomes deletable, then delete it.
            response = mturk.update_expiration_for_hit(HITId=hit_id, ExpireAt=datetime(2015, 1, 1))
            mturk.delete_hit(HITId=hit_id)
        except:
            # Best-effort sweep: HITs that cannot (yet) be deleted are skipped
            # deliberately; errors are swallowed so the loop continues.
            pass
        print(f"Hit {hit_id}, status: {status}")
if __name__ == "__main__":
    # pass
    # Ad-hoc manual invocation: deregister the dashboard subdomains of batch
    # 20211014214358, retried 100 times -- presumably to work around flaky
    # Cloudflare deletes (TODO confirm the retry loop is intentional).
    for i in range(100):
        deregister_dashboard_subdomain(20211014214358)
    # examine_hit("34YWR3PJ2AD51SZWORZ4M41QBOG0XV")
| 1,885 |
839 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.microprofile.client.cdi;
import java.util.NoSuchElementException;
import javax.enterprise.context.spi.CreationalContext;
import javax.enterprise.inject.spi.Bean;
import javax.enterprise.inject.spi.BeanManager;
import javax.enterprise.inject.spi.CDI;
import org.apache.cxf.Bus;
public final class CDIUtils {
private CDIUtils() {
}
static BeanManager getCurrentBeanManager(Bus bus) {
BeanManager bm = bus.getExtension(BeanManager.class);
if (bm == null) {
bm = getCurrentBeanManager();
bus.setExtension(bm, BeanManager.class);
}
return bm;
}
static BeanManager getCurrentBeanManager() {
return CDI.current().getBeanManager();
}
static <T> Instance<T> getInstanceFromCDI(Class<T> clazz) {
return getInstanceFromCDI(clazz, null);
}
static <T> Instance<T> getInstanceFromCDI(Class<T> clazz, Bus bus) {
Instance<T> instance;
try {
instance = findBean(clazz, bus);
} catch (ExceptionInInitializerError | NoClassDefFoundError | IllegalStateException ex) {
// expected if no CDI implementation is available
instance = null;
} catch (NoSuchElementException ex) {
// expected if ClientHeadersFactory is not managed by CDI
instance = null;
}
return instance;
}
@SuppressWarnings("unchecked")
private static <T> Instance<T> findBean(Class<T> clazz, Bus bus) {
BeanManager beanManager = bus == null ? getCurrentBeanManager() : getCurrentBeanManager(bus);
Bean<?> bean = beanManager.getBeans(clazz).iterator().next();
CreationalContext<?> ctx = beanManager.createCreationalContext(bean);
Instance<T> instance = new Instance<>((T) beanManager.getReference(bean, clazz, ctx),
beanManager.isNormalScope(bean.getScope()) ? () -> { } : ctx::release);
return instance;
}
} | 998 |
937 | {
"extends": "./node_modules/gts/",
"rules": {
"no-prototype-builtins": "off",
"no-unused-vars": "off",
"require-yield": "off",
"node/no-deprecated-api": "off",
"@typescript-eslint/no-unused-vars": "off",
"@typescript-eslint/no-empty-interface": "off",
"@typescript-eslint/no-explicit-any": "off",
"@typescript-eslint/prefer-as-const": "off"
}
}
| 182 |
14,668 | <filename>net/spdy/http2_priority_dependencies.h<gh_stars>1000+
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_SPDY_HTTP2_PRIORITY_DEPENDENCIES_H_
#define NET_SPDY_HTTP2_PRIORITY_DEPENDENCIES_H_
#include <list>
#include <map>
#include <utility>
#include <vector>
#include "net/base/net_export.h"
#include "net/third_party/quiche/src/spdy/core/spdy_protocol.h"
namespace net {
// A helper class encapsulating the state and logic to set the priority fields
// for HTTP/2 streams based on their spdy::SpdyPriority and the ordering of
// creation and deletion of the streams. This implentation includes a gross hack
// in which the HTTP/2 weight is set to a transformation of the
// spdy::SpdyPriority value in order to support servers which do not honor
// HTTP/2 stream dependencies and instead treat the weight value like a SPDY/3
// priority.
// TODO(rch): Eliminate this gross hack when servers no longer act like this.
class NET_EXPORT_PRIVATE Http2PriorityDependencies {
 public:
  Http2PriorityDependencies();
  ~Http2PriorityDependencies();
  // Called when a stream is created. This is used for both client-initiated
  // and server-initiated (pushed) streams.
  // On return, |*parent_stream_id| is set to the stream id that should become
  // the parent of this stream, |*exclusive| is set to whether that dependency
  // should be exclusive, and |*weight| is set to the relative weight for the
  // created stream given this priority.
  void OnStreamCreation(spdy::SpdyStreamId id,
                        spdy::SpdyPriority priority,
                        spdy::SpdyStreamId* parent_stream_id,
                        int* weight,
                        bool* exclusive);
  // Called when a stream is destroyed.
  void OnStreamDestruction(spdy::SpdyStreamId id);
  // One requested dependency change for a single stream.
  struct DependencyUpdate {
    spdy::SpdyStreamId id;
    spdy::SpdyStreamId parent_stream_id;
    int weight;
    bool exclusive;
  };
  // Called when a stream's priority has changed. Returns a list of
  // dependency updates that should be sent to the server to describe
  // the requested priority change. The updates should be sent in the
  // given order.
  std::vector<DependencyUpdate> OnStreamUpdate(spdy::SpdyStreamId id,
                                               spdy::SpdyPriority new_priority);
 private:
  // The requirements for the internal data structure for this class are:
  // a) Constant time insertion of entries at the end of the list,
  // b) Fast removal of any entry based on its id.
  // c) Constant time lookup of the entry at the end of the list.
  // std::list would satisfy (a) & (c), but some form of map is
  // needed for (b). The priority must be included in the map
  // entries so that deletion can determine which list in id_priority_lists_
  // to erase from.
  using IdList = std::list<std::pair<spdy::SpdyStreamId, spdy::SpdyPriority>>;
  using EntryMap = std::map<spdy::SpdyStreamId, IdList::iterator>;
  // One list per spdy::SpdyPriority value, ordered by stream creation time.
  IdList id_priority_lists_[spdy::kV3LowestPriority + 1];
  // Tracks the location of an id anywhere in the above vector of lists.
  // Iterators to list elements remain valid until those particular elements
  // are erased.
  EntryMap entry_by_stream_id_;
  // Finds the lowest-priority stream that has a priority >= |priority|.
  // Returns false if there are no such streams.
  // Otherwise, returns true and sets |*bound|.
  bool PriorityLowerBound(spdy::SpdyPriority priority, IdList::iterator* bound);
  // Finds the stream just above |id| in the total order.
  // Returns false if there are no streams with a higher priority.
  // Otherwise, returns true and sets |*parent|.
  bool ParentOfStream(spdy::SpdyStreamId id, IdList::iterator* parent);
  // Finds the stream just below |id| in the total order.
  // Returns false if there are no streams with a lower priority.
  // Otherwise, returns true and sets |*child|.
  bool ChildOfStream(spdy::SpdyStreamId id, IdList::iterator* child);
};
} // namespace net
#endif // NET_SPDY_HTTP2_PRIORITY_DEPENDENCIES_H_
| 1,345 |
435 | //
// Copyright (C) 2014 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <jni.h>
#include <jni_utils/jni_utils.hpp>
#include <cxxporthelper/cstdint>
#include <cxxporthelper/memory>
#include <oslmp/OpenSLMediaPlayer.hpp>
#include "OpenSLMediaPlayerJNIBinder.hpp"
extern "C" bool OpenSLMediaPlayerContext_GetInstanceFromJniHandle(jlong handle,
android::sp<oslmp::OpenSLMediaPlayerContext> &dest);
// Bundles the native objects backing a single Java-side OpenSLMediaPlayer:
// the player context, the player itself, and the Java<->native callback binder.
// A raw pointer to this holder is round-tripped through Java as a jlong handle.
class OpenSLMediaPlayerJniContextHolder {
public:
    android::sp<oslmp::OpenSLMediaPlayerContext> context;
    android::sp<oslmp::OpenSLMediaPlayer> mp;
    android::sp<oslmp::jni::OpenSLMediaPlayerJNIBinder> binder;
public:
    OpenSLMediaPlayerJniContextHolder() : context(), mp(), binder() {}
    ~OpenSLMediaPlayerJniContextHolder()
    {
        // Unbind before dropping refs — presumably so Java-side callbacks stop
        // before the player/context are torn down; confirm ordering constraint.
        if (binder.get()) {
            binder->unbind();
        }
        mp.clear();
        binder.clear();
        context.clear();
    }
    // Encodes the holder pointer as an opaque jlong handle for Java.
    static jlong toJniHandle(OpenSLMediaPlayerJniContextHolder *holder) noexcept
    {
        return static_cast<jlong>(reinterpret_cast<uintptr_t>(holder));
    }
    // Decodes a jlong handle back into a holder pointer (may be null).
    static OpenSLMediaPlayerJniContextHolder *fromJniHandle(jlong handle) noexcept
    {
        return reinterpret_cast<OpenSLMediaPlayerJniContextHolder *>(handle);
    }
};
typedef OpenSLMediaPlayerJniContextHolder Holder;
#ifdef __cplusplus
extern "C" {
#endif
// utility methods
// Resolves the OpenSLMediaPlayer instance behind a Java-side handle.
// |dest| is always cleared first; returns true only when a live player
// was found and stored into |dest|.
bool OpenSLMediaPlayer_GetInstanceFromJniHandle(jlong handle, android::sp<oslmp::OpenSLMediaPlayer> &dest) noexcept
{
    dest.clear();
    Holder *holder = Holder::fromJniHandle(handle);
    if (holder) {
        dest = holder->mp;
    }
    return dest.get();
}
// ---
// Creates the native player backing a Java OpenSLMediaPlayer and returns an
// opaque holder handle (0 on any failure: bad args, dead context, init error,
// or allocation failure).
JNIEXPORT jlong JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_createNativeImplHandle(JNIEnv *env, jclass clazz,
                                                                              jlong contextHandle, jobject weak_thiz,
                                                                              jintArray params) noexcept
{
    const_jint_array params_(env, params);
    // NOTE(review): this checks the raw jintArray, not the wrapper params_;
    // if the wrapper can fail to pin the array this may be the wrong check —
    // confirm against const_jint_array's contract.
    if (!params) {
        return 0;
    }
    try
    {
        std::unique_ptr<Holder> holder(new Holder());
        OpenSLMediaPlayerContext_GetInstanceFromJniHandle(contextHandle, holder->context);
        if (!(holder->context.get()))
            return 0;
        oslmp::OpenSLMediaPlayer::initialize_args_t init_args;
        // params[0]: non-zero enables fade on start/stop.
        init_args.use_fade = (params_[0] != 0);
        holder->mp = new oslmp::OpenSLMediaPlayer(holder->context);
        holder->binder = new oslmp::jni::OpenSLMediaPlayerJNIBinder(env, clazz, weak_thiz);
        holder->binder->bind(holder->context, holder->mp);
        if (holder->mp->initialize(init_args) != OSLMP_RESULT_SUCCESS)
            return 0;
        // Ownership transfers to the Java side; freed in deleteNativeImplHandle.
        return Holder::toJniHandle(holder.release());
    }
    catch (const std::bad_alloc & /*e*/) {}
    return 0;
}
// Destroys the native holder created by createNativeImplHandle.
// Safe to call with a 0 handle.
JNIEXPORT void JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_deleteNativeImplHandle(JNIEnv *env, jclass clazz,
                                                                              jlong handle) noexcept
{
    if (!handle) {
        return;
    }
    delete Holder::fromJniHandle(handle);
}
// --- Data-source JNI bridges -------------------------------------------------
// Each wrapper validates the handle (OSLMP_RESULT_INVALID_HANDLE when 0) and
// forwards to the corresponding OpenSLMediaPlayer::setDataSource* overload.
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setDataSourcePathImplNative(JNIEnv *env, jclass clazz,
                                                                                   jlong handle, jstring path) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    jstring_wrapper path_w(env, path);
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->setDataSourcePath(path_w.data());
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setDataSourceUriImplNative(JNIEnv *env, jclass clazz,
                                                                                  jlong handle, jstring uri) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    jstring_wrapper uri_w(env, uri);
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->setDataSourceUri(uri_w.data());
}
// File-descriptor variant covering the whole file.
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setDataSourceFdImplNative__JI(JNIEnv *env, jclass clazz,
                                                                                    jlong handle, jint fd) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->setDataSourceFd(fd);
}
// File-descriptor variant restricted to [offset, offset + length).
JNIEXPORT jint JNICALL Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setDataSourceFdImplNative__JIJJ(
    JNIEnv *env, jclass clazz, jlong handle, jint fd, jlong offset, jlong length) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->setDataSourceFd(fd, offset, length);
}
// --- Player lifecycle JNI bridges --------------------------------------------
// One-to-one forwards to prepare / prepareAsync / start / stop / pause / reset;
// each returns OSLMP_RESULT_INVALID_HANDLE when the handle is 0, otherwise the
// native player's result code.
JNIEXPORT jint JNICALL Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_prepareImplNative(JNIEnv *env,
                                                                                               jclass clazz,
                                                                                               jlong handle) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->prepare();
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_prepareAsyncImplNative(JNIEnv *env, jclass clazz,
                                                                              jlong handle) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->prepareAsync();
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_startImplNative(JNIEnv *env, jclass clazz, jlong handle) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->start();
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_stopImplNative(JNIEnv *env, jclass clazz, jlong handle) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->stop();
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_pauseImplNative(JNIEnv *env, jclass clazz, jlong handle) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->pause();
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_resetImplNative(JNIEnv *env, jclass clazz, jlong handle) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->reset();
}
// --- Volume / query JNI bridges ----------------------------------------------
// Getters return the result code and write the value into the caller-supplied
// single-element Java array (out-parameter pattern).
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setVolumeImplNative(JNIEnv *env, jclass clazz, jlong handle,
                                                                           jfloat leftVolume,
                                                                           jfloat rightVolume) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->setVolume(leftVolume, rightVolume);
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_getAudioSessionIdImplNative(JNIEnv *env, jclass clazz, jlong handle,
                                                                                   jintArray audioSessionId) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    jint_array audioSessionId_(env, audioSessionId);
    if (!audioSessionId_) {
        return OSLMP_RESULT_ERROR;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->getAudioSessionId(audioSessionId_.data());
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_getDurationImplNative(JNIEnv *env, jclass clazz, jlong handle,
                                                                             jintArray duration) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    jint_array duration_(env, duration);
    if (!duration_) {
        return OSLMP_RESULT_ERROR;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->getDuration(duration_.data());
}
// --- Position / looping JNI bridges ------------------------------------------
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_getCurrentPositionImplNative(JNIEnv *env, jclass clazz,
                                                                                    jlong handle,
                                                                                    jintArray position) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    jint_array position_(env, position);
    if (!position_) {
        return OSLMP_RESULT_ERROR;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->getCurrentPosition(position_.data());
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_seekToImplNative(JNIEnv *env, jclass clazz, jlong handle,
                                                                        jint msec) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->seekTo(msec);
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setLoopingImplNative(JNIEnv *env, jclass clazz, jlong handle,
                                                                            jboolean looping) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->setLooping((looping == JNI_TRUE));
}
// Writes the looping flag into looping[0]; returns the native result code.
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_isLoopingImplNative(JNIEnv *env, jclass clazz, jlong handle,
                                                                           jbooleanArray looping) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    jboolean_array looping_(env, looping);
    if (!looping_) {
        return OSLMP_RESULT_ERROR;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    bool value = false;
    int result = holder->mp->isLooping(&value);
    looping_[0] = (value) ? JNI_TRUE : JNI_FALSE;
    return result;
}
// --- Misc player JNI bridges --------------------------------------------------
// Writes the playing flag into playing[0]; returns the native result code.
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_isPlayingImplNative(JNIEnv *env, jclass clazz, jlong handle,
                                                                           jbooleanArray playing) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    jboolean_array playing_(env, playing);
    if (!playing_) {
        return OSLMP_RESULT_ERROR;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    bool value = false;
    int result = holder->mp->isPlaying(&value);
    playing_[0] = (value) ? JNI_TRUE : JNI_FALSE;
    return result;
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setAudioStreamTypeImplNative(JNIEnv *env, jclass clazz,
                                                                                    jlong handle,
                                                                                    jint streamtype) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    int result = holder->mp->setAudioStreamType(streamtype);
    return result;
}
// Links another player to start when this one completes; a 0 nextHandle
// clears the link (an empty sp<> is passed down).
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setNextMediaPlayerImplNative(JNIEnv *env, jclass clazz,
                                                                                    jlong handle,
                                                                                    jlong nextHandle) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    Holder *nextHolder = Holder::fromJniHandle(nextHandle);
    android::sp<oslmp::OpenSLMediaPlayer> next;
    if (nextHolder) {
        next = nextHolder->mp;
    }
    return holder->mp->setNextMediaPlayer(&next);
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_attachAuxEffectImplNative(JNIEnv *env, jclass clazz,
                                                                                 jlong handle, jint effectId) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->attachAuxEffect(effectId);
}
JNIEXPORT jint JNICALL
Java_com_h6ah4i_android_media_opensl_OpenSLMediaPlayer_setAuxEffectSendLevelImplNative(JNIEnv *env, jclass clazz,
                                                                                       jlong handle,
                                                                                       jfloat level) noexcept
{
    if (!handle) {
        return OSLMP_RESULT_INVALID_HANDLE;
    }
    Holder *holder = Holder::fromJniHandle(handle);
    return holder->mp->setAuxEffectSendLevel(level);
}
#ifdef __cplusplus
}
#endif
| 7,066 |
// NOTE(review): this looks like an automatically reduced compiler test case
// (forward declarations only, then std::char_traits deriving from the
// __gnu_cxx forward declaration). Declaring new templates in namespace std
// is undefined behavior in ordinary application code — presumably intentional
// here for compiler/tooling testing; confirm before reuse.
namespace std
{
  template < typename > struct char_traits;
}
namespace __gnu_cxx
{
  template < typename > struct char_traits;
}
namespace std
{
  template < class _CharT > struct char_traits:__gnu_cxx::char_traits <
    _CharT >
  {
  };
}
| 94 |
848 | <filename>tensorflow/lite/delegates/gpu/cl/texture2d.cc
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/gpu/cl/texture2d.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
// Creates new 4-channel 2D texture with cl_channel_type elements
// Creates a new 4-channel 2D texture with cl_channel_type elements.
// When |data| is non-null the texture is initialized from it via
// CL_MEM_COPY_HOST_PTR; otherwise its contents are undefined.
Status CreateTexture2D(int width, int height, cl_channel_type type, void* data,
                       CLContext* context, Texture2D* result) {
  // Value-initialize the descriptor: the assignments below do not cover every
  // member (e.g. image_array_size, and the mem_object field on OpenCL 2.0+
  // headers), and clCreateImage requires unused members to be zero. The
  // previous code left them uninitialized (stack garbage).
  cl_image_desc desc = {};
  desc.image_type = CL_MEM_OBJECT_IMAGE2D;
  desc.image_width = width;
  desc.image_height = height;
  desc.image_depth = 0;
  desc.image_row_pitch = 0;
  desc.image_slice_pitch = 0;
  desc.num_mip_levels = 0;
  desc.num_samples = 0;
  desc.buffer = nullptr;
  cl_image_format format;
  format.image_channel_order = CL_RGBA;
  format.image_channel_data_type = type;
  cl_mem_flags flags = CL_MEM_READ_WRITE;
  if (data != nullptr) {
    flags |= CL_MEM_COPY_HOST_PTR;
  }
  cl_int error_code;
  cl_mem texture = CreateImage2DLegacy(context->context(), flags, &format,
                                       &desc, data, &error_code);
  if (error_code != CL_SUCCESS) {
    return UnknownError(
        absl::StrCat("Failed to create Texture2D (clCreateImage)",
                     CLErrorCodeToString(error_code)));
  }
  *result = Texture2D(texture, width, height, type);
  return OkStatus();
}
} // namespace
// Takes ownership of |texture|; released via clReleaseMemObject in Release().
Texture2D::Texture2D(cl_mem texture, int width, int height,
                     cl_channel_type type)
    : texture_(texture), width_(width), height_(height), channel_type_(type) {}
// Move constructor: steals the cl_mem handle and leaves the source empty so
// its destructor does not release the object.
Texture2D::Texture2D(Texture2D&& texture)
    : texture_(texture.texture_),
      width_(texture.width_),
      height_(texture.height_),
      channel_type_(texture.channel_type_) {
  texture.texture_ = nullptr;
  texture.width_ = 0;
  texture.height_ = 0;
}
// Move assignment: releases the current handle, then swaps state with the
// source (the source ends up owning the old handle and releases it).
Texture2D& Texture2D::operator=(Texture2D&& texture) {
  if (this != &texture) {
    Release();
    std::swap(channel_type_, texture.channel_type_);
    std::swap(width_, texture.width_);
    std::swap(height_, texture.height_);
    std::swap(texture_, texture.texture_);
  }
  return *this;
}
Texture2D::~Texture2D() { Release(); }
// Releases the underlying cl_mem (if any) and resets to the empty state.
// Safe to call repeatedly.
void Texture2D::Release() {
  if (texture_) {
    clReleaseMemObject(texture_);
    texture_ = nullptr;
    width_ = 0;
    height_ = 0;
  }
}
// Creates new 4-channel 2D texture with f32 elements.
// Contents are uninitialized (no host data is supplied).
Status CreateTexture2DRGBA32F(int width, int height, CLContext* context,
                              Texture2D* result) {
  return CreateTexture2D(width, height, CL_FLOAT, nullptr, context, result);
}
// Creates new 4-channel 2D texture with f16 elements.
// Contents are uninitialized (no host data is supplied).
Status CreateTexture2DRGBA16F(int width, int height, CLContext* context,
                              Texture2D* result) {
  return CreateTexture2D(width, height, CL_HALF_FLOAT, nullptr, context,
                         result);
}
// Creates an uninitialized RGBA 2D texture; FLOAT32 maps to CL_FLOAT,
// everything else to CL_HALF_FLOAT (same policy as the original branches).
Status CreateTexture2DRGBA(DataType type, int width, int height,
                           CLContext* context, Texture2D* result) {
  const cl_channel_type channel_type =
      (type == DataType::FLOAT32) ? CL_FLOAT : CL_HALF_FLOAT;
  return CreateTexture2D(width, height, channel_type, nullptr, context, result);
}
// Same as above, but initializes the texture contents from |data|.
Status CreateTexture2DRGBA(DataType type, int width, int height, void* data,
                           CLContext* context, Texture2D* result) {
  const cl_channel_type channel_type =
      (type == DataType::FLOAT32) ? CL_FLOAT : CL_HALF_FLOAT;
  return CreateTexture2D(width, height, channel_type, data, context, result);
}
} // namespace cl
} // namespace gpu
} // namespace tflite
| 1,645 |
523 | __version__ = '2.20201202'
| 12 |
538 | <filename>ICPOdometry.h
/*
* ICPOdometry.h
*
* Created on: 17 Sep 2012
* Author: thomas
*/
#ifndef ICPODOMETRY_H_
#define ICPODOMETRY_H_
#include "Cuda/internal.h"
#include <sophus/se3.hpp>
#include <vector>
#include <Eigen/Core>
#include <Eigen/Geometry>
// GPU-accelerated ICP between two depth frames: set the reference ("model")
// frame with initICPModel(), the current frame with initICP(), then call
// getIncrementalTransformation() to refine the relative pose.
class ICPOdometry {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  // width/height: depth image size; cx,cy,fx,fy: pinhole intrinsics.
  // distThresh (meters) and angleThresh (sine of the angle) reject
  // correspondences that are too far apart or too differently oriented.
  ICPOdometry(int width, int height, float cx, float cy, float fx, float fy,
              float distThresh = 0.10f,
              float angleThresh = sinf(20.f * 3.14159254f / 180.f));
  virtual ~ICPOdometry();
  // Uploads the current depth frame; values beyond depthCutoff are discarded.
  void initICP(unsigned short *depth, const float depthCutoff = 20.0f);
  // Uploads the reference (previous/model) depth frame.
  void initICPModel(unsigned short *depth, const float depthCutoff = 20.0f);
  // Refines T_prev_curr in place; threads/blocks configure the CUDA launch.
  void getIncrementalTransformation(Sophus::SE3d &T_prev_curr, int threads,
                                    int blocks);
  // Diagnostics from the most recent solve.
  float lastError;
  float lastInliers;
private:
  // Per-pyramid-level device buffers (vertex and normal maps) for the
  // previous and current frames.
  std::vector<DeviceArray2D<unsigned short>> depth_tmp;
  std::vector<DeviceArray2D<float>> vmaps_prev;
  std::vector<DeviceArray2D<float>> nmaps_prev;
  std::vector<DeviceArray2D<float>> vmaps_curr;
  std::vector<DeviceArray2D<float>> nmaps_curr;
  Intr intr;
  // Reduction buffers for the 6x6 normal-equation terms (29 floats).
  DeviceArray<Eigen::Matrix<float, 29, 1, Eigen::DontAlign>> sumData;
  DeviceArray<Eigen::Matrix<float, 29, 1, Eigen::DontAlign>> outData;
  // Number of pyramid levels used for coarse-to-fine alignment.
  static const int NUM_PYRS = 3;
  std::vector<int> iterations;
  float dist_thresh;
  float angle_thresh;
  const int width;
  const int height;
  const float cx, cy, fx, fy;
};
#endif /* ICPODOMETRY_H_ */
| 617 |
470 | <filename>tableauserverclient/server/endpoint/dqw_endpoint.py
import logging
from .. import RequestFactory, DQWItem
from .endpoint import Endpoint
from .exceptions import MissingRequiredFieldError
logger = logging.getLogger(__name__)
class _DataQualityWarningEndpoint(Endpoint):
    """Endpoint for managing data quality warnings (DQWs) attached to one
    resource type (the ``resource_type`` URL segment, e.g. datasources).
    """

    def __init__(self, parent_srv, resource_type):
        super(_DataQualityWarningEndpoint, self).__init__(parent_srv)
        self.resource_type = resource_type

    @property
    def baseurl(self):
        # .../sites/<site_id>/dataQualityWarnings/<resource_type>
        return "{0}/sites/{1}/dataQualityWarnings/{2}".format(
            self.parent_srv.baseurl, self.parent_srv.site_id, self.resource_type
        )

    def add(self, resource, warning):
        """Create a data quality warning on `resource`; returns parsed DQW items."""
        url = "{baseurl}/{content_luid}".format(baseurl=self.baseurl, content_luid=resource.id)
        add_req = RequestFactory.DQW.add_req(warning)
        response = self.post_request(url, add_req)
        warnings = DQWItem.from_response(response.content, self.parent_srv.namespace)
        logger.info("Added dqw for resource {0}".format(resource.id))
        return warnings

    def update(self, resource, warning):
        """Update the data quality warning on `resource`; returns parsed DQW items."""
        url = "{baseurl}/{content_luid}".format(baseurl=self.baseurl, content_luid=resource.id)
        # Renamed from `add_req` — this builds an update request.
        update_req = RequestFactory.DQW.update_req(warning)
        response = self.put_request(url, update_req)
        warnings = DQWItem.from_response(response.content, self.parent_srv.namespace)
        # Fixed copy-paste log message (previously said "Added").
        logger.info("Updated dqw for resource {0}".format(resource.id))
        return warnings

    def clear(self, resource):
        """Delete all data quality warnings from `resource`."""
        url = "{baseurl}/{content_luid}".format(baseurl=self.baseurl, content_luid=resource.id)
        return self.delete_request(url)

    def populate(self, item):
        """Attach a lazy fetcher for `item`'s data quality warnings.

        Raises:
            MissingRequiredFieldError: if `item` has no server-assigned ID.
        """
        if not item.id:
            error = "Server item is missing ID. Item must be retrieved from server first."
            raise MissingRequiredFieldError(error)

        def dqw_fetcher():
            return self._get_data_quality_warnings(item)

        item._set_data_quality_warnings(dqw_fetcher)
        # Fixed copy-paste log message (previously said "Populated permissions").
        logger.info("Populated data quality warnings for item (ID: {0})".format(item.id))

    def _get_data_quality_warnings(self, item, req_options=None):
        """Fetch and parse the DQWs currently attached to `item`."""
        url = "{baseurl}/{content_luid}".format(baseurl=self.baseurl, content_luid=item.id)
        server_response = self.get_request(url, req_options)
        dqws = DQWItem.from_response(server_response.content, self.parent_srv.namespace)
        return dqws
| 965 |
1,204 | <filename>collections/src/main/java/com/gs/collections/impl/block/comparator/FunctionComparator.java
/*
* Copyright 2011 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gs.collections.impl.block.comparator;
import java.util.Comparator;
import com.gs.collections.api.block.SerializableComparator;
import com.gs.collections.api.block.function.Function;
/**
* Simple {@link Comparator} that uses a {@link Function}
* to select a value from the underlying object and compare it against a known value to determine ordering.
*/
public class FunctionComparator<T, V>
implements SerializableComparator<T>
{
private static final long serialVersionUID = 1L;
private final Function<? super T, ? extends V> function;
private final Comparator<V> comparator;
public FunctionComparator(Function<? super T, ? extends V> function, Comparator<V> comparator)
{
this.function = function;
this.comparator = comparator;
}
public int compare(T o1, T o2)
{
V attrValue1 = this.function.valueOf(o1);
V attrValue2 = this.function.valueOf(o2);
return this.comparator.compare(attrValue1, attrValue2);
}
}
| 540 |
9,957 | <gh_stars>1000+
#!/usr/bin/env python
import csv
import email
from email import policy
import imaplib
import logging
import os
import ssl
from bs4 import BeautifulSoup
credential_path = "credentials.txt"
csv_path = "mails.csv"
logger = logging.getLogger('imap_poller')
host = "imap.gmail.com"
port = 993
ssl_context = ssl.create_default_context()
def connect_to_mailbox():
    """Open an IMAPS connection, log in with credentials read from
    ``credential_path`` (line 1: user, line 2: password), and select INBOX.

    Returns:
        (mail, messages): the IMAP4_SSL client and the SELECT response data;
        ``messages[0]`` carries the mailbox message count as bytes.
    """
    # get mail connection
    mail = imaplib.IMAP4_SSL(host, port, ssl_context=ssl_context)
    with open(credential_path, "rt") as fr:
        user = fr.readline().strip()
        pw = fr.readline().strip()
    mail.login(user, pw)
    # get mail box response and select a mail box
    status, messages = mail.select("INBOX")
    return mail, messages
# get plain text out of html mails
def get_text(email_body):
    """Strip HTML markup from an email body and return its visible text,
    one line per element."""
    return BeautifulSoup(email_body, "lxml").get_text(separator="\n", strip=True)
def write_to_csv(mail, writer, N, total_no_of_mails):
    """Fetch the newest N messages from the selected mailbox and append one
    CSV row per message: [date, from, subject, plain text body].

    Iterates message sequence numbers from total_no_of_mails downward
    (newest first). Attachments are skipped.
    """
    for i in range(total_no_of_mails, total_no_of_mails - N, -1):
        res, data = mail.fetch(str(i), "(RFC822)")
        response = data[0]
        if isinstance(response, tuple):
            msg = email.message_from_bytes(response[1], policy=policy.default)
            # get header data
            email_subject = msg["subject"]
            email_from = msg["from"]
            email_date = msg["date"]
            email_text = ""
            # if the email message is multipart
            if msg.is_multipart():
                # iterate over email parts
                # NOTE(review): email_text is overwritten on every decodable
                # part, so the LAST part wins — confirm that is intended.
                for part in msg.walk():
                    # extract content type of email
                    content_type = part.get_content_type()
                    content_disposition = str(part.get("Content-Disposition"))
                    try:
                        # get the email email_body
                        email_body = part.get_payload(decode=True)
                        if email_body:
                            # NOTE(review): assumes utf-8; parts in other
                            # charsets raise and are swallowed below.
                            email_text = get_text(email_body.decode('utf-8'))
                    except Exception as exc:
                        logger.warning('Caught exception: %r', exc)
                    if (
                        content_type == "text/plain"
                        and "attachment" not in content_disposition
                    ):
                        # print text/plain emails and skip attachments
                        # print(email_text)
                        pass
                    elif "attachment" in content_disposition:
                        pass
            else:
                # extract content type of email
                content_type = msg.get_content_type()
                # get the email email_body
                email_body = msg.get_payload(decode=True)
                if email_body:
                    email_text = get_text(email_body.decode('utf-8'))
            # NOTE(review): email_text starts as "" and is never set to None,
            # so this condition is always true as written.
            if email_text is not None:
                # Write data in the csv file
                row = [email_date, email_from, email_subject, email_text]
                writer.writerow(row)
        else:
            logger.warning('%s:%i: No message extracted', "INBOX", i)
def main():
    """Connect to the mailbox and dump the newest N messages to ``csv_path``."""
    mail, messages = connect_to_mailbox()
    logging.basicConfig(level=logging.WARNING)
    total_no_of_mails = int(messages[0])
    # no. of latest mails to fetch
    # set it equal to total_no_of_emails to fetch all mail in the inbox
    N = 2
    with open(csv_path, "wt", encoding="utf-8", newline="") as fw:
        writer = csv.writer(fw)
        writer.writerow(["Date", "From", "Subject", "Text mail"])
        try:
            write_to_csv(mail, writer, N, total_no_of_mails)
        except Exception as exc:
            logger.warning('Caught exception: %r', exc)
| 1,850 |
660 | /*
* Copyright (c) 2017, Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
//!
//! \file media_libva_caps_g9_kbl.cpp
//! \brief This file implements the C++ class/interface for KBL media capbilities.
//!
#include "media_libva_util.h"
#include "media_libva.h"
#include "media_libva_caps_g9_kbl.h"
#include "media_libva_caps_factory.h"
MediaLibvaCapsG9Kbl::MediaLibvaCapsG9Kbl(DDI_MEDIA_CONTEXT *mediaCtx)
    : MediaLibvaCapsG9(mediaCtx)
{
    // Delegates entirely to the Gen9 base caps; no KBL-specific setup here.
}
// Looks up the encoder macroblock processing rate (MB/s) for KBL from
// per-SKU tables indexed by [target-usage tuIdx][GT tier gtIdx].
// Returns VA_STATUS_ERROR_INVALID_PARAMETER for null pointers, unknown GT
// tiers, or TU/GT combinations with no table entry.
VAStatus MediaLibvaCapsG9Kbl::GetMbProcessingRateEnc(
        MEDIA_FEATURE_TABLE *skuTable,
        uint32_t tuIdx,
        uint32_t codecMode,
        bool vdencActive,
        uint32_t *mbProcessingRatePerSec)
{
    DDI_CHK_NULL(skuTable, "Null pointer", VA_STATUS_ERROR_INVALID_PARAMETER);
    DDI_CHK_NULL(mbProcessingRatePerSec, "Null pointer", VA_STATUS_ERROR_INVALID_PARAMETER);
    uint32_t gtIdx = 0;
    // Calculate the GT index based on GT type.
    // Column order in the tables below: GT4=0, GT3=1, GT2=2, GT1.5=3, GT1=4.
    if (MEDIA_IS_SKU(skuTable, FtrGT1))
    {
        gtIdx = 4;
    }
    else if (MEDIA_IS_SKU(skuTable, FtrGT1_5))
    {
        gtIdx = 3;
    }
    else if (MEDIA_IS_SKU(skuTable, FtrGT2))
    {
        gtIdx = 2;
    }
    else if (MEDIA_IS_SKU(skuTable, FtrGT3))
    {
        gtIdx = 1;
    }
    else if (MEDIA_IS_SKU(skuTable, FtrGT4))
    {
        gtIdx = 0;
    }
    else
    {
        return VA_STATUS_ERROR_INVALID_PARAMETER;
    }
    if (codecMode == CODECHAL_ENCODE_MODE_AVC)
    {
        if (MEDIA_IS_SKU(skuTable, FtrULX))
        {
            // ULX parts have no GT3/GT4 configurations (zero columns below).
            const uint32_t mbRate[7][5] =
            {
                // GT4 | GT3 | GT2 | GT1.5 | GT1
                { 0, 0, 1029393, 1029393, 676280 },
                { 0, 0, 975027, 975027, 661800 },
                { 0, 0, 776921, 776921, 640000 },
                { 0, 0, 776921, 776921, 640000 },
                { 0, 0, 776921, 776921, 640000 },
                { 0, 0, 416051, 416051, 317980 },
                { 0, 0, 214438, 214438, 180655 }
            };
            if (gtIdx == 0 || gtIdx == 1)
            {
                return VA_STATUS_ERROR_INVALID_PARAMETER;
            }
            *mbProcessingRatePerSec = mbRate[tuIdx][gtIdx];
        }
        else if (MEDIA_IS_SKU(skuTable, FtrULT))
        {
            // NOTE(review): naming is inconsistent with the other two tables
            // (KBLULT_MB_RATE vs. mbRate); values are what matter here.
            const uint32_t KBLULT_MB_RATE[7][5] =
            {
                // GT4 | GT3 | GT2 | GT1.5 | GT1
                { 1544090, 1544090, 1544090, 1029393, 676280 },
                { 1462540, 1462540, 1462540, 975027, 661800 },
                { 1165381, 1165381, 1165381, 776921, 640000 },
                { 1165381, 1165381, 1165381, 776921, 640000 },
                { 1165381, 1165381, 1165381, 776921, 640000 },
                { 624076, 624076, 624076, 416051, 317980 },
                { 321657, 321657, 321657, 214438, 180655 }
            };
            *mbProcessingRatePerSec = KBLULT_MB_RATE[tuIdx][gtIdx];
        }
        else
        {
            // regular KBL
            const uint32_t mbRate[7][5] =
            {
                // GT4 | GT3 | GT2 | GT1.5 | GT1
                { 1544090, 1544090, 1544090, 1029393, 676280 },
                { 1462540, 1462540, 1462540, 975027, 661800 },
                { 1165381, 1165381, 1165381, 776921, 640000 },
                { 1165381, 1165381, 1165381, 776921, 640000 },
                { 1165381, 1165381, 1165381, 776921, 640000 },
                { 624076, 624076, 624076, 416051, 317980 },
                { 321657, 321657, 321657, 214438, 180655 }
            };
            *mbProcessingRatePerSec = mbRate[tuIdx][gtIdx];
        }
    }
    else if (codecMode == CODECHAL_ENCODE_MODE_HEVC)
    {
        // HEVC rates do not vary by GT tier.
        const uint32_t mbRate[7][5] =
        {
            // GT4 | GT3 | GT2 | GT1.5 | GT1
            { 500000, 500000, 500000, 500000, 500000 },
            { 500000, 500000, 500000, 500000, 500000 },
            { 250000, 250000, 250000, 250000, 250000 },
            { 250000, 250000, 250000, 250000, 250000 },
            { 250000, 250000, 250000, 250000, 250000 },
            { 125000, 125000, 125000, 125000, 125000 },
            { 125000, 125000, 125000, 125000, 125000 }
        };
        *mbProcessingRatePerSec = mbRate[tuIdx][gtIdx];
    }
    return VA_STATUS_SUCCESS;
}
// Explicit instantiation of the caps factory is provided in another
// translation unit; declare it extern so this TU does not re-instantiate it.
extern template class MediaLibvaCapsFactory<MediaLibvaCaps, DDI_MEDIA_CONTEXT>;

// Static-initialization self-registration: binds the IGFX_KABYLAKE product id
// to the KBL caps implementation in the factory. The bool exists only to force
// RegisterCaps to run at load time; its value is otherwise unused.
bool kblRegistered = MediaLibvaCapsFactory<MediaLibvaCaps, DDI_MEDIA_CONTEXT>::
RegisterCaps<MediaLibvaCapsG9Kbl>((uint32_t)IGFX_KABYLAKE);
| 2,786 |
910 | from stone.backends.python_rsrc.stone_base import *
| 17 |
1,379 | /*
* Copyright 2008-2013 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.client.protocol.pb;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import voldemort.VoldemortException;
import voldemort.client.protocol.pb.VAdminProto.ROStoreVersionDirMap;
import voldemort.client.protocol.pb.VAdminProto.RebalanceTaskInfoMap;
import voldemort.client.protocol.pb.VAdminProto.StoreToPartitionsIds;
import voldemort.client.protocol.pb.VProto.KeyedVersions;
import voldemort.client.rebalance.RebalanceTaskInfo;
import voldemort.store.ErrorCodeMapper;
import voldemort.utils.ByteArray;
import voldemort.versioning.ClockEntry;
import voldemort.versioning.VectorClock;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;
import voldemort.xml.ClusterMapper;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.Message;
/**
* Helper functions for serializing or deserializing client requests in protocol
* buffers
*
*
*/
public class ProtoUtils {

    /*
     * Begin decode RebalanceTaskInfoMap methods
     */

    /**
     * Given a protobuf rebalance-partition info, converts it into our
     * rebalance-partition info
     *
     * @param rebalanceTaskInfoMap Proto-buff version of
     *        RebalanceTaskInfoMap
     * @return RebalanceTaskInfo object.
     */
    public static RebalanceTaskInfo decodeRebalanceTaskInfoMap(VAdminProto.RebalanceTaskInfoMap rebalanceTaskInfoMap) {
        RebalanceTaskInfo rebalanceTaskInfo = new RebalanceTaskInfo(
                rebalanceTaskInfoMap.getStealerId(),
                rebalanceTaskInfoMap.getDonorId(),
                decodeStoreToPartitionIds(rebalanceTaskInfoMap.getPerStorePartitionIdsList()),
                // the initial cluster travels as cluster.xml text; parse it back
                new ClusterMapper().readCluster(new StringReader(rebalanceTaskInfoMap.getInitialCluster())));
        return rebalanceTaskInfo;
    }

    /**
     * Converts a list of protobuf (store name, partition ids) tuples into a
     * map keyed by store name.
     *
     * @param storeToPartitionIds protobuf tuples, one entry per store
     * @return map of store name to its list of partition ids
     */
    public static HashMap<String, List<Integer>> decodeStoreToPartitionIds(List<StoreToPartitionsIds> storeToPartitionIds) {
        HashMap<String, List<Integer>> storeToPartitionIdsList = Maps.newHashMap();
        for(StoreToPartitionsIds tuple: storeToPartitionIds) {
            // NOTE(review): getPartitionIdsList() hands back the
            // protobuf-backed list, presumably unmodifiable — confirm callers
            // copy before mutating.
            storeToPartitionIdsList.put(tuple.getStoreName(), tuple.getPartitionIdsList());
        }
        return storeToPartitionIdsList;
    }

    /*
     * End decode RebalanceTaskInfoMap methods
     */

    /*
     * Begin encode RebalanceTaskInfo methods
     */

    /**
     * Given a rebalance-task info, convert it into the protobuf equivalent
     *
     * @param stealInfo Rebalance task info
     * @return Protobuf equivalent of the same
     */
    public static RebalanceTaskInfoMap encodeRebalanceTaskInfoMap(RebalanceTaskInfo stealInfo) {
        return RebalanceTaskInfoMap.newBuilder()
                                   .setStealerId(stealInfo.getStealerId())
                                   .setDonorId(stealInfo.getDonorId())
                                   .addAllPerStorePartitionIds(ProtoUtils.encodeStoreToPartitionsTuple(stealInfo.getStoreToPartitionIds()))
                                   // cluster is serialized to its XML form
                                   .setInitialCluster(new ClusterMapper().writeCluster(stealInfo.getInitialCluster()))
                                   .build();
    }

    /**
     * Inverse of {@link #decodeStoreToPartitionIds(List)}: converts a map of
     * store name to partition ids into the protobuf tuple list.
     *
     * @param storeToPartitionIds map of store name to its partition ids
     * @return list of protobuf (store name, partition ids) tuples
     */
    public static List<StoreToPartitionsIds> encodeStoreToPartitionsTuple(HashMap<String, List<Integer>> storeToPartitionIds) {
        List<StoreToPartitionsIds> perStorePartitionTuples = Lists.newArrayList();
        for(Entry<String, List<Integer>> entry: storeToPartitionIds.entrySet()) {
            StoreToPartitionsIds.Builder tupleBuilder = StoreToPartitionsIds.newBuilder();
            tupleBuilder.setStoreName(entry.getKey());
            tupleBuilder.addAllPartitionIds(entry.getValue());
            perStorePartitionTuples.add(tupleBuilder.build());
        }
        return perStorePartitionTuples;
    }

    /*
     * End encode RebalanceTaskInfo methods
     */

    /**
     * Flattens a list of read-only store (store name, store dir) protobuf
     * entries into a map of store name to store directory.
     *
     * @param metadataMap protobuf read-only store metadata entries
     * @return map of store name to its store directory string
     */
    public static Map<String, String> encodeROMap(List<ROStoreVersionDirMap> metadataMap) {
        Map<String, String> storeToValue = Maps.newHashMap();
        for(ROStoreVersionDirMap currentStore: metadataMap) {
            storeToValue.put(currentStore.getStoreName(), currentStore.getStoreDir());
        }
        return storeToValue;
    }

    /**
     * Builds a protobuf Error from an exception, using the mapper to obtain
     * the numeric error code for the exception type.
     *
     * @param mapper maps exception types to wire error codes
     * @param e the exception to encode
     * @return partially-built protobuf Error (code + message set)
     */
    public static VProto.Error.Builder encodeError(ErrorCodeMapper mapper, VoldemortException e) {
        return VProto.Error.newBuilder()
                           .setErrorCode(mapper.getCode(e))
                           .setErrorMessage(e.getMessage());
    }

    /**
     * Encodes a versioned value: raw bytes plus its vector clock.
     *
     * @param versioned value + version to encode
     * @return protobuf Versioned builder with value and version set
     */
    public static VProto.Versioned.Builder encodeVersioned(Versioned<byte[]> versioned) {
        return VProto.Versioned.newBuilder()
                               .setValue(ByteString.copyFrom(versioned.getValue()))
                               .setVersion(ProtoUtils.encodeClock(versioned.getVersion()));
    }

    /**
     * Inverse of {@link #encodeVersioned(Versioned)}.
     *
     * @param versioned protobuf versioned value
     * @return value bytes paired with the decoded vector clock
     */
    public static Versioned<byte[]> decodeVersioned(VProto.Versioned versioned) {
        return new Versioned<byte[]>(versioned.getValue().toByteArray(),
                                     decodeClock(versioned.getVersion()));
    }

    /**
     * Given a list of value versions for the metadata keys we are just
     * interested in the value at index 0 This is because even if we have to
     * update the cluster.xml we marshall a single key into a versioned list
     * Hence we just look at the value at index 0
     *
     * @param keyValue keyed versions holding exactly the one entry we need
     * @return decoded versioned value at index 0
     */
    public static Versioned<byte[]> decodeVersionedMetadataKeyValue(KeyedVersions keyValue) {
        return decodeVersioned(keyValue.getVersions(0));
    }

    /**
     * Decodes each protobuf versioned value in the list, preserving order.
     *
     * @param versioned protobuf versioned values
     * @return decoded versioned values
     */
    public static List<Versioned<byte[]>> decodeVersions(List<VProto.Versioned> versioned) {
        List<Versioned<byte[]>> values = new ArrayList<Versioned<byte[]>>(versioned.size());
        for(VProto.Versioned v: versioned)
            values.add(decodeVersioned(v));
        return values;
    }

    /**
     * Decodes a protobuf vector clock into a {@link VectorClock}.
     *
     * @param encoded protobuf clock (entries + timestamp)
     * @return equivalent VectorClock
     */
    public static VectorClock decodeClock(VProto.VectorClock encoded) {
        List<ClockEntry> entries = new ArrayList<ClockEntry>(encoded.getEntriesCount());
        for(VProto.ClockEntry entry: encoded.getEntriesList())
            // node ids are narrowed to short here; the wire carries an int
            entries.add(new ClockEntry((short) entry.getNodeId(), entry.getVersion()));
        return new VectorClock(entries, encoded.getTimestamp());
    }

    /**
     * Encodes a version as a protobuf vector clock. Assumes the given
     * {@link Version} actually is a {@link VectorClock}; any other
     * implementation will cause a ClassCastException.
     *
     * @param version the version (must be a VectorClock)
     * @return protobuf clock builder with timestamp and all entries set
     */
    public static VProto.VectorClock.Builder encodeClock(Version version) {
        VectorClock clock = (VectorClock) version;
        VProto.VectorClock.Builder encoded = VProto.VectorClock.newBuilder();
        encoded.setTimestamp(clock.getTimestamp());
        for(ClockEntry entry: clock.getEntries())
            encoded.addEntries(VProto.ClockEntry.newBuilder()
                                                .setNodeId(entry.getNodeId())
                                                .setVersion(entry.getVersion()));
        return encoded;
    }

    /** Copies a protobuf ByteString into a {@link ByteArray}. */
    public static ByteArray decodeBytes(ByteString string) {
        return new ByteArray(string.toByteArray());
    }

    /** Copies a {@link ByteArray} into a protobuf ByteString. */
    public static ByteString encodeBytes(ByteArray array) {
        return ByteString.copyFrom(array.get());
    }

    /** Copies a raw transform byte array into a protobuf ByteString. */
    public static ByteString encodeTransform(byte[] transform) {
        return ByteString.copyFrom(transform);
    }

    /**
     * Writes one length-prefixed message: a 4-byte big-endian size (from
     * DataOutputStream.writeInt) followed by the serialized message bytes.
     * The matching reader is {@link #readToBuilder(DataInputStream,
     * Message.Builder)}.
     *
     * @param output stream to write to
     * @param message message to serialize
     * @throws IOException on write failure
     */
    public static void writeMessage(DataOutputStream output, Message message) throws IOException {
        /*
         * We don't use varints here because the c++ version of the protocol
         * buffer classes seem to be buggy requesting more data than necessary
         * from the underlying stream causing it to block forever
         */
        output.writeInt(message.getSerializedSize());
        CodedOutputStream codedOut = CodedOutputStream.newInstance(output);
        message.writeTo(codedOut);
        codedOut.flush();
    }

    /**
     * Writes the end-of-stream sentinel: a size prefix of -1, which the
     * reader side can distinguish from any real (non-negative) message size.
     *
     * @param output stream to write to
     * @throws IOException on write failure
     */
    public static void writeEndOfStream(DataOutputStream output) throws IOException {
        output.writeInt(-1);
    }

    /**
     * Reads one length-prefixed message (as framed by
     * {@link #writeMessage(DataOutputStream, Message)}) into the given
     * builder: reads the 4-byte size, then limits the coded stream to exactly
     * that many bytes before merging.
     *
     * @param input stream to read from
     * @param builder builder to merge the message into
     * @return the same builder, for chaining
     * @throws IOException on read failure or malformed message
     */
    public static <T extends Message.Builder> T readToBuilder(DataInputStream input, T builder)
            throws IOException {
        int size = input.readInt();
        CodedInputStream codedIn = CodedInputStream.newInstance(input);
        // cap reads at the framed size so the parser cannot over-read the
        // underlying socket stream
        codedIn.pushLimit(size);
        builder.mergeFrom(codedIn);
        return builder;
    }
}
| 3,662 |
1,269 | <gh_stars>1000+
/*
This file is part of the iText (R) project.
Copyright (c) 1998-2021 iText Group NV
Authors: iText Software.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License version 3
as published by the Free Software Foundation with the addition of the
following permission added to Section 15 as permitted in Section 7(a):
FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED BY
ITEXT GROUP. ITEXT GROUP DISCLAIMS THE WARRANTY OF NON INFRINGEMENT
OF THIRD PARTY RIGHTS
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, see http://www.gnu.org/licenses or write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA, 02110-1301 USA, or download the license from the following URL:
http://itextpdf.com/terms-of-use/
The interactive user interfaces in modified source and object code versions
of this program must display Appropriate Legal Notices, as required under
Section 5 of the GNU Affero General Public License.
In accordance with Section 7(b) of the GNU Affero General Public License,
a covered work must retain the producer line in every PDF that is created
or manipulated using iText.
You can be released from the requirements of the license by purchasing
a commercial license. Buying such a license is mandatory as soon as you
develop commercial activities involving the iText software without
disclosing the source code of your own applications.
These activities include: offering paid services to customers as an ASP,
serving PDFs on the fly in a web application, shipping iText with a closed
source product.
For more information, please contact iText Software Corp. at this
address: <EMAIL>
*/
package com.itextpdf.forms.xfdf;
/**
* Represents ids element, child of the xfdf element.
* Corresponds to the ID key in the file dictionary.
* The two attributes are file identifiers for the source or target file designated by the f element, taken
* from the ID entry in the file’s trailer dictionary.
* Attributes: original, modified.
* For more details see paragraph 6.2.3 in Xfdf document specification.
*/
public class IdsObject {

    // Permanent identifier, fixed at the time the file was originally
    // created; it does not change across incremental updates. Expressed as a
    // hexadecimal number (commonly an MD5 checksum).
    private String original;

    // Changing identifier for the modified version of the PDF and its
    // corresponding XFDF document, based on the file contents at the last
    // update. Expressed as a hexadecimal number (commonly an MD5 checksum).
    private String modified;

    public IdsObject() {
    }

    /**
     * Gets the permanent identifier, which is based on the contents of the
     * file at the time it was originally created and does not change when the
     * file is incrementally updated. The value shall be a hexadecimal number.
     *
     * @return the permanent identifier value
     */
    public String getOriginal() {
        return this.original;
    }

    /**
     * Sets the permanent identifier, which is based on the contents of the
     * file at the time it was originally created and does not change when the
     * file is incrementally updated. The value shall be a hexadecimal number;
     * an MD5 checksum is a common choice.
     *
     * @param original the permanent identifier value
     * @return current {@link IdsObject ids object}
     */
    public IdsObject setOriginal(String original) {
        this.original = original;
        return this;
    }

    /**
     * Gets the unique identifier for the modified version of the PDF and its
     * corresponding XFDF document, based on the file's contents at the time
     * it was last updated. The value shall be a hexadecimal number.
     *
     * @return the unique identifier value
     */
    public String getModified() {
        return this.modified;
    }

    /**
     * Sets the unique identifier for the modified version of the PDF and its
     * corresponding XFDF document, based on the file's contents at the time
     * it was last updated. The value shall be a hexadecimal number; an MD5
     * checksum is a common choice.
     *
     * @param modified the unique identifier value
     * @return current {@link IdsObject ids object}
     */
    public IdsObject setModified(String modified) {
        this.modified = modified;
        return this;
    }
}
| 1,608 |
316 | #include <string.h>
/* Struct containing a pointer member that main() deliberately aims at the
 * struct's own int member, making the instance self-referential. */
struct S {
    int *a; /* set to point at a struct S instance's own `b` field */
    int b;
};
int main(void) {
struct S S1, S2;
S1.a = &S1.b;
memcpy(&S2, &S1, sizeof(S1));
*S2.a = 9;
test_assert(S1.b == 9);
return 0;
}
| 121 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.