max_stars_count (int64, 301 to 224k) | text (string, lengths 6 to 1.05M) | token_count (int64, 3 to 727k) |
---|---|---|
14,668 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_SERVICES_PATCH_CONTENT_PATCH_SERVICE_H_
#define COMPONENTS_SERVICES_PATCH_CONTENT_PATCH_SERVICE_H_
#include "base/callback.h"
#include "components/services/patch/public/mojom/file_patcher.mojom-forward.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
namespace patch {
// Launches a new instance of the FilePatcher service in an isolated, sandboxed
// process, and returns a remote interface to control the service. The lifetime
// of the process is tied to that of the Remote. May be called from any thread.
mojo::PendingRemote<mojom::FilePatcher> LaunchFilePatcher();
} // namespace patch
#endif // COMPONENTS_SERVICES_PATCH_CONTENT_PATCH_SERVICE_H_
| 275 |
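A minimal caller-side sketch of the API above (assuming Chromium's standard `mojo::Remote` binding pattern; the function name and body are illustrative, not from the header):

```cpp
// Sketch only: bind the launched FilePatcher service. The sandboxed
// service process stays alive exactly as long as |file_patcher| is bound.
#include "components/services/patch/content/patch_service.h"
#include "mojo/public/cpp/bindings/remote.h"

void UsePatchService() {
  mojo::Remote<patch::mojom::FilePatcher> file_patcher(
      patch::LaunchFilePatcher());
  // ... issue mojom::FilePatcher calls through |file_patcher| here.
  // Resetting or destroying |file_patcher| tears the process down.
}
```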
4,612 | <filename>src/twisted/internet/iocpreactor/iocpsupport.py
__all__ = [
"CompletionPort",
"Event",
"accept",
"connect",
"get_accept_addrs",
"have_connectex",
"makesockaddr",
"maxAddrLen",
"recv",
"recvfrom",
"send",
]
from twisted_iocpsupport.iocpsupport import ( # type: ignore[import]
CompletionPort,
Event,
accept,
connect,
get_accept_addrs,
have_connectex,
makesockaddr,
maxAddrLen,
recv,
recvfrom,
send,
)
| 238 |
307 | #pragma once
#include "globalincs/pstypes.h"
void gr_opengl_deferred_init();
void opengl_clear_deferred_buffers();
void gr_opengl_deferred_lighting_begin();
void gr_opengl_deferred_lighting_end();
void gr_opengl_deferred_lighting_finish();
void gr_opengl_deferred_light_sphere_init(int rings, int segments);
void gr_opengl_deferred_light_cylinder_init(int segments);
void gr_opengl_draw_deferred_light_sphere(const vec3d *position);
void gr_opengl_draw_deferred_light_cylinder(const vec3d *position, const matrix *orient);
void gr_opengl_deferred_shutdown();
void opengl_draw_sphere();
| 223 |
1,700 | <reponame>LinuxUserGD/godot-opengl-4<gh_stars>1000+
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "coneline_intersector.h"
#include "intersector_epilog.h"
namespace embree
{
namespace isa
{
template<int M, bool filter>
struct ConeCurveMiIntersector1
{
typedef LineMi<M> Primitive;
typedef CurvePrecalculations1 Precalculations;
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& line)
{
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom);
const vbool<M> valid = line.valid();
ConeCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Intersect1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& line)
{
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom);
const vbool<M> valid = line.valid();
return ConeCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Occluded1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& line)
{
return PrimitivePointQuery1<Primitive>::pointQuery(query, context, line);
}
};
template<int M, bool filter>
struct ConeCurveMiMBIntersector1
{
typedef LineMi<M> Primitive;
typedef CurvePrecalculations1 Precalculations;
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& line)
{
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom,ray.time());
const vbool<M> valid = line.valid();
ConeCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Intersect1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& line)
{
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom,ray.time());
const vbool<M> valid = line.valid();
return ConeCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Occluded1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& line)
{
return PrimitivePointQuery1<Primitive>::pointQuery(query, context, line);
}
};
template<int M, int K, bool filter>
struct ConeCurveMiIntersectorK
{
typedef LineMi<M> Primitive;
typedef CurvePrecalculationsK<K> Precalculations;
static __forceinline void intersect(const Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
{
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom);
const vbool<M> valid = line.valid();
ConeCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Intersect1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
{
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom);
const vbool<M> valid = line.valid();
return ConeCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Occluded1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
};
template<int M, int K, bool filter>
struct ConeCurveMiMBIntersectorK
{
typedef LineMi<M> Primitive;
typedef CurvePrecalculationsK<K> Precalculations;
static __forceinline void intersect(const Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
{
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom,ray.time()[k]);
const vbool<M> valid = line.valid();
ConeCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Intersect1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
{
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom,ray.time()[k]);
const vbool<M> valid = line.valid();
return ConeCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Occluded1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
};
}
}
| 2,854 |
3,084 | /*++
Copyright (c) 2005 Microsoft Corporation
All rights reserved.
THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
PARTICULAR PURPOSE.
File Name:
xpsfilew.cpp
Abstract:
Implementation of an XPS file writer. This implements ISequentialStream::Write
and essentially wraps a buffer that recieves and stores the part information
so that in can be later compressed and written out.
--*/
#include "precomp.h"
#include "debug.h"
#include "globals.h"
#include "xpsfilew.h"
/*++
Routine Name:
CXPSWriteFile::CXPSWriteFile
Routine Description:
CXPSWriteFile class constructor
Arguments:
None
Return Value:
None
--*/
CXPSWriteFile::CXPSWriteFile() :
CUnknown<ISequentialStream>(IID_ISequentialStream),
m_cbWritten(0)
{
}
/*++
Routine Name:
CXPSWriteFile::CXPSWriteFile
Routine Description:
CXPSWriteFile class constructor
Arguments:
szFileName - The name of the file to write to
Return Value:
None
--*/
CXPSWriteFile::CXPSWriteFile(
PCSTR szFileName
) :
CUnknown<ISequentialStream>(IID_ISequentialStream),
m_cbWritten(0),
m_cstrFileName(szFileName)
{
}
/*++
Routine Name:
CXPSWriteFile::~CXPSWriteFile
Routine Description:
CXPSWriteFile class destructor
Arguments:
None
Return Value:
None
--*/
CXPSWriteFile::~CXPSWriteFile()
{
}
//
// ISequentialStream members
//
/*++
Routine Name:
CXPSWriteFile::Read
Routine Description:
This is the ISequentialStream read method; it is not implemented in
the file writer
Arguments:
Unused
Return Value:
HRESULT
E_NOTIMPL - This method is not implemented
--*/
HRESULT STDMETHODCALLTYPE
CXPSWriteFile::Read(
_Out_writes_bytes_to_(cb, *pcbRead) void*,
_In_ ULONG cb,
_Out_opt_ ULONG* pcbRead
)
{
UNREFERENCED_PARAMETER(cb);
UNREFERENCED_PARAMETER(pcbRead);
return E_NOTIMPL;
}
/*++
Routine Name:
CXPSWriteFile::Write
Routine Description:
This routine implements the ISequentialStream write interface, allowing
clients to write file data.
Arguments:
pData - Pointer to the source data
cbData - The size of the data buffer
pcbWritten - Pointer to a ULONG that receives the number of bytes written
Return Value:
HRESULT
S_OK - On success
E_* - On error
--*/
HRESULT STDMETHODCALLTYPE
CXPSWriteFile::Write(
_In_reads_bytes_(cbData) CONST void* pData,
_In_ ULONG cbData,
_Out_ ULONG* pcbWritten
)
{
HRESULT hr = S_OK;
if (SUCCEEDED(hr = CHECK_POINTER(pData, E_POINTER)) &&
SUCCEEDED(hr = CHECK_POINTER(pcbWritten, E_POINTER)))
{
*pcbWritten = 0;
PBYTE pDest = NULL;
if (SUCCEEDED(hr = m_workFile.GetBuffer(m_cbWritten + cbData, reinterpret_cast<PVOID*>(&pDest))))
{
pDest += m_cbWritten;
CopyMemory(pDest, pData, cbData);
m_cbWritten += cbData;
*pcbWritten = cbData;
}
}
ERR_ON_HR(hr);
return hr;
}
/*++
Routine Name:
CXPSWriteFile::GetBuffer
Routine Description:
This routine retrieves the buffer containing the file data written by
a client
Arguments:
ppv - Pointer to a VOID pointer that receives the buffer
pcb - Pointer to a ULONG that receives the size of the buffer
Return Value:
HRESULT
S_OK - On success
E_* - On error
--*/
HRESULT
CXPSWriteFile::GetBuffer(
_Outptr_result_bytebuffer_(*pcb) PVOID* ppv,
_Out_ ULONG* pcb
)
{
HRESULT hr = S_OK;
if (SUCCEEDED(hr = CHECK_POINTER(ppv, E_POINTER)) &&
SUCCEEDED(hr = CHECK_POINTER(pcb, E_POINTER)))
{
*ppv = NULL;
*pcb = 0;
if (SUCCEEDED(hr = m_workFile.GetBuffer(m_cbWritten, ppv)))
{
*pcb = static_cast<ULONG>(m_cbWritten);
}
}
ERR_ON_HR(hr);
return hr;
}
/*++
Routine Name:
CXPSWriteFile::GetFileName
Routine Description:
This routine retrieves the file name of the currently opened file
Arguments:
pszFileName - Pointer to a string that receives the filename
Return Value:
HRESULT
S_OK - On success
E_* - On error
--*/
HRESULT
CXPSWriteFile::GetFileName(
_Outptr_ PSTR* pszFileName
)
{
HRESULT hr = S_OK;
if (SUCCEEDED(hr = CHECK_POINTER(pszFileName, E_POINTER)))
{
try
{
if (m_cstrFileName.GetLength() > 0)
{
*pszFileName = m_cstrFileName.GetBuffer();
}
else
{
hr = E_PENDING;
}
}
catch (CXDException& e)
{
hr = e;
}
}
ERR_ON_HR(hr);
return hr;
}
| 2,528 |
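A hedged sketch of the intended write-then-read-back flow using the methods above (the part name and payload are illustrative, and stack construction is shown only for brevity; production code would honor the object's COM lifetime semantics):

```cpp
// Sketch: push bytes through ISequentialStream::Write, then fetch the
// accumulated part buffer. Error handling abbreviated.
CXPSWriteFile xpsFile("page1.xml");          // illustrative part name
const char part[] = "<FixedPage/>";
ULONG cbWritten = 0;
HRESULT hr = xpsFile.Write(part, sizeof(part) - 1, &cbWritten);

PVOID pBuffer = NULL;
ULONG cbBuffer = 0;
if (SUCCEEDED(hr))
{
    // On success, cbBuffer equals the total bytes written so far.
    hr = xpsFile.GetBuffer(&pBuffer, &cbBuffer);
}
```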
3,710 | <gh_stars>1000+
#pragma once
#ifndef T_COLOR_FUNCTIONS_INCLUDED
#define T_COLOR_FUNCTIONS_INCLUDED
#include "tpixel.h"
#undef DVAPI
#undef DVVAR
#ifdef TCOLOR_EXPORTS
#define DVAPI DV_EXPORT_API
#define DVVAR DV_EXPORT_VAR
#else
#define DVAPI DV_IMPORT_API
#define DVVAR DV_IMPORT_VAR
#endif
//-----------------------------------------------------------------------------
class DVAPI TColorFunction {
public:
virtual TPixel32 operator()(
const TPixel32 &color) const = 0; // {return color;};
struct Parameters { // outX = tcrop(inX * m_mX + m_cX, 0, 1); 0<=inX<=1
double m_mR, m_mG, m_mB, m_mM;
double m_cR, m_cG, m_cB, m_cM;
Parameters()
: m_mR(1)
, m_mG(1)
, m_mB(1)
, m_mM(1)
, m_cR(0)
, m_cG(0)
, m_cB(0)
, m_cM(0) {}
};
virtual TColorFunction *clone() const = 0;
virtual bool getParameters(
Parameters &p) const = 0; //{ p = Parameters(); return true; }
virtual ~TColorFunction() {}
};
//-----------------------------------------------------------------------------
class DVAPI TGenericColorFunction final : public TColorFunction {
double m_m[4], m_c[4];
public:
TGenericColorFunction(const double m[4], const double c[4]);
TColorFunction *clone() const override {
return new TGenericColorFunction(m_m, m_c);
}
TPixel32 operator()(const TPixel32 &color) const override;
bool getParameters(Parameters &p) const override;
};
//-----------------------------------------------------------------------------
class DVAPI TColorFader final : public TColorFunction {
TPixel32 m_color;
double m_fade;
public:
TColorFader() : m_color(), m_fade(0.5) {}
TColorFader(const TPixel32 &color, double fade)
: m_color(color), m_fade(fade) {}
TColorFunction *clone() const override {
return new TColorFader(m_color, m_fade);
}
TPixel32 operator()(const TPixel32 &color) const override;
bool getParameters(Parameters &p) const override;
};
//-----------------------------------------------------------------------------
class DVAPI TOnionFader final : public TColorFunction {
TPixel32 m_color;
double m_fade;
public:
TOnionFader() : m_color(), m_fade(0.5) {}
TOnionFader(const TPixel32 &color, double fade)
: m_color(color), m_fade(fade) {}
TColorFunction *clone() const override {
return new TOnionFader(m_color, m_fade);
}
TPixel32 operator()(const TPixel32 &color) const override;
bool getParameters(Parameters &p) const override;
};
class DVAPI TTranspFader final : public TColorFunction {
double m_transp;
public:
TTranspFader() : m_transp(0.5) {}
TTranspFader(double transp) : m_transp(transp) {}
TColorFunction *clone() const override { return new TTranspFader(m_transp); }
TPixel32 operator()(const TPixel32 &color) const override;
bool getParameters(Parameters &p) const override;
};
//-----------------------------------------------------------------------------
class DVAPI TColumnColorFilterFunction final : public TColorFunction {
TPixel32 m_colorScale;
public:
TColumnColorFilterFunction() : m_colorScale() {}
TColumnColorFilterFunction(const TPixel32 &color) : m_colorScale(color) {}
TColorFunction *clone() const override {
return new TColumnColorFilterFunction(m_colorScale);
}
TPixel32 operator()(const TPixel32 &color) const override;
bool getParameters(Parameters &p) const override;
};
#endif
| 1,205 |
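The `Parameters` comment above encodes a per-channel affine map, `outX = tcrop(inX * m_mX + m_cX, 0, 1)`. A scalar sketch of that map, with an explicit clamp standing in for `tcrop` (whose exact signature is assumed):

```cpp
// Sketch: apply one channel of TColorFunction::Parameters to a
// normalized (0..1) input. Equivalent to tcrop(in * m + c, 0, 1).
inline double applyChannel(double in, double m, double c) {
  double out = in * m + c;
  if (out < 0.0) out = 0.0;   // lower crop
  if (out > 1.0) out = 1.0;   // upper crop
  return out;
}
// e.g. red channel: applyChannel(color.r / 255.0, p.m_mR, p.m_cR)
```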
611 | //
// BSubview.h
// ZIKViewRouterTests
//
// Created by zuik on 2018/4/18.
// Copyright © 2018 zuik. All rights reserved.
//
#import "ZIKPlatformCapabilities.h"
#if ZIK_HAS_UIKIT
#import <UIKit/UIKit.h>
#else
#import <AppKit/AppKit.h>
#endif
#import "BSubviewInput.h"
@interface BSubview : UIView <BSubviewInput>
@property (nonatomic, copy, nullable) NSString *title;
@property (nonatomic, strong) id router;
@end
| 169 |
17,085 | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/p_norm_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"
namespace paddle {
namespace operators {
template <typename DeviceContext, typename T>
class PnormNPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in_x = ctx.Input<framework::Tensor>("X");
auto* out_norm = ctx.Output<framework::Tensor>("Out");
out_norm->mutable_data<T>(ctx.GetPlace());
float porder = ctx.Attr<float>("porder");
int axis = ctx.Attr<int>("axis");
bool keepdim = ctx.Attr<bool>("keepdim");
auto xdim = in_x->dims();
if (axis < 0) axis = xdim.size() + axis;
auto stream =
ctx.template device_context<paddle::platform::NPUDeviceContext>()
.stream();
int p = 0;
bool combine_op =
!(porder == 0 || porder == INFINITY || porder == -INFINITY);
if (porder == INFINITY) {
p = INT_MAX;
} else if (porder == -INFINITY) {
p = INT_MIN;
} else {
p = static_cast<int>(porder);
float t = 0;
float diff = std::abs(std::modf(porder, &t));
if (diff < 1e-5) {
combine_op = false;
}
}
if (!combine_op) {
const auto& runner = NpuOpRunner("LpNorm", {*in_x}, {*out_norm},
{{"p", p},
{"axes", std::vector<int32_t>({axis})},
{"keep_dims", keepdim}});
runner.Run(stream);
} else {
Tensor tmp_x;
tmp_x.mutable_data<T>(xdim, ctx.GetPlace());
const auto& power_runner1 =
NpuOpRunner("Power", {*in_x}, {tmp_x},
{{"power", porder}, {"scale", 1.0f}, {"shift", 0.0f}});
power_runner1.Run(stream);
const auto& reduce_runner = NpuOpRunner(
"ReduceSumD", {tmp_x}, {*out_norm},
{{"axes", std::vector<int32_t>({axis})}, {"keep_dims", keepdim}});
reduce_runner.Run(stream);
const auto& power_runner2 = NpuOpRunner(
"Power", {*out_norm}, {*out_norm},
{{"power", 1 / porder}, {"scale", 1.0f}, {"shift", 0.0f}});
power_runner2.Run(stream);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_NPU_KERNEL(
p_norm, ops::PnormNPUKernel<plat::NPUDeviceContext, float>,
ops::PnormNPUKernel<plat::NPUDeviceContext, plat::float16>);
| 1,324 |
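For reference, the combined branch above computes the textbook p-norm, ||x||_p = (sum_i |x_i|^p)^(1/p), as three device ops (Power, ReduceSumD, Power). A host-side scalar sketch of the same decomposition (illustrative only, not part of the kernel; note the mathematical definition uses |x_i|, while the kernel applies Power to the raw input):

```cpp
// Sketch: scalar reference for the Power -> ReduceSum -> Power pipeline
// used for non-integer p.
#include <cmath>
#include <cstddef>

double p_norm(const double* x, std::size_t n, double p) {
  double acc = 0.0;
  for (std::size_t i = 0; i < n; ++i)
    acc += std::pow(std::abs(x[i]), p);  // Power, then ReduceSum
  return std::pow(acc, 1.0 / p);         // final Power with exponent 1/p
}
```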
553 | <reponame>u014427391/ecshop
package com.muses.taoshop.common.cas.constant;
/**
 * <pre>
 * Single sign-on (CAS) configuration constants
 * </pre>
 *
 * @author nicky
 * @version 1.00.00
 * <pre>
 * Change history
 * Revised version: / Modified by: / Modified on: 2018.10.28 22:40 / Changes:
 * </pre>
 */
public class CasConsts {
    // CAS server base address
    public static final String CAS_SERVER_URL_PREFIX = "http://127.0.0.1:8080/cas";
    // Single sign-on (login) address
    public static final String CAS_SERVER_LOGIN_URL = CAS_SERVER_URL_PREFIX + "/login";
    // Single sign-out (logout) address; built from the server prefix, not the login URL
    public static final String CAS_SERVER_LOGOUT_URL = CAS_SERVER_URL_PREFIX + "/logout";
    // Address of the service exposed to clients
    public static final String SERVER_URL_PREFIX = "http://127.0.0.1:8080/";
    // casFilter urlPattern
    public static final String CAS_FILTER_URL_PATTERN = "/cas";
    // Login address
    public static final String LOGIN_URL = CAS_SERVER_LOGIN_URL + "?server=" + SERVER_URL_PREFIX + CAS_FILTER_URL_PATTERN;
    // Logout address
    public static final String LOGOUT_URL = CAS_SERVER_LOGOUT_URL + "?server=" + SERVER_URL_PREFIX;
    // Post-login success address
    public static final String LOGIN_SUCCESS_URL = "/toIndex";
    // Redirect address on failed authorization
    public static final String UNUATHORIZED_URL = "/error/403.html";
}
| 635 |
636 | package cn.org.atool.fluent.mybatis.base.crud;
import cn.org.atool.fluent.mybatis.base.free.FreeQuery;
import cn.org.atool.fluent.mybatis.segment.JoinOn;
/**
 * Join query builder, variant one: pass in a Query whose alias and
 * parameters are already configured.
 *
 * @param <QL> query type of the first (left) table
* @author wudarui
*/
@SuppressWarnings({"unchecked", "unused"})
public interface JoinToBuilder<QL extends BaseQuery<?, QL>> extends JoinBuilder<QL> {
/**
* from left.table join right.table on condition
*
     * @param query right-table query with its join conditions configured
     * @param <QR> type of the joined right-table query
* @return ignore
*/
<QR extends BaseQuery<?, QR>> JoinOn<QL, QR, JoinToBuilder<QL>> join(QR query);
/**
* from table1 join (select query) alias ...
*
     * @param query sub-query
     * @param alias alias for the sub-query
     * @param <QR> type of the right query
* @return ignore
*/
default <QR extends BaseQuery<?, QR>> JoinOn<QL, QR, JoinToBuilder<QL>> join(QR query, String alias) {
return this.join((QR) new FreeQuery(query, alias));
}
/**
* from left.table left join right.table on condition
*
     * @param query right-table query with its join conditions configured
     * @param <QR> type of the joined right-table query
* @return ignore
*/
<QR extends BaseQuery<?, QR>> JoinOn<QL, QR, JoinToBuilder<QL>> leftJoin(QR query);
/**
* from table1 left join (select query) alias ...
*
     * @param query sub-query
     * @param alias alias for the sub-query
     * @param <QR> type of the right query
* @return ignore
*/
default <QR extends BaseQuery<?, QR>> JoinOn<QL, QR, JoinToBuilder<QL>> leftJoin(QR query, String alias) {
return this.leftJoin((QR) new FreeQuery(query, alias));
}
/**
* from left.table right join right.table on condition
*
     * @param query right-table query with its join conditions configured
     * @param <QR> type of the joined right-table query
* @return ignore
*/
<QR extends BaseQuery<?, QR>> JoinOn<QL, QR, JoinToBuilder<QL>> rightJoin(QR query);
/**
* from table1 right join (select query) alias ...
*
     * @param query sub-query
     * @param alias alias for the sub-query
     * @param <QR> type of the right query
* @return ignore
*/
default <QR extends BaseQuery<?, QR>> JoinOn<QL, QR, JoinToBuilder<QL>> rightJoin(QR query, String alias) {
return this.rightJoin((QR) new FreeQuery(query, alias));
}
} | 1,146 |
5,169 | <gh_stars>1000+
{
"name": "FunAccordionView",
"platforms": {
"ios": "11.0"
},
"summary": "FunAccordionView provides a three level accordion view (forked of ThreeLevelAccordian).",
"requires_arc": true,
"version": "1.0.0",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"homepage": "https://github.com/marcandreappel/FunAccordionView",
"source": {
"git": "https://github.com/marcandreappel/FunAccordionView.git",
"tag": "1.0.0"
},
"frameworks": "UIKit",
"source_files": [
"FunAccordionView/**/*.{swift}",
"FunAccordionView/**/*.{h}"
],
"resources": "FunAccordionView/**/*.{png,xib,pdf}"
}
| 301 |
488 | typedef long long int64_t;
// extern __attribute__ (( cdecl )) int64_t __divdi3 ( int64_t num, int64_t den );
#include "test2015_89.h"
#if 1
/* The attribute is not handled properly (dropped) by ROSE currently */
__attribute__ (( cdecl )) int64_t __divdi3(int64_t num, int64_t den)
#else
int64_t __divdi3(int64_t num, int64_t den)
#endif
{
int64_t v = 0; // stub body: avoid returning an indeterminate value
return v;
}
| 151 |
1,240 | package com.eventyay.organizer.data.repository;
import com.eventyay.organizer.common.Constants;
import com.eventyay.organizer.data.AbstractObservable;
import com.eventyay.organizer.data.Repository;
import com.eventyay.organizer.data.attendee.Attendee;
import com.eventyay.organizer.data.attendee.AttendeeApi;
import com.eventyay.organizer.data.attendee.AttendeeRepositoryImpl;
import com.raizlabs.android.dbflow.sql.language.SQLOperator;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnit;
import org.mockito.junit.MockitoRule;
import java.util.Arrays;
import java.util.List;
import io.reactivex.Completable;
import io.reactivex.Observable;
import io.reactivex.android.plugins.RxAndroidPlugins;
import io.reactivex.observers.TestObserver;
import io.reactivex.plugins.RxJavaPlugins;
import io.reactivex.schedulers.Schedulers;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
public class AttendeeRepositoryTest {
@Rule
public MockitoRule mockitoRule = MockitoJUnit.rule();
private AttendeeRepositoryImpl attendeeRepository;
@Mock
private AttendeeApi attendeeApi;
@Mock
private Repository repository;
private static final List<Attendee> ATTENDEES = Arrays.asList(
new Attendee(),
new Attendee(),
new Attendee()
);
@Before
public void setUp() {
when(repository.observableOf(Attendee.class)).thenReturn(new AbstractObservable.AbstractObservableBuilder<>(repository));
attendeeRepository = new AttendeeRepositoryImpl(repository, attendeeApi);
RxJavaPlugins.setIoSchedulerHandler(scheduler -> Schedulers.trampoline());
RxAndroidPlugins.setInitMainThreadSchedulerHandler(schedulerCallable -> Schedulers.trampoline());
}
@After
public void tearDown() {
RxJavaPlugins.reset();
RxAndroidPlugins.reset();
}
@Test
public void shouldSaveAttendeesInCache() {
TestObserver testObserver = TestObserver.create();
Completable completable = Completable.complete()
.doOnSubscribe(testObserver::onSubscribe);
when(repository.isConnected()).thenReturn(true);
when(repository.getItems(eq(Attendee.class), any(SQLOperator.class))).thenReturn(Observable.empty());
when(repository.syncSave(eq(Attendee.class), any(), any(), any())).thenReturn(completable);
when(attendeeApi.getAttendees(43)).thenReturn(Observable.just(ATTENDEES));
// No force reload ensures use of cache
attendeeRepository.getAttendees(43, false).test();
testObserver.assertSubscribed();
// Verify loads from network
verify(attendeeApi).getAttendees(43);
}
@Test
public void shouldLoadAttendeesFromCache() {
when(repository.getItems(eq(Attendee.class), any(SQLOperator.class)))
.thenReturn(Observable.fromIterable(ATTENDEES));
// No force reload ensures use of cache
Observable<Attendee> attendeeObservable = attendeeRepository.getAttendees(67, false);
attendeeObservable
.toList()
.test()
.assertNoErrors()
.assertValue(ATTENDEES);
verifyZeroInteractions(attendeeApi);
}
@Test
public void shouldFetchAttendeesOnForceReload() {
when(repository.isConnected()).thenReturn(true);
when(attendeeApi.getAttendees(23)).thenReturn(Observable.just(ATTENDEES));
when(repository.syncSave(eq(Attendee.class), any(), any(), any())).thenReturn(Completable.complete());
when(repository.getItems(eq(Attendee.class), any(SQLOperator.class)))
.thenReturn(Observable.fromIterable(ATTENDEES));
// Force reload ensures no use of cache
Observable<List<Attendee>> attendeeObservable = attendeeRepository.getAttendees(23, true)
.toList()
.toObservable();
attendeeObservable.
test()
.assertNoErrors()
.assertValue(ATTENDEES);
// Verify loads from network
verify(attendeeApi).getAttendees(23);
}
@Test
public void shouldSendErrorOnNetworkDown() {
when(repository.isConnected()).thenReturn(false);
when(repository.getItems(any(), any())).thenReturn(Observable.empty());
attendeeRepository.getAttendees(43, false)
.test()
.assertErrorMessage(Constants.NO_NETWORK);
attendeeRepository.getAttendees(43, true)
.test()
.assertErrorMessage(Constants.NO_NETWORK);
attendeeRepository.toggleAttendeeCheckStatus(ATTENDEES.get(0))
.test()
.assertErrorMessage(Constants.NO_NETWORK);
verifyZeroInteractions(attendeeApi);
}
@Test
public void shouldSaveToggledAttendeeCheck() {
Attendee attendee = Attendee.builder().id(89).build();
attendee.setCheckedIn(true);
TestObserver testObserver = TestObserver.create();
Completable completable = Completable.complete()
.doOnSubscribe(testObserver::onSubscribe);
when(repository.isConnected()).thenReturn(true);
when(repository.getItems(eq(Attendee.class), any())).thenReturn(Observable.just(attendee));
when(repository.update(Attendee.class, attendee)).thenReturn(completable);
when(attendeeApi.patchAttendee(89, attendee)).thenReturn(Observable.just(attendee));
Observable<Attendee> attendeeObservable = attendeeRepository.toggleAttendeeCheckStatus(attendee);
attendeeObservable.test();
testObserver.assertSubscribed();
}
}
| 2,382 |
600 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import mock
from bravado.fido_client import FidoResponseAdapter
def test_header_conversion():
fido_response = mock.Mock(
name='fido_response',
headers={
b'Content-Type': [b'application/json'],
'x-weird-ä'.encode('latin1'): ['ümläüt'.encode('utf8')],
b'X-Multiple': [b'donotuse', b'usethis'],
},
)
response_adapter = FidoResponseAdapter(fido_response)
assert response_adapter.headers == {
'content-type': 'application/json',
'X-WEIRD-ä': 'ümläüt',
'X-Multiple': 'usethis',
}
| 312 |
1,511 | <filename>etc/release/gen_requirements_dev.py<gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import click
import utils_requirements
@click.command()
@click.option('-s', '--site-packages-dir',
type=click.Path(exists=True, readable=True, path_type=str, file_okay=False, resolve_path=True),
required=True,
metavar='DIR',
help='Path to the "site-packages" directory where wheels are installed such as lib/python3.6/site-packages',
)
@click.option('-d', '--dev-requirements-file',
type=click.Path(path_type=str, dir_okay=False),
metavar='FILE',
default='requirements-dev.txt',
show_default=True,
help='Path to the dev requirements file to update or create.',
)
@click.option('-r', '--main-requirements-file',
type=click.Path(path_type=str, dir_okay=False),
default='requirements.txt',
metavar='FILE',
show_default=True,
help='Path to the main requirements file. Its requirements will be excluded '
'from the generated dev requirements.',
)
@click.help_option('-h', '--help')
def gen_dev_requirements(site_packages_dir, dev_requirements_file, main_requirements_file):
"""
Create or overwrite the `--dev-requirements-file` pip requirements FILE with
all Python packages found installed in `--site-packages-dir`. Exclude
    package names also listed in the --main-requirements-file pip requirements
    FILE (these are assumed to be the production requirements and therefore
    always present in addition to the development requirements).
"""
utils_requirements.lock_dev_requirements(
dev_requirements_file=dev_requirements_file,
main_requirements_file=main_requirements_file,
site_packages_dir=site_packages_dir
)
if __name__ == '__main__':
gen_dev_requirements()
| 741 |
372 | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.spanner.v1.model;
/**
* ScanData contains Cloud Key Visualizer scan data used by the caller to construct a visualization.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Cloud Spanner API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class ScanData extends com.google.api.client.json.GenericJson {
/**
* Cloud Key Visualizer scan data. The range of time this information covers is captured via the
* above time range fields. Note, this field is not available to the ListScans method.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private VisualizationData data;
/**
* The upper bound for when the contained data is defined.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private String endTime;
/**
* A range of time (inclusive) for when the contained data is defined. The lower bound for when
* the contained data is defined.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private String startTime;
/**
* Cloud Key Visualizer scan data. The range of time this information covers is captured via the
* above time range fields. Note, this field is not available to the ListScans method.
* @return value or {@code null} for none
*/
public VisualizationData getData() {
return data;
}
/**
* Cloud Key Visualizer scan data. The range of time this information covers is captured via the
* above time range fields. Note, this field is not available to the ListScans method.
* @param data data or {@code null} for none
*/
public ScanData setData(VisualizationData data) {
this.data = data;
return this;
}
/**
* The upper bound for when the contained data is defined.
* @return value or {@code null} for none
*/
public String getEndTime() {
return endTime;
}
/**
* The upper bound for when the contained data is defined.
* @param endTime endTime or {@code null} for none
*/
public ScanData setEndTime(String endTime) {
this.endTime = endTime;
return this;
}
/**
* A range of time (inclusive) for when the contained data is defined. The lower bound for when
* the contained data is defined.
* @return value or {@code null} for none
*/
public String getStartTime() {
return startTime;
}
/**
* A range of time (inclusive) for when the contained data is defined. The lower bound for when
* the contained data is defined.
* @param startTime startTime or {@code null} for none
*/
public ScanData setStartTime(String startTime) {
this.startTime = startTime;
return this;
}
@Override
public ScanData set(String fieldName, Object value) {
return (ScanData) super.set(fieldName, value);
}
@Override
public ScanData clone() {
return (ScanData) super.clone();
}
}
| 1,163 |
11,356 | // Copyright (c) 2001-2010 <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if !defined(SPIRIT_LEXER_EXAMPLE_WORD_COUNT_TOKENS_FEB_10_2008_0739PM)
#define SPIRIT_LEXER_EXAMPLE_WORD_COUNT_TOKENS_FEB_10_2008_0739PM
///////////////////////////////////////////////////////////////////////////////
// Token definition: We keep the base class for the token definition as a
// template parameter to allow this class to be used for
// both: the code generation and the lexical analysis
///////////////////////////////////////////////////////////////////////////////
//[wc_static_tokenids
enum tokenids
{
IDANY = boost::spirit::lex::min_token_id + 1,
};
//]
//[wc_static_tokendef
// This token definition class can be used without any change for all three
// possible use cases: a dynamic lexical analyzer, a code generator, and a
// static lexical analyzer.
template <typename BaseLexer>
struct word_count_tokens : boost::spirit::lex::lexer<BaseLexer>
{
word_count_tokens()
: word_count_tokens::base_type(
boost::spirit::lex::match_flags::match_not_dot_newline)
{
// define tokens and associate them with the lexer
word = "[^ \t\n]+";
this->self = word | '\n' | boost::spirit::lex::token_def<>(".", IDANY);
}
boost::spirit::lex::token_def<std::string> word;
};
//]
#endif
| 536 |
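A hedged driver sketch for the dynamic-lexer use case (assuming the Boost.Spirit.Lex `lex::tokenize` API; the token/lexer typedefs mirror the library's word-count examples, and the driver below is not part of this header):

```cpp
// Sketch: feed text through word_count_tokens and count 'word' tokens.
#include <boost/spirit/include/lex_lexertl.hpp>
#include <iostream>
#include <string>

namespace lex = boost::spirit::lex;

int main() {
  typedef lex::lexertl::token<char const*> token_type;
  typedef lex::lexertl::lexer<token_type> lexer_type;

  word_count_tokens<lexer_type> word_count;   // the lexer defined above

  std::string text = "hello world\n";
  char const* first = text.c_str();
  char const* last = first + text.size();

  std::size_t words = 0;
  lex::tokenize(first, last, word_count,
                [&](token_type const& t) {
                  if (t.id() == word_count.word.id())
                    ++words;                   // saw a 'word' token
                  return true;                 // keep lexing
                });
  std::cout << words << " word(s)\n";
}
```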
3,934 | <filename>packages/pyright-internal/src/tests/samples/typeAlias5.py
# This sample tests type aliases that are unions that include
# TypeVars.
from datetime import datetime
from typing import IO, Dict, Generic, List, Literal, Type, TypeVar, Union
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
MyUnion1 = Union[int, _T1, str, _T2, List[_T1]]
MyUnion2 = Union[float, datetime]
MyUnion3 = MyUnion1[MyUnion2]
MyUnion4 = MyUnion1[MyUnion2, IO]
# This should generate an error because only two type
# arguments are expected.
MyUnion5 = MyUnion1[MyUnion2, IO, str]
MyUnion6 = MyUnion1[Literal[0], Literal["a"]]
reveal_type(
MyUnion6,
expected_text="Type[int] | Type[str] | Type[List[Literal[0]]] | Type[Literal[0, 'a']]",
)
class Foo:
def __int__(self) -> int:
return 0
FooT = TypeVar("FooT", bound=Foo)
FooIsh = Union[int, FooT]
class Bar(Foo):
def __int__(self) -> int:
return super().__int__() + 1
v1: FooIsh[Bar] = 42
v2: FooIsh[Bar] = Bar()
# This should generate an error.
v3: FooIsh[Type[Bar]] = 42
MyTypeAlias = Dict[_T1, _T2]
class MyClass1(Generic[_T1, _T2]):
# This should generate an error because S and T are bound
# type variables.
MyTypeAlias = Dict[_T1, _T2]
| 503 |
372 | <reponame>mjhopkins/google-api-java-client-services
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.dataflow.model;
/**
* A task which describes what action should be performed for the specified streaming computation
* ranges.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Dataflow API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class StreamingComputationTask extends com.google.api.client.json.GenericJson {
/**
* Contains ranges of a streaming computation this task should apply to.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<StreamingComputationRanges> computationRanges;
static {
// hack to force ProGuard to consider StreamingComputationRanges used, since otherwise it would be stripped out
// see https://github.com/google/google-api-java-client/issues/543
com.google.api.client.util.Data.nullOf(StreamingComputationRanges.class);
}
/**
* Describes the set of data disks this task should apply to.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<MountedDataDisk> dataDisks;
static {
// hack to force ProGuard to consider MountedDataDisk used, since otherwise it would be stripped out
// see https://github.com/google/google-api-java-client/issues/543
com.google.api.client.util.Data.nullOf(MountedDataDisk.class);
}
/**
* A type of streaming computation task.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String taskType;
/**
* Contains ranges of a streaming computation this task should apply to.
* @return value or {@code null} for none
*/
public java.util.List<StreamingComputationRanges> getComputationRanges() {
return computationRanges;
}
/**
* Contains ranges of a streaming computation this task should apply to.
* @param computationRanges computationRanges or {@code null} for none
*/
public StreamingComputationTask setComputationRanges(java.util.List<StreamingComputationRanges> computationRanges) {
this.computationRanges = computationRanges;
return this;
}
/**
* Describes the set of data disks this task should apply to.
* @return value or {@code null} for none
*/
public java.util.List<MountedDataDisk> getDataDisks() {
return dataDisks;
}
/**
* Describes the set of data disks this task should apply to.
* @param dataDisks dataDisks or {@code null} for none
*/
public StreamingComputationTask setDataDisks(java.util.List<MountedDataDisk> dataDisks) {
this.dataDisks = dataDisks;
return this;
}
/**
* A type of streaming computation task.
* @return value or {@code null} for none
*/
public java.lang.String getTaskType() {
return taskType;
}
/**
* A type of streaming computation task.
* @param taskType taskType or {@code null} for none
*/
public StreamingComputationTask setTaskType(java.lang.String taskType) {
this.taskType = taskType;
return this;
}
@Override
public StreamingComputationTask set(String fieldName, Object value) {
return (StreamingComputationTask) super.set(fieldName, value);
}
@Override
public StreamingComputationTask clone() {
return (StreamingComputationTask) super.clone();
}
}
| 1,333 |
764 | {
"symbol": "LLWF",
"address": "0x8618e0E4120f96eCCBC280A37445C4d444b2952d",
"overview": {
"en": "Was established in July 7, 2016, the company cooperates with many industry leaders signing, also has the global system of cross-border manufacturer provides straightly, leave lotus culture has deep cooperation with more than 300000 + brands, service to all families with high cost performance products.",
"zh": "成立于2016年7月7日,公司与众多行业领军企业签约合作,同时拥有全球跨境厂家直供体系,留莲忘返目前已与超过300000+品牌商进行深度合作,以超高的性价比商品向所有家庭提供服务。"
},
"email": "<EMAIL>",
"website": "http://llwf.liulianwangfan7676.com/",
"whitepaper": "http://llwf.liulianwangfan7676.com/upload/baipishu.pdf",
"state": "NORMAL",
"published_on": "2019-09-01",
"initial_price": {
"ETH": "0.008443564 ETH"
}
} | 457 |
1,085 | /*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.services.credentials;
import java.net.URI;
import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
import com.google.inject.Inject;
/**
* Env Credential Provider.
*/
public class EnvCredentialsProvider extends AbstractSimpleCredentialsProvider implements CredentialsProvider {
private final Map<String, String> env;
@Inject
public EnvCredentialsProvider() {
this(null);
}
@VisibleForTesting
public EnvCredentialsProvider(Map<String, String> env) {
super("env");
this.env = env;
}
@Override
protected String doLookup(URI uri) throws IllegalArgumentException {
String key = uri.getSchemeSpecificPart();
if (env == null) {
return System.getenv().get(key);
}
return env.get(key);
}
}
| 428 |
1,418 | package aima.core.probability.domain;
/**
 * A Domain over a continuous, uncountable set of objects (e.g. the real
 * numbers).
*
* @author <NAME>
*/
public interface ContinuousDomain extends Domain {
}
| 74 |
2,360 | <filename>var/spack/repos/builtin/packages/pegtl/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
# package has a Makefile, but only to build examples
class Pegtl(CMakePackage):
"""The Parsing Expression Grammar Template Library (PEGTL) is a
zero-dependency C++11 header-only library for creating parsers
according to a Parsing Expression Grammar (PEG).
"""
homepage = "https://github.com/taocpp/PEGTL"
url = "https://github.com/taocpp/PEGTL/tarball/2.1.4"
git = "https://github.com/taocpp/PEGTL.git"
version('master', branch='master')
version('3.2.0', sha256='91aa6529ef9e6b57368e7b5b1f04a3bd26a39419d30e35a3c5c66ef073926b56')
version('2.8.3', sha256='370afd0fbe6d73c448a33c10fbe4a7254f92077f5a217317d0a32a9231293015')
version('2.1.4', sha256='d990dccc07b4d9ba548326d11c5c5e34fa88b34fe113cb5377da03dda29f23f2')
version('2.0.0', sha256='5aae0505077e051cae4d855c38049cc6cf71103a6cc8d0ddef01a576e8a60cc0')
# Ref: https://github.com/taocpp/PEGTL/blob/master/src/example/pegtl/json_classes.hpp
patch('change_to_virtual_destructor.patch', when='@:2.4')
# Ref: https://bugs.gentoo.org/733678
patch_url = 'https://gitweb.gentoo.org/repo/gentoo.git/plain/dev-libs/pegtl/files/pegtl-2.8.3-gcc-10.patch'
patch_checksum = 'fc40b0c7390f8c0473f2cb4821bda7a5e107f93ca9d2fafeff2065445bb39981'
patch(patch_url, sha256=patch_checksum, level=0, when='@2.1.4:2.8.3')
def cmake_args(self):
args = []
if self.run_tests:
args.extend([
'-DPEGTL_BUILD_EXAMPLES=ON',
'-DPEGTL_BUILD_TESTS=ON'
])
else:
args.extend([
'-DPEGTL_BUILD_EXAMPLES=OFF',
'-DPEGTL_BUILD_TESTS=OFF'
])
return args
@run_after('build')
@on_package_attributes(run_tests=True)
def check(self):
with working_dir(self.build_directory):
make('test', parallel=False)
| 1,051 |
345 | <reponame>mickaelseznec/Pytorch-Correlation-extension
from __future__ import division
from __future__ import print_function
import argparse
import time
import torch
from spatial_correlation_sampler import SpatialCorrelationSampler
from tqdm import trange
TIME_SCALES = {'s': 1, 'ms': 1000, 'us': 1000000}
parser = argparse.ArgumentParser()
parser.add_argument('backend', choices=['cpu', 'cuda'], default='cuda')
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('-k', '--kernel-size', type=int, default=3)
parser.add_argument('--patch', type=int, default=3)
parser.add_argument('--patch_dilation', type=int, default=2)
parser.add_argument('-c', '--channel', type=int, default=64)
parser.add_argument('--height', type=int, default=100)
parser.add_argument('-w', '--width', type=int, default=100)
parser.add_argument('-s', '--stride', type=int, default=2)
parser.add_argument('-p', '--pad', type=int, default=1)
parser.add_argument('--scale', choices=['s', 'ms', 'us'], default='us')
parser.add_argument('-r', '--runs', type=int, default=100)
parser.add_argument('--dilation', type=int, default=2)
parser.add_argument('-d', '--dtype', choices=['half', 'float', 'double'])
args = parser.parse_args()
device = torch.device(args.backend)
if args.dtype == 'half':
dtype = torch.float16
elif args.dtype == 'float':
dtype = torch.float32
else:
dtype = torch.float64
input1 = torch.randn(args.batch_size,
args.channel,
args.height,
args.width,
dtype=dtype,
device=device,
requires_grad=True)
input2 = torch.randn_like(input1)
correlation_sampler = SpatialCorrelationSampler(
args.kernel_size,
args.patch,
args.stride,
args.pad,
args.dilation,
args.patch_dilation)
# Force CUDA initialization
output = correlation_sampler(input1, input2)
print(output.size())
output.mean().backward()
forward_min = float('inf')
forward_time = 0
backward_min = float('inf')
backward_time = 0
for _ in trange(args.runs):
correlation_sampler.zero_grad()
start = time.time()
output = correlation_sampler(input1, input2)
elapsed = time.time() - start
forward_min = min(forward_min, elapsed)
forward_time += elapsed
output = output.mean()
start = time.time()
output.backward()
elapsed = time.time() - start
backward_min = min(backward_min, elapsed)
backward_time += elapsed
scale = TIME_SCALES[args.scale]
forward_min *= scale
backward_min *= scale
forward_average = forward_time / args.runs * scale
backward_average = backward_time / args.runs * scale
print('Forward: {0:.3f}/{1:.3f} {4} | Backward {2:.3f}/{3:.3f} {4}'.format(
forward_min, forward_average, backward_min, backward_average,
args.scale))
| 1,119 |
2,366 | /*
* =============================================================================
*
* Copyright (c) 2011-2018, The THYMELEAF team (http://www.thymeleaf.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* =============================================================================
*/
package org.thymeleaf.engine;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.thymeleaf.context.IEngineContext;
import org.thymeleaf.inline.IInliner;
import org.thymeleaf.model.IModel;
import org.thymeleaf.processor.templateboundaries.ITemplateBoundariesStructureHandler;
import org.thymeleaf.util.Validate;
/**
* <p>
* Structure handler implementation, internally used by the engine.
* </p>
* <p>
* This class should not be directly used from outside the engine.
* </p>
*
* @author <NAME>
* @since 3.0.0
*
*/
public final class TemplateBoundariesStructureHandler implements ITemplateBoundariesStructureHandler {
boolean insertText;
String insertTextValue;
boolean insertTextProcessable;
boolean insertModel;
IModel insertModelValue;
boolean insertModelProcessable;
boolean setLocalVariable;
Map<String,Object> addedLocalVariables;
boolean removeLocalVariable;
Set<String> removedLocalVariableNames;
boolean setSelectionTarget;
Object selectionTargetObject;
boolean setInliner;
IInliner setInlinerValue;
TemplateBoundariesStructureHandler() {
super();
reset();
}
public void insert(final String text, final boolean processable) {
resetAllButLocalVariables();
Validate.notNull(text, "Text cannot be null");
this.insertText = true;
this.insertTextValue = text;
this.insertTextProcessable = processable;
}
public void insert(final IModel model, final boolean processable) {
resetAllButLocalVariables();
Validate.notNull(model, "Model cannot be null");
this.insertModel = true;
this.insertModelValue = model;
this.insertModelProcessable = processable;
}
public void removeLocalVariable(final String name) {
// Can be combined with others, no need to resetGathering
this.removeLocalVariable = true;
if (this.removedLocalVariableNames == null) {
this.removedLocalVariableNames = new HashSet<String>(3);
}
this.removedLocalVariableNames.add(name);
}
public void setLocalVariable(final String name, final Object value) {
// Can be combined with others, no need to resetGathering
this.setLocalVariable = true;
if (this.addedLocalVariables == null) {
this.addedLocalVariables = new HashMap<String, Object>(3);
}
this.addedLocalVariables.put(name, value);
}
public void setSelectionTarget(final Object selectionTarget) {
// Can be combined with others, no need to resetGathering
this.setSelectionTarget = true;
this.selectionTargetObject = selectionTarget;
}
public void setInliner(final IInliner inliner) {
this.setInliner = true;
this.setInlinerValue = inliner;
}
public void reset() {
resetAllButLocalVariables();
this.setLocalVariable = false;
if (this.addedLocalVariables != null) {
this.addedLocalVariables.clear();
}
this.removeLocalVariable = false;
if (this.removedLocalVariableNames != null) {
this.removedLocalVariableNames.clear();
}
this.setSelectionTarget = false;
this.selectionTargetObject = null;
this.setInliner = false;
this.setInlinerValue = null;
}
private void resetAllButLocalVariables() {
this.insertText = false;
this.insertTextValue = null;
this.insertTextProcessable = false;
this.insertModel = false;
this.insertModelValue = null;
this.insertModelProcessable = false;
}
void applyContextModifications(final IEngineContext engineContext) {
if (this.setLocalVariable) {
engineContext.setVariables(this.addedLocalVariables);
}
if (this.removeLocalVariable) {
for (final String variableName : this.removedLocalVariableNames) {
engineContext.removeVariable(variableName);
}
}
if (this.setSelectionTarget) {
engineContext.setSelectionTarget(this.selectionTargetObject);
}
if (this.setInliner) {
engineContext.setInliner(this.setInlinerValue);
}
}
}
| 1,865 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-793h-vw32-6xg7",
"modified": "2022-05-01T02:27:36Z",
"published": "2022-05-01T02:27:36Z",
"aliases": [
"CVE-2005-4465"
],
"details": "The Internet Key Exchange version 1 (IKEv1) implementation in NEC UNIVERGE IX1000, IX2000, and IX3000 allows remote attackers to cause a denial of service and possibly execute arbitrary code via crafted IKE packets, as demonstrated by the PROTOS ISAKMP Test Suite for IKEv1. NOTE: due to the lack of details in the advisory, it is unclear which of CVE-2005-3666, CVE-2005-3667, and/or CVE-2005-3668 this issue applies to.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2005-4465"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/18166"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/16027"
},
{
"type": "WEB",
"url": "http://www.sw.nec.co.jp/ixseries/ix1k2k/Support/CERT/NISCC273756.html"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2005/3028"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 567 |
479 | // Copyright (C) 2010 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.reviewdb.client;
import com.google.gwtorm.client.Column;
import com.google.gwtorm.client.CompoundKey;
import com.google.gwtorm.client.StringKey;
/** External tracking id associated with a {@link Change} */
public final class TrackingId {
public static final int TRACKING_ID_MAX_CHAR = 32;
public static final int TRACKING_SYSTEM_MAX_CHAR = 10;
/** External tracking id */
public static class Id extends StringKey<com.google.gwtorm.client.Key<?>> {
private static final long serialVersionUID = 1L;
@Column(id = 1, length = TrackingId.TRACKING_ID_MAX_CHAR)
protected String id;
protected Id() {}
public Id(String id) {
this.id = id;
}
@Override
public String get() {
return id;
}
@Override
protected void set(String newValue) {
id = newValue;
}
}
/** Name of external tracking system */
public static class System extends StringKey<com.google.gwtorm.client.Key<?>> {
private static final long serialVersionUID = 1L;
@Column(id = 1, length = TrackingId.TRACKING_SYSTEM_MAX_CHAR)
protected String system;
protected System() {}
public System(String s) {
this.system = s;
}
@Override
public String get() {
return system;
}
@Override
protected void set(String newValue) {
system = newValue;
}
}
public static class Key extends CompoundKey<Change.Id> {
private static final long serialVersionUID = 1L;
@Column(id = 1)
protected Change.Id changeId;
@Column(id = 2)
protected Id trackingKey;
@Column(id = 3)
protected System trackingSystem;
protected Key() {
changeId = new Change.Id();
trackingKey = new Id();
trackingSystem = new System();
}
protected Key(Change.Id ch, Id id, System s) {
changeId = ch;
trackingKey = id;
trackingSystem = s;
}
@Override
public Change.Id getParentKey() {
return changeId;
}
public TrackingId.Id getTrackingId() {
return trackingKey;
}
public TrackingId.System getTrackingSystem() {
return trackingSystem;
}
@Override
public com.google.gwtorm.client.Key<?>[] members() {
return new com.google.gwtorm.client.Key<?>[] {trackingKey, trackingSystem};
}
}
@Column(id = 1, name = Column.NONE)
protected Key key;
protected TrackingId() {}
public TrackingId(Change.Id ch, TrackingId.Id id, TrackingId.System s) {
key = new Key(ch, id, s);
}
public TrackingId(Change.Id ch, String id, String s) {
key = new Key(ch, new TrackingId.Id(id), new TrackingId.System(s));
}
public TrackingId.Key getKey() {
return key;
}
public Change.Id getChangeId() {
return key.changeId;
}
public String getTrackingId() {
return key.trackingKey.get();
}
public String getSystem() {
return key.trackingSystem.get();
}
@Override
public int hashCode() {
return key.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj instanceof TrackingId) {
final TrackingId tr = (TrackingId) obj;
return key.equals(tr.key);
}
return false;
}
}
| 1,314 |
1,724 | <filename>driver-core/src/test/unit/com/mongodb/client/model/TestWindows.java
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mongodb.client.model;
import com.mongodb.client.model.Windows.Bound;
import org.bson.BsonArray;
import org.bson.BsonDecimal128;
import org.bson.BsonDocument;
import org.bson.BsonDouble;
import org.bson.BsonInt32;
import org.bson.BsonInt64;
import org.bson.BsonString;
import org.bson.Document;
import org.bson.types.Decimal128;
import org.junit.jupiter.api.Test;
import static com.mongodb.client.model.MongoTimeUnit.SECOND;
import static com.mongodb.client.model.Windows.Bound.CURRENT;
import static com.mongodb.client.model.Windows.Bound.UNBOUNDED;
import static com.mongodb.client.model.MongoTimeUnit.MILLISECOND;
import static com.mongodb.client.model.MongoTimeUnit.HOUR;
import static com.mongodb.client.model.MongoTimeUnit.MONTH;
import static com.mongodb.client.model.Windows.documents;
import static com.mongodb.client.model.Windows.range;
import static com.mongodb.client.model.Windows.timeRange;
import static java.util.Arrays.asList;
import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
final class TestWindows {
@Test
void of() {
Window expected = Windows.timeRange(-1, SECOND, CURRENT);
Document windowDocument = new Document("range", asList(-1L, "current")).append("unit", "second");
Window actualFromDocument = Windows.of(windowDocument);
Window actualFromBsonDocument = Windows.of(windowDocument.toBsonDocument());
assertAll(
() -> assertEquals(expected.toBsonDocument(), actualFromDocument.toBsonDocument()),
() -> assertEquals(expected.toBsonDocument(), actualFromBsonDocument.toBsonDocument()));
}
@Test
void positionBased() {
assertAll(
() -> assertEquals(
new BsonDocument("documents", new BsonArray(asList(new BsonInt32(-2), new BsonInt32(0)))),
documents(-2, 0).toBsonDocument()),
() -> assertEquals(
new BsonDocument("documents", new BsonArray(asList(
new BsonString(CURRENT.value()), new BsonInt32(Integer.MAX_VALUE)))),
documents(CURRENT, Integer.MAX_VALUE).toBsonDocument()),
() -> assertEquals(
new BsonDocument("documents", new BsonArray(asList(new BsonInt32(0), new BsonString(UNBOUNDED.value())))),
documents(0, UNBOUNDED).toBsonDocument()),
() -> assertEquals(
new BsonDocument("documents", new BsonArray(asList(
new BsonString(CURRENT.value()), new BsonString(UNBOUNDED.value())))),
documents(CURRENT, UNBOUNDED).toBsonDocument()));
assertAll(
() -> assertThrows(IllegalArgumentException.class, () -> documents(1, -1)),
() -> assertThrows(IllegalArgumentException.class, () -> documents(CURRENT, -1)),
() -> assertThrows(IllegalArgumentException.class, () -> documents(1, CURRENT)),
() -> assertThrows(IllegalArgumentException.class, () -> documents(null, 1)),
() -> assertThrows(IllegalArgumentException.class, () -> documents(1, null)),
() -> assertThrows(IllegalArgumentException.class, () -> documents(null, null)));
}
@Test
void rangeBased() {
assertAll(
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(new BsonInt64(-1), new BsonInt64(0)))),
range(-1, 0).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(new BsonDouble(0), new BsonDouble(0)))),
range(0d, 0d).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(
new BsonDecimal128(new Decimal128(1)), new BsonDecimal128(new Decimal128(2))))),
range(new Decimal128(1), new Decimal128(2)).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(new BsonString(CURRENT.value()), new BsonDouble(0.1)))),
range(CURRENT, 0.1).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(new BsonDouble(0.1), new BsonString(UNBOUNDED.value())))),
range(0.1, UNBOUNDED).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(
new BsonString(CURRENT.value()), new BsonDecimal128(new Decimal128(Long.MAX_VALUE))))),
range(CURRENT, new Decimal128(Long.MAX_VALUE)).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(
new BsonDecimal128(new Decimal128(Long.MAX_VALUE)), new BsonString(UNBOUNDED.value())))),
range(new Decimal128(Long.MAX_VALUE), UNBOUNDED).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(new BsonInt64(-1), new BsonInt64(0))))
.append("unit", new BsonString("millisecond")),
timeRange(-1, 0, MILLISECOND).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(new BsonString(CURRENT.value()), new BsonInt64(1))))
.append("unit", new BsonString("hour")),
timeRange(CURRENT, 1, HOUR).toBsonDocument()),
() -> assertEquals(
new BsonDocument("range", new BsonArray(asList(new BsonInt64(1), new BsonString(UNBOUNDED.value()))))
.append("unit", new BsonString("month")),
timeRange(1, MONTH, UNBOUNDED).toBsonDocument()));
assertAll(
() -> assertThrows(IllegalArgumentException.class, () -> range(1, -1)),
() -> assertThrows(IllegalArgumentException.class, () -> range(null, 1)),
() -> assertThrows(IllegalArgumentException.class, () -> range(null, 0.1)),
() -> assertThrows(IllegalArgumentException.class, () -> range((Bound) null, Decimal128.POSITIVE_ZERO)),
() -> assertThrows(IllegalArgumentException.class, () -> range(1, null)),
() -> assertThrows(IllegalArgumentException.class, () -> range(0.1, null)),
() -> assertThrows(IllegalArgumentException.class, () -> range(Decimal128.POSITIVE_ZERO, (Bound) null)),
() -> assertThrows(IllegalArgumentException.class, () -> range((Decimal128) null, Decimal128.POSITIVE_ZERO)),
() -> assertThrows(IllegalArgumentException.class, () -> range(Decimal128.POSITIVE_ZERO, (Decimal128) null)),
() -> assertThrows(IllegalArgumentException.class, () -> range((Decimal128) null, (Decimal128) null)),
() -> assertThrows(IllegalArgumentException.class, () -> timeRange(1, -1, MongoTimeUnit.DAY)),
() -> assertThrows(IllegalArgumentException.class, () -> timeRange(1, 2, null)));
}
}
| 3,702 |
678 | /* Cycript - The Truly Universal Scripting Language
* Copyright (C) 2009-2016 <NAME> (saurik)
* Copyright (C) 2016 NowSecure <<EMAIL>>
*/
/* GNU Affero General Public License, Version 3 {{{ */
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
**/
/* }}} */
#include "Driver.hpp"
#include "JavaScript.hpp"
#include "Syntax.hpp"
#include <sstream>
#include <node_api.h>
namespace cynode {
class Binding {
public:
static void Dispose(void *user_data) {
CYDestroyContext();
}
static napi_value Attach(napi_env env, napi_callback_info info) {
napi_value argv[3];
size_t argc = 3;
napi_get_cb_info(env, info, &argc, argv, NULL, NULL);
if (argc != 3) {
napi_throw_error(env, "EINVAL", "Missing one or more arguments");
return NULL;
}
CYPool pool;
const char *device_id;
if (!GetOptionalStringArg(env, pool, argv[0], &device_id))
return NULL;
const char *host;
if (!GetOptionalStringArg(env, pool, argv[1], &host))
return NULL;
const char *target;
if (!GetOptionalStringArg(env, pool, argv[2], &target))
return NULL;
try {
CYAttach(device_id, host, target);
} catch (const CYException &error) {
napi_throw_error(env, NULL, error.PoolCString(pool));
}
return NULL;
}
static napi_value Execute(napi_env env, napi_callback_info info) {
napi_value command_value;
size_t argc = 1;
napi_get_cb_info(env, info, &argc, &command_value, NULL, NULL);
if (argc != 1) {
napi_throw_error(env, "EINVAL", "Missing command value");
return NULL;
}
CYPool pool;
const char *command;
if (!GetStringArg(env, pool, command_value, &command))
return NULL;
try {
std::stringbuf stream(command);
CYDriver driver(pool, stream);
driver.strict_ = false;
if (driver.Parse() || !driver.errors_.empty()) {
for (CYDriver::Errors::const_iterator error(driver.errors_.begin()); error != driver.errors_.end(); ++error) {
auto message(error->message_);
napi_throw_error(env, "EINVAL", message.c_str());
return NULL;
}
napi_throw_error(env, "EINVAL", "Invalid code");
return NULL;
}
if (driver.script_ == NULL) {
napi_throw_error(env, "EINVAL", "Invalid code");
return NULL;
}
std::stringbuf str;
CYOptions options;
CYOutput out(str, options);
out.pretty_ = false;
driver.Replace(options);
out << *driver.script_;
auto code(str.str());
auto json(CYExecute(pool, CYUTF8String(code.c_str(), code.size())));
napi_value result_value;
if (json != NULL)
napi_create_string_utf8(env, json, NAPI_AUTO_LENGTH, &result_value);
else
napi_get_null(env, &result_value);
return result_value;
} catch (const CYException &error) {
napi_throw_error(env, NULL, error.PoolCString(pool));
return NULL;
}
}
private:
static bool GetStringArg(napi_env env, CYPool &pool, napi_value value, const char **result) {
if (!GetOptionalStringArg(env, pool, value, result))
return false;
if (*result == NULL) {
napi_throw_type_error(env, "EINVAL", "Expected a string");
return false;
}
return true;
}
static bool GetOptionalStringArg(napi_env env, CYPool &pool, napi_value value, const char **result) {
size_t size;
if (napi_get_value_string_utf8(env, value, NULL, 0, &size) != napi_ok) {
napi_value null_value;
napi_get_null(env, &null_value);
bool is_null = false;
napi_strict_equals(env, value, null_value, &is_null);
if (is_null) {
*result = NULL;
return true;
}
napi_throw_type_error(env, "EINVAL", "Expected a string");
return false;
}
size += 1;
char *str = pool.malloc<char>(size);
napi_get_value_string_utf8(env, value, str, size, &size);
*result = str;
return true;
}
};
NAPI_MODULE_INIT() {
napi_property_descriptor desc[] = {
{"attach", NULL, Binding::Attach, NULL, NULL, NULL, napi_default, NULL},
{"execute", NULL, Binding::Execute, NULL, NULL, NULL, napi_default, NULL},
};
if (napi_define_properties(env, exports, sizeof(desc) / sizeof(desc[0]), desc) != napi_ok)
return NULL;
napi_add_env_cleanup_hook(env, Binding::Dispose, NULL);
return exports;
}
}
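// Illustrative sketch of driving this addon from JavaScript (the module path
// and target value are assumptions, not defined in this file):
//
//   const cycript = require('./build/Release/cycript');
//   cycript.attach(null, null, 'Safari');   // each argument may be null
//   const json = cycript.execute('2 + 2');  // returns a JSON string, or null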
| 2,569 |
569 | /*
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.template.soy.passes;
import static com.google.common.collect.ImmutableList.toImmutableList;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.template.soy.base.internal.IdGenerator;
import com.google.template.soy.error.ErrorReporter;
import com.google.template.soy.error.SoyErrorKind;
import com.google.template.soy.exprtree.ExprNode;
import com.google.template.soy.exprtree.FunctionNode;
import com.google.template.soy.logging.LoggingFunction;
import com.google.template.soy.shared.internal.BuiltinFunction;
import com.google.template.soy.soytree.CallBasicNode;
import com.google.template.soy.soytree.HtmlCloseTagNode;
import com.google.template.soy.soytree.HtmlOpenTagNode;
import com.google.template.soy.soytree.MsgFallbackGroupNode;
import com.google.template.soy.soytree.MsgNode;
import com.google.template.soy.soytree.PrintNode;
import com.google.template.soy.soytree.SoyFileNode;
import com.google.template.soy.soytree.SoyNode;
import com.google.template.soy.soytree.SoyNode.ExprHolderNode;
import com.google.template.soy.soytree.SoyNode.MsgBlockNode;
import com.google.template.soy.soytree.SoyNode.MsgSubstUnitNode;
import com.google.template.soy.soytree.SoyNode.ParentSoyNode;
import com.google.template.soy.soytree.SoyNode.StandaloneNode;
import com.google.template.soy.soytree.SoyTreeUtils;
import com.google.template.soy.soytree.TemplateNode;
import com.google.template.soy.soytree.VeLogNode;
import com.google.template.soy.types.BoolType;
import com.google.template.soy.types.SoyType;
import com.google.template.soy.types.SoyType.Kind;
import com.google.template.soy.types.SoyTypeRegistry;
import com.google.template.soy.types.SoyTypes;
import com.google.template.soy.types.VeType;
import java.util.List;
import java.util.Objects;
/**
* Validates uses of the {@code velog} command and {@code ve_data} expression.
*
* <p>Must run after:
*
* <ul>
* <li>VeRewritePass since that rewrites VE syntactic sugar
* <li>ResolveTypesPass since we rely on type resolution data
* <li>ResolveFunctions pass since we need to validate the use of {@link LoggingFunction}
* invocations
* <li>VeLogRewritePass since that rewrites more VE syntactic sugar
* </ul>
*/
final class VeLogValidationPass implements CompilerFileSetPass {
private static final SoyErrorKind UNEXPECTED_DATA =
SoyErrorKind.of(
"Unexpected data argument. The VE is type ''{0}'' which means there cannot be any data. "
+ "The data is typed ''{1}'' and must match with the VE.");
private static final SoyErrorKind WRONG_TYPE =
SoyErrorKind.of("Expected an expression of type ''{0}'', instead got ''{1}''.");
private static final SoyErrorKind LOGONLY_DISALLOWED_IN_MSG =
SoyErrorKind.of(
"The logonly attribute may not be set on '''{velog}''' nodes in '''{msg}''' context. "
+ "Consider moving the logonly content into another template and calling it, or "
+ "refactoring your '''{msg}''' into multiple distinct messages.");
private static final SoyErrorKind REQUIRE_STRICTHTML =
SoyErrorKind.of(
"The '{'velog ...'}' command can only be used in templates with stricthtml=\"true\".");
private static final SoyErrorKind INVALID_LOGGING_FUNCTION_LOCATION =
SoyErrorKind.of(
"The logging function ''{0}'' can only be evaluated in a print command that is the "
+ "only direct child of an html attribute value.{1}",
SoyErrorKind.StyleAllowance.NO_PUNCTUATION);
private static final SoyErrorKind NO_PRINT_DIRECTIVES =
SoyErrorKind.of(
"The logging function ''{0}'' can only be evaluated in a print command with no print "
+ "directives.");
private static final SoyErrorKind UNKNOWN_PROTO =
SoyErrorKind.of("Unknown proto type ''{0}'' configured for use with this VE.");
private static final SoyErrorKind BAD_DATA_TYPE =
SoyErrorKind.of(
"Illegal VE metadata type ''{0}'' for this VE. The metadata must be a proto.");
private static final SoyErrorKind INVALID_VE =
SoyErrorKind.of(
"The velog command requires a VE identifier, an expression of the ''ve'' type or an "
+ "expression of the ''ve_data'' type. Found an expression of type ''{0}''.");
private static final SoyErrorKind VE_UNION_WITH_DATA =
SoyErrorKind.of(
"It is illegal to set the data parameter if the ve type is a union (''{0}'').");
private static final SoyErrorKind LOG_WITHIN_MESSAGE_REQUIRES_ELEMENT =
SoyErrorKind.of("'{velog'} within '{msg'} must directly wrap an HTML element.");
private final ErrorReporter reporter;
private final SoyTypeRegistry typeRegistry;
VeLogValidationPass(ErrorReporter reporter, SoyTypeRegistry typeRegistry) {
this.reporter = reporter;
this.typeRegistry = typeRegistry;
}
@Override
public Result run(ImmutableList<SoyFileNode> sourceFiles, IdGenerator idGenerator) {
for (SoyFileNode file : sourceFiles) {
for (TemplateNode template : file.getTemplates()) {
run(template);
}
}
return Result.CONTINUE;
}
private void run(TemplateNode template) {
SoyTreeUtils.allFunctionInvocations(template, BuiltinFunction.VE_DATA)
.forEach(this::validateVeDataFunctionNode);
for (VeLogNode node : SoyTreeUtils.getAllNodesOfType(template, VeLogNode.class)) {
if (template.isStrictHtml()) {
validateVelogElementStructure(node);
validateVeLogNode(node);
} else {
reporter.report(node.getVeDataExpression().getSourceLocation(), REQUIRE_STRICTHTML);
}
}
// We need to validate logging functions. The rules are
// 1. logging functions can only be the direct children of PrintNodes
// 2. the print nodes must be direct children of HtmlAttributeValueNodes
//
// However, because there is no way (currently) to navigate from an ExprNode to the SoyNode
// which owns it, we need to do this multi-phase traversal to ensure the correct parenting
// hierarchy.
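    // Illustrative (hypothetical) fragments for these rules, where loggingFn
    // stands for some registered LoggingFunction:
    //   <div data-x="{loggingFn()}">             OK: sole child of an attribute value
    //   <div data-x="{loggingFn() |escapeUri}">  rejected: print directive present
    //   <div>{loggingFn()}</div>                 rejected: not in an attribute value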
SoyTreeUtils.visitExprNodesWithHolder(
template,
FunctionNode.class,
(holderNode, function) -> {
if (function.isResolved() && function.getSoyFunction() instanceof LoggingFunction) {
validateLoggingFunction(holderNode, function);
}
});
}
private void validateLoggingFunction(ExprHolderNode holderNode, FunctionNode function) {
if (function.getParent().getKind() != ExprNode.Kind.EXPR_ROOT_NODE) {
reporter.report(
function.getSourceLocation(),
INVALID_LOGGING_FUNCTION_LOCATION,
function.getStaticFunctionName(),
" It is part of complex expression.");
return;
}
if (holderNode.getKind() != SoyNode.Kind.PRINT_NODE) {
reporter.report(
function.getSourceLocation(),
INVALID_LOGGING_FUNCTION_LOCATION,
function.getStaticFunctionName(),
" It isn't in a print node.");
return;
}
PrintNode printNode = (PrintNode) holderNode;
if (printNode.numChildren() != 0) {
reporter.report(
printNode.getChild(0).getSourceLocation(),
NO_PRINT_DIRECTIVES,
function.getStaticFunctionName());
}
if (holderNode.getParent().getKind() != SoyNode.Kind.HTML_ATTRIBUTE_VALUE_NODE) {
reporter.report(
function.getSourceLocation(),
INVALID_LOGGING_FUNCTION_LOCATION,
function.getStaticFunctionName(),
" It isn't the direct child of an attribute value.");
return;
}
if (holderNode.getParent().numChildren() > 1) {
reporter.report(
function.getSourceLocation(),
INVALID_LOGGING_FUNCTION_LOCATION,
function.getStaticFunctionName(),
" It has sibling nodes in the attribute value.");
return;
}
}
private void validateVelogElementStructure(VeLogNode node) {
List<StandaloneNode> children =
node.getChildren().stream()
.filter(child -> !SoyElementPass.ALLOWED_CHILD_NODES.contains(child.getKind()))
.collect(toImmutableList());
// TODO(b/133428199): Support {velog} around calls in messages.
if (node.getNearestAncestor(MsgFallbackGroupNode.class) == null
&& children.size() == 1
&& Iterables.getLast(children) instanceof CallBasicNode) {
node.setNeedsSyntheticVelogNode(true);
return;
}
// If {velog} is empty, or does not have a single root, we must output a synthetic VE log node
// on the client.
if (node.numChildren() == 0) {
node.setNeedsSyntheticVelogNode(true);
return;
}
HtmlOpenTagNode firstTag = node.getOpenTagNode();
// If the first child of {velog} is not an open tag, output a synthetic VE log node.
if (firstTag == null) {
node.setNeedsSyntheticVelogNode(true);
return;
}
if (!firstTag.getTagName().isStatic() && !firstTag.getTagName().isLegacyDynamicTagName()) {
node.setNeedsSyntheticVelogNode(true);
return;
}
// If the first child is self-closing or is a void tag, output a synthetic VE log node if we see
// anything after it. If it is the only thing, we don't need a synthetic VE log node.
if (firstTag.isSelfClosing() || firstTag.getTagName().isDefinitelyVoid()) {
if (node.numChildren() > 1) {
node.setNeedsSyntheticVelogNode(true);
}
return;
}
HtmlCloseTagNode lastTag = node.getCloseTagNode();
// If the last child is not a close tag, output a synthetic VE log node.
if (lastTag == null) {
node.setNeedsSyntheticVelogNode(true);
return;
}
    // This check makes sure that there is exactly one top-level element -- the last tag must
// close the first tag within {velog} command. Otherwise, we need to output a synthetic VE log
// node.
if (lastTag.getTaggedPairs().size() != 1
|| !Objects.equals(lastTag.getTaggedPairs().get(0), firstTag)) {
node.setNeedsSyntheticVelogNode(true);
}
}
/** Type checks the VE and logonly expressions. */
private void validateVeLogNode(VeLogNode node) {
if (node.getVeDataExpression().getRoot().getType().getKind() != Kind.VE_DATA) {
reporter.report(
node.getVeDataExpression().getSourceLocation(),
INVALID_VE,
node.getVeDataExpression().getRoot().getType());
}
if (node.needsSyntheticVelogNode() && isInMsgNode(node)) {
reporter.report(node.getSourceLocation(), LOG_WITHIN_MESSAGE_REQUIRES_ELEMENT);
}
if (node.getLogonlyExpression() != null) {
// check to see if it is in a msg node. logonly is disallowed in msg nodes because we don't
// have an implementation strategy.
if (isInMsgNode(node)) {
reporter.report(node.getLogonlyExpression().getSourceLocation(), LOGONLY_DISALLOWED_IN_MSG);
}
SoyType type = node.getLogonlyExpression().getType();
if (type.getKind() != Kind.BOOL) {
reporter.report(
node.getLogonlyExpression().getSourceLocation(),
WRONG_TYPE,
BoolType.getInstance(),
type);
}
}
}
private void validateVeDataFunctionNode(FunctionNode node) {
if (node.numChildren() < 1 || node.numChildren() > 2) {
return; // an error has already been reported
}
ExprNode veExpr = node.getChild(0);
ExprNode dataExpr = node.getChild(1);
if (veExpr.getType().getKind() == Kind.VE) {
if (dataExpr.getType().getKind() != Kind.NULL) {
VeType veType = (VeType) veExpr.getType();
SoyType dataType = dataExpr.getType();
if (!veType.getDataType().isPresent()) {
reporter.report(dataExpr.getSourceLocation(), UNEXPECTED_DATA, veType, dataType);
} else {
SoyType veDataType =
typeRegistry.getProtoRegistry().getProtoType(veType.getDataType().get());
if (veDataType == null) {
reporter.report(veExpr.getSourceLocation(), UNKNOWN_PROTO, veType.getDataType().get());
} else if (veDataType.getKind() != Kind.PROTO) {
reporter.report(veExpr.getSourceLocation(), BAD_DATA_TYPE, veDataType);
} else if (!dataType.equals(veDataType)) {
reporter.report(
dataExpr.getSourceLocation(), WRONG_TYPE, veType.getDataType().get(), dataType);
}
}
}
} else if (SoyTypes.isKindOrUnionOfKind(veExpr.getType(), Kind.VE)) {
// This is a union of VE types with different data types, so it's okay to wrap in ve_data as
// long as ve_data's data parameter is null.
if (dataExpr.getType().getKind() != Kind.NULL) {
reporter.report(dataExpr.getSourceLocation(), VE_UNION_WITH_DATA, veExpr.getType());
}
} else {
reporter.report(veExpr.getSourceLocation(), WRONG_TYPE, "ve", veExpr.getType());
}
}
private static boolean isInMsgNode(SoyNode node) {
if (node instanceof MsgNode) {
return true;
}
ParentSoyNode<?> parent = node.getParent();
if (parent instanceof MsgBlockNode || parent instanceof MsgSubstUnitNode) {
return isInMsgNode(parent);
}
return false;
}
}
| 5,265 |
571 | /**
* @file batchDistance.cpp
* @brief mex interface for cv::batchDistance
* @ingroup core
* @author Amro
* @date 2015
*/
#include "mexopencv.hpp"
using namespace std;
using namespace cv;
/**
* Main entry called from Matlab
* @param nlhs number of left-hand-side arguments
* @param plhs pointers to mxArrays in the left-hand-side
* @param nrhs number of right-hand-side arguments
* @param prhs pointers to mxArrays in the right-hand-side
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
// Check the number of arguments
nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=2);
// Argument vector
vector<MxArray> rhs(prhs, prhs+nrhs);
// Option processing
int dtype = -1;
int normType = cv::NORM_L2;
int K = 0;
Mat mask;
int update = 0;
bool crosscheck = false;
for (int i=2; i<nrhs; i+=2) {
string key(rhs[i].toString());
if (key == "DType")
dtype = (rhs[i+1].isChar()) ?
ClassNameMap[rhs[i+1].toString()] : rhs[i+1].toInt();
else if (key == "NormType")
normType = NormType[rhs[i+1].toString()];
else if (key == "K")
K = rhs[i+1].toInt();
else if (key == "Mask")
mask = rhs[i+1].toMat(CV_8U);
else if (key == "Update")
update = rhs[i+1].toInt();
else if (key == "CrossCheck")
crosscheck = rhs[i+1].toBool();
else
mexErrMsgIdAndTxt("mexopencv:error",
"Unrecognized option %s", key.c_str());
}
// Process
Mat src1(rhs[0].toMat(rhs[0].isUint8() ? CV_8U : CV_32F)),
src2(rhs[1].toMat(rhs[1].isUint8() ? CV_8U : CV_32F));
Mat dst, nidx;
batchDistance(src1, src2, dst, dtype, (K>0 ? nidx : noArray()),
normType, K, mask, update, crosscheck);
plhs[0] = MxArray(dst);
if (nlhs>1)
plhs[1] = MxArray(nidx);
}
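/*
 * Illustrative MATLAB call as exposed through mexopencv (option names follow
 * the keys parsed above; the data is made up):
 *
 *   X = single(rand(10,5)); Y = single(rand(20,5));
 *   [dst, nidx] = cv.batchDistance(X, Y, 'NormType','L2', 'K',2);
 */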
| 931 |
1,125 | // Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int32_x8(
size_t n,
const void* input,
float* output,
const void* params)
{
assert(n != 0);
assert(n % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsign_mask = wasm_i32x4_const_splat(0x80000000);
const v128_t vexp_offset = wasm_i32x4_const_splat(0x70000000);
const v128_t vexp_scale = wasm_f32x4_const_splat(0x1.0p-112f);
const v128_t vmagic_mask = wasm_i32x4_const_splat(0x3F000000);
const v128_t vmagic_bias = wasm_f32x4_const_splat(0.5f);
const v128_t vdenorm_cutoff = wasm_i32x4_const_splat(0x04000000);
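  // Sketch of the conversion below: each fp16 value is moved into the upper 16
  // bits of a 32-bit lane and its sign bit is split off. Normal inputs shift
  // the fp16 exponent/mantissa into fp32 position, re-bias the exponent via
  // vexp_offset, and scale by 0x1.0p-112f; subnormal inputs are rebuilt with
  // the magic-number trick (OR the mantissa into a 0.5f template, subtract
  // vmagic_bias). vdenorm_cutoff selects between the two paths per lane.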
const uint16_t* i = (const uint16_t*) input;
for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
const v128_t vh = wasm_v128_load(i);
i += 8;
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_mask), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_mask), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
wasm_v128_store(output, vf_lo);
wasm_v128_store(output + 4, vf_hi);
output += 8;
}
if XNN_UNLIKELY(n != 0) {
assert(n >= 1 * sizeof(float));
assert(n <= 7 * sizeof(float));
const v128_t vh = wasm_v128_load(i);
const v128_t vzero = wasm_i16x8_const_splat(0);
const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0, 8, 1, 9, 2, 10, 3, 11);
const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);
const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);
const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);
const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);
const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_mask), vmagic_bias);
const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_mask), vmagic_bias);
const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
if (n & (4 * sizeof(float))) {
wasm_v128_store(output, vf);
output += 4;
const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
}
if (n & (2 * sizeof(float))) {
*((double*) output) = wasm_f64x2_extract_lane(vf, 0);
output += 2;
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
}
if (n & (1 * sizeof(float))) {
*((float*) output) = wasm_f32x4_extract_lane(vf, 0);
}
}
}
| 2,203 |
2,032 | <gh_stars>1000+
# -*- coding: utf-8 -*-
from ...context import init_test_context
init_test_context()
from zvt.consts import SAMPLE_STOCK_CODES
from zvt.recorders.eastmoney.holder.eastmoney_top_ten_holder_recorder import TopTenHolderRecorder
from zvt.recorders.eastmoney.holder.eastmoney_top_ten_tradable_holder_recorder import TopTenTradableHolderRecorder
def test_top_ten_holder_recorder():
recorder = TopTenHolderRecorder(codes=SAMPLE_STOCK_CODES)
try:
recorder.run()
except:
assert False
def test_top_ten_tradable_holder_recorder():
recorder = TopTenTradableHolderRecorder(codes=SAMPLE_STOCK_CODES)
try:
recorder.run()
except:
assert False
| 281 |
988 | <gh_stars>100-1000
/*
* Copyright (c) 2018 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.codinjutsu.tools.mongo.utils;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class StringUtilsTest {
@Test
public void abbreviateInCenter() {
String value = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz";
assertThat(StringUtils.abbreviateInCenter(value, 40)).isEqualTo("abcdefghijklmnopq...ghijklmnopqrstuvwxyz");
}
@Test
public void parseNumber() {
assertThat(StringUtils.parseNumber("1")).isEqualTo(1);
assertThat(StringUtils.parseNumber("1.000000000001")).isEqualTo(1.000000000001d);
assertThat(StringUtils.parseNumber("1000000000000000")).isEqualTo(1000000000000000L);
}
}
| 471 |
521 | <filename>src/elle/reactor/modules/dokany/dokan/dokanc.h<gh_stars>100-1000
/*
Dokan : user-mode file system library for Windows
Copyright (C) 2015 - 2016 <NAME>. <<EMAIL>> and <NAME>. <<EMAIL>>
Copyright (C) 2007 - 2011 <NAME> <<EMAIL>>
http://dokan-dev.github.io
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef DOKANC_H_
#define DOKANC_H_
#include "dokan.h"
#include <malloc.h>
#include <sec_api/stdio_s.h>
#ifdef __cplusplus
extern "C" {
#endif
#define DOKAN_GLOBAL_DEVICE_NAME L"\\\\.\\Dokan_" DOKAN_MAJOR_API_VERSION
#define DOKAN_DRIVER_SERVICE L"Dokan" DOKAN_MAJOR_API_VERSION
#define DOKAN_CONTROL_OPTION_FORCE_UNMOUNT 1
#define DOKAN_CONTROL_SUCCESS 1
#define DOKAN_CONTROL_FAIL 0
#define DOKAN_SERVICE_START 1
#define DOKAN_SERVICE_STOP 2
#define DOKAN_SERVICE_DELETE 3
#define DOKAN_KEEPALIVE_TIME 3000 // in milliseconds
#define DOKAN_MAX_THREAD 15
// DokanOptions->DebugMode is ON?
extern BOOL g_DebugMode;
// DokanOptions->UseStdErr is ON?
extern BOOL g_UseStdErr;
static VOID DokanDbgPrint(LPCSTR format, ...) {
const char *outputString;
char *buffer;
size_t length;
va_list argp;
va_start(argp, format);
length = _vscprintf(format, argp) + 1;
buffer = (char *)_malloca(length * sizeof(char));
if (buffer) {
vsprintf_s(buffer, length, format, argp);
outputString = buffer;
} else {
outputString = format;
}
if (g_UseStdErr)
fputs(outputString, stderr);
else
OutputDebugStringA(outputString);
if (buffer)
_freea(buffer);
va_end(argp);
if (g_UseStdErr)
fflush(stderr);
}
static VOID DokanDbgPrintW(LPCWSTR format, ...) {
const WCHAR *outputString;
WCHAR *buffer;
size_t length;
va_list argp;
va_start(argp, format);
length = _vscwprintf(format, argp) + 1;
buffer = (WCHAR *)_malloca(length * sizeof(WCHAR));
if (buffer) {
vswprintf_s(buffer, length, format, argp);
outputString = buffer;
} else {
outputString = format;
}
if (g_UseStdErr)
fputws(outputString, stderr);
else
OutputDebugStringW(outputString);
if (buffer)
_freea(buffer);
va_end(argp);
}
#define DbgPrint(format, ...) \
do { \
if (g_DebugMode) { \
DokanDbgPrint(format, ## __VA_ARGS__); \
} \
} \
while (0)
#define DbgPrintW(format, ...) \
do { \
if (g_DebugMode) { \
DokanDbgPrintW(format, ## __VA_ARGS__); \
} \
} \
while (0)
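/* Illustrative usage (format strings are made up; output goes to stderr or the
 * debugger depending on DokanUseStdErr, and only when g_DebugMode is set):
 *   DbgPrint("open failed, status=%d\n", status);
 *   DbgPrintW(L"file: %s\n", FileName);
 */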
VOID DOKANAPI DokanUseStdErr(BOOL Status);
VOID DOKANAPI DokanDebugMode(BOOL Status);
BOOL DOKANAPI DokanServiceInstall(LPCWSTR ServiceName, DWORD ServiceType,
LPCWSTR ServiceFullPath);
BOOL DOKANAPI DokanServiceDelete(LPCWSTR ServiceName);
BOOL DOKANAPI DokanNetworkProviderInstall();
BOOL DOKANAPI DokanNetworkProviderUninstall();
BOOL DOKANAPI DokanSetDebugMode(ULONG Mode);
#ifdef __cplusplus
}
#endif
#endif // DOKANC_H_
| 2,100 |
6,098 | # -*- encoding: utf-8 -*-
import random
import warnings
from contextlib import contextmanager
from collections import OrderedDict, Counter, defaultdict
try:
from StringIO import StringIO # py2 (first as py2 also has io.StringIO, but only with unicode support)
except:
from io import StringIO # py3
import h2o
import numpy as np
from h2o.utils.ext_dependencies import get_matplotlib_pyplot
from h2o.exceptions import H2OValueError
def _display(object):
"""
Display the object.
:param object: An object to be displayed.
:returns: the input
"""
import matplotlib.figure
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if isinstance(object, matplotlib.figure.Figure) and matplotlib.get_backend().lower() != "agg":
plt.show()
else:
try:
import IPython.display
IPython.display.display(object)
except ImportError:
print(object)
if isinstance(object, matplotlib.figure.Figure):
plt.close(object)
print("\n")
return object
def _dont_display(object):
"""
Don't display the object
:param object: that should not be displayed
:returns: input
"""
import matplotlib.figure
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if isinstance(object, matplotlib.figure.Figure):
plt.close()
return object
# UTILS
class Header:
"""
Class representing a Header with pretty printing for IPython.
"""
def __init__(self, content, level=1):
self.content = content
self.level = level
def _repr_html_(self):
return "<h{level}>{content}</h{level}>".format(level=self.level, content=self.content)
def _repr_markdown_(self):
return "\n\n{} {}".format("#" * self.level, self.content)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __str__(self):
return self._repr_markdown_()
class Description:
"""
Class representing a Description with pretty printing for IPython.
"""
DESCRIPTIONS = dict(
leaderboard="Leaderboard shows models with their metrics. When provided with H2OAutoML object, "
"the leaderboard shows 5-fold cross-validated metrics by default (depending on the "
"H2OAutoML settings), otherwise it shows metrics computed on the frame. "
"At most 20 models are shown by default.",
leaderboard_row="Leaderboard shows models with their metrics and their predictions for a given row. "
"When provided with H2OAutoML object, the leaderboard shows 5-fold cross-validated "
"metrics by default (depending on the H2OAutoML settings), otherwise it shows "
"metrics computed on the frame. At most 20 models are shown by default.",
confusion_matrix="Confusion matrix shows a predicted class vs an actual class.",
residual_analysis="Residual Analysis plots the fitted values vs residuals on a test dataset. Ideally, "
"residuals should be randomly distributed. Patterns in this plot can indicate potential "
"problems with the model selection, e.g., using simpler model than necessary, not accounting "
"for heteroscedasticity, autocorrelation, etc. Note that if you see \"striped\" lines of "
"residuals, that is an artifact of having an integer valued (vs a real valued) "
"response variable.",
variable_importance="The variable importance plot shows the relative importance of the most "
"important variables in the model.",
varimp_heatmap="Variable importance heatmap shows variable importance across multiple models. "
"Some models in H2O return variable importance for one-hot (binary indicator) "
"encoded versions of categorical columns (e.g. Deep Learning, XGBoost). "
"In order for the variable importance of categorical columns to be compared "
"across all model types we compute a summarization of the the variable importance "
"across all one-hot encoded features and return a single variable importance for the "
"original categorical feature. By default, the models and variables are ordered by "
"their similarity.",
model_correlation_heatmap="This plot shows the correlation between the predictions of the models. "
"For classification, frequency of identical predictions is used. By default, "
"models are ordered by their similarity (as computed by hierarchical clustering). "
"Interpretable models, such as GAM, GLM, and RuleFit are highlighted using "
"red colored text.",
shap_summary="SHAP summary plot shows the contribution of the features for each instance (row of data). "
"The sum of the feature contributions and the bias term is equal to the raw prediction of "
"the model, i.e., prediction before applying inverse link function.",
pdp="Partial dependence plot (PDP) gives a graphical depiction of the marginal effect of a variable on "
"the response. The effect of a variable is measured in change in the mean response. PDP assumes "
"independence between the feature for which is the PDP computed and the rest.",
ice="An Individual Conditional Expectation (ICE) plot gives a graphical depiction of the marginal effect "
"of a variable on the response. ICE plots are similar to partial dependence plots (PDP); PDP shows the "
"average effect of a feature while ICE plot shows the effect for a single instance. This function will "
"plot the effect for each decile. In contrast to the PDP, ICE plots can provide more insight, especially "
"when there is stronger feature interaction.",
ice_row="Individual conditional expectations (ICE) plot gives a graphical depiction of the marginal "
"effect of a variable on the response for a given row. ICE plot is similar to partial "
"dependence plot (PDP), PDP shows the average effect of a feature while ICE plot shows "
"the effect for a single instance.",
shap_explain_row="SHAP explanation shows contribution of features for a given instance. The sum "
"of the feature contributions and the bias term is equal to the raw prediction of "
"the model, i.e., prediction before applying inverse link function. H2O implements "
"TreeSHAP which when the features are correlated, can increase contribution of a feature "
"that had no influence on the prediction.",
)
def __init__(self, for_what):
self.content = self.DESCRIPTIONS[for_what]
def _repr_html_(self):
return "<blockquote>{}</blockquote>".format(self.content)
def _repr_markdown_(self):
return "\n> {}".format(self.content)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __str__(self):
return self._repr_markdown_()
class H2OExplanation(OrderedDict):
def _ipython_display_(self):
from IPython.display import display
for v in self.values():
display(v)
@contextmanager
def no_progress():
"""
A context manager that temporarily blocks showing the H2O's progress bar.
Used when a multiple models are evaluated.
"""
progress = h2o.job.H2OJob.__PROGRESS_BAR__
if progress:
h2o.no_progress()
try:
yield
finally:
if progress:
h2o.show_progress()
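# Illustrative use of no_progress() (model and frame are assumed to exist):
#   with no_progress():
#       contributions = model.predict_contributions(frame)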
class NumpyFrame:
"""
Simple class that very vaguely emulates Pandas DataFrame.
Main purpose is to keep parsing from the List of Lists format to numpy.
This class is meant to be used just in the explain module.
Due to that fact it encodes the factor variables similarly to R/pandas -
factors are mapped to numeric column which in turn makes it easier to plot it.
"""
def __init__(self, h2o_frame):
# type: ("NumpyFrame", Union[h2o.H2OFrame, h2o.two_dim_table.H2OTwoDimTable]) -> None
if isinstance(h2o_frame, h2o.two_dim_table.H2OTwoDimTable):
self._columns = h2o_frame.col_header
_is_numeric = np.array([type_ in ["double", "float", "long", "integer"]
for type_ in h2o_frame.col_types], dtype=bool)
_is_factor = np.array([type_ in ["string"] for type_ in h2o_frame.col_types],
dtype=bool)
df = h2o_frame.cell_values
self._factors = dict()
for col in range(len(self._columns)):
if _is_factor[col]:
levels = set(row[col] for row in df)
self._factors[self._columns[col]] = list(levels)
self._data = np.empty((len(df), len(self._columns)))
df = [self._columns] + df
elif isinstance(h2o_frame, h2o.H2OFrame):
            _is_factor = np.array(h2o_frame.isfactor(), dtype=bool) | np.array(
                h2o_frame.ischaracter(), dtype=bool)
_is_numeric = h2o_frame.isnumeric()
self._columns = h2o_frame.columns
self._factors = {col: h2o_frame[col].asfactor().levels()[0] for col in
np.array(h2o_frame.columns)[_is_factor]}
df = h2o_frame.as_data_frame(False)
self._data = np.empty((h2o_frame.nrow, h2o_frame.ncol))
else:
raise RuntimeError("Unexpected type of \"h2o_frame\": {}".format(type(h2o_frame)))
for idx, col in enumerate(df[0]):
if _is_factor[idx]:
convertor = self.from_factor_to_num(col)
self._data[:, idx] = np.array(
[float(convertor.get(
row[idx] if not (len(row) == 0 or row[idx] == "") else "nan", "nan"))
for row in df[1:]], dtype=np.float32)
elif _is_numeric[idx]:
self._data[:, idx] = np.array(
[float(row[idx] if not (len(row) == 0 or row[idx] == "") else "nan") for row in
df[1:]],
dtype=np.float32)
else:
try:
self._data[:, idx] = np.array([row[idx] if not (len(row) == 0 or row[idx] == "")
else "nan" for row in df[1:]],
dtype=np.datetime64)
except Exception:
raise RuntimeError("Unexpected type of column {}!".format(col))
def isfactor(self, column):
# type: ("NumpyFrame", str) -> bool
"""
Is column a factor/categorical column?
:param column: string containing the column name
:returns: boolean
"""
return column in self._factors or self._get_column_and_factor(column)[0] in self._factors
def from_factor_to_num(self, column):
# type: ("NumpyFrame", str) -> Dict[str, int]
"""
Get a dictionary mapping a factor to its numerical representation in the NumpyFrame
:param column: string containing the column name
:returns: dictionary
"""
fact = self._factors[column]
return dict(zip(fact, range(len(fact))))
def from_num_to_factor(self, column):
# type: ("NumpyFrame", str) -> Dict[int, str]
"""
Get a dictionary mapping numerical representation of a factor to the category names.
:param column: string containing the column name
:returns: dictionary
"""
fact = self._factors[column]
return dict(zip(range(len(fact)), fact))
def _get_column_and_factor(self, column):
# type: ("NumpyFrame", str) -> Tuple[str, Optional[float]]
"""
Get a column name and possibly a factor name.
This is used to get proper column name and factor name when provided
with the output of some algos such as XGBoost which encode factor
columns to "column_name.category_name".
:param column: string containing the column name
:returns: tuple (column_name: str, factor_name: Optional[str])
"""
if column in self.columns:
return column, None
if column.endswith(".") and column[:-1] in self.columns:
return column[:-1], None
col_parts = column.split(".")
for i in range(1, len(col_parts) + 1):
if ".".join(col_parts[:i]) in self.columns:
column = ".".join(col_parts[:i])
factor_name = ".".join(col_parts[i:])
if factor_name == "missing(NA)":
factor = float("nan")
else:
factor = self.from_factor_to_num(column)[factor_name]
return column, factor
def __getitem__(self, indexer):
# type: ("NumpyFrame", Union[str, Tuple[Union[int,List[int]], str]]) -> np.ndarray
"""
A low level way to get a column or a row within a column.
NOTE: Returns numeric representation even for factors.
:param indexer: string for the whole column or a tuple (row_index, column_name)
:returns: a column or a row within a column
"""
row = slice(None)
if isinstance(indexer, tuple):
row = indexer[0]
column = indexer[1]
else:
column = indexer
if column not in self.columns:
column, factor = self._get_column_and_factor(column)
if factor is not None:
if factor != factor:
return np.asarray(np.isnan(self._data[row, self.columns.index(column)]),
dtype=np.float32)
return np.asarray(self._data[row, self.columns.index(column)] == factor,
dtype=np.float32)
return self._data[row, self.columns.index(column)]
def get(self, column, as_factor=True):
# type: ("NumpyFrame", str, bool) -> np.ndarray
"""
Get a column.
:param column: string containing the column name
:param as_factor: if True (default), factor column will contain string
representation; otherwise numerical representation
:returns: A column represented as numpy ndarray
"""
if as_factor and self.isfactor(column):
column, factor_idx = self._get_column_and_factor(column)
if factor_idx is not None:
return self[column] == factor_idx
convertor = self.from_num_to_factor(column)
return np.array([convertor.get(row, "") for row in self[column]])
return self[column]
def levels(self, column):
# type: ("NumpyFrame", str) -> List[str]
"""
Get levels/categories of a factor column.
:param column: a string containing the column name
:returns: list of levels
"""
return self._factors.get(column, [])
def nlevels(self, column):
# type: ("NumpyFrame", str) -> int
"""
Get number of levels/categories of a factor column.
:param column: string containing the column name
:returns: a number of levels within a factor
"""
return len(self.levels(column))
@property
def columns(self):
# type: ("NumpyFrame") -> List[str]
"""
Column within the NumpyFrame.
:returns: list of columns
"""
return self._columns
@property
def nrow(self):
# type: ("NumpyFrame") -> int
"""
Number of rows.
:returns: number of rows
"""
return self._data.shape[0]
@property
def ncol(self):
# type: ("NumpyFrame") -> int
"""
Number of columns.
:returns: number of columns
"""
return self._data.shape[1]
@property
def shape(self):
# type: ("NumpyFrame") -> Tuple[int, int]
"""
Shape of the frame.
:returns: tuple (number of rows, number of columns)
"""
return self._data.shape
def sum(self, axis=0):
# type: ("NumpyFrame", int) -> np.ndarray
"""
Calculate the sum of the NumpyFrame elements over the given axis.
WARNING: This method doesn't care if the column is categorical or numeric. Use with care.
:param axis: Axis along which a sum is performed.
:returns: numpy.ndarray with shape same as NumpyFrame with the `axis` removed
"""
return self._data.sum(axis=axis)
def mean(self, axis=0):
# type: ("NumpyFrame", int) -> np.ndarray
"""
Calculate the mean of the NumpyFrame elements over the given axis.
WARNING: This method doesn't care if the column is categorical or numeric. Use with care.
:param axis: Axis along which a mean is performed.
:returns: numpy.ndarray with shape same as NumpyFrame with the `axis` removed
"""
return self._data.mean(axis=axis)
def items(self, with_categorical_names=False):
# type: ("NumpyFrame", bool) -> Generator[Tuple[str, np.ndarray], None, None]
"""
Make a generator that yield column name and ndarray with values.
:params with_categorical_names: if True, factor columns are returned as string columns;
otherwise numerical
:returns: generator to be iterated upon
"""
for col in self.columns:
yield col, self.get(col, with_categorical_names)
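# Illustrative NumpyFrame usage (assumes a running H2O cluster; data made up):
#   hf = h2o.H2OFrame({"num": [1.0, 2.0], "cat": ["a", "b"]})
#   nf = NumpyFrame(hf)
#   nf.isfactor("cat")   # True
#   nf.get("cat")        # array(['a', 'b'], ...) -- string representation
#   nf["cat"]            # numeric encoding of the factor levels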
def _get_domain_mapping(model):
"""
Get a mapping between columns and their domains.
:return: Dictionary containing a mapping column -> factors
"""
output = model._model_json["output"]
return dict(zip(output["names"], output["domains"]))
def _shorten_model_ids(model_ids):
import re
regexp = re.compile(r"(.*)_AutoML_[\d_]+((?:_.*)?)$") # nested group needed for Py2
shortened_model_ids = [regexp.sub(r"\1\2", model_id) for model_id in model_ids]
if len(set(shortened_model_ids)) == len(set(model_ids)):
return shortened_model_ids
return model_ids
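# Illustrative behavior (made-up ids): "GBM_1_AutoML_20201231_000000" shortens
# to "GBM_1"; if shortening would make two ids collide, the originals are kept.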
def _get_algorithm(model, treat_xrt_as_algorithm=False):
# type: (Union[str, h2o.model.ModelBase], bool) -> str
"""
Get algorithm type. Use model id to infer it if possible.
:param model: model or a model_id
:param treat_xrt_as_algorithm: boolean used for best_of_family
:returns: string containing algorithm name
"""
if not isinstance(model, h2o.model.ModelBase):
import re
algo = re.search("^(DeepLearning|DRF|GAM|GBM|GLM|NaiveBayes|StackedEnsemble|RuleFit|XGBoost|XRT)(?=_)", model)
if algo is not None:
algo = algo.group(0).lower()
if algo == "xrt" and not treat_xrt_as_algorithm:
algo = "drf"
return algo
else:
model = h2o.get_model(model)
if treat_xrt_as_algorithm and model.algo == "drf":
if model.actual_params.get("histogram_type") == "Random":
return "xrt"
return model.algo
def _first_of_family(models, all_stackedensembles=False):
# type: (Union[str, h2o.model.ModelBase], bool) -> Union[str, h2o.model.ModelBase]
"""
Get first of family models
:param models: models or model ids
:param all_stackedensembles: if True return all stacked ensembles
:returns: list of models or model ids (the same type as on input)
"""
selected_models = []
included_families = set()
for model in models:
family = _get_algorithm(model, treat_xrt_as_algorithm=True)
if family not in included_families or (all_stackedensembles and "stackedensemble" == family):
selected_models.append(model)
included_families.add(family)
return selected_models
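# Illustrative behavior (made-up ids): given ["GBM_1_AutoML_1", "GBM_2_AutoML_1",
# "XRT_1_AutoML_1"], the result keeps "GBM_1_AutoML_1" and "XRT_1_AutoML_1",
# because XRT is treated as its own family here.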
def _density(xs, bins=100):
# type: (np.ndarray, int) -> np.ndarray
"""
Make an approximate density estimation by blurring a histogram (used for SHAP summary plot).
:param xs: numpy vector
:param bins: number of bins
:returns: density values
"""
hist = list(np.histogram(xs, bins=bins))
# gaussian blur
hist[0] = np.convolve(hist[0],
[0.00598, 0.060626, 0.241843,
0.383103,
0.241843, 0.060626, 0.00598])[3:-3]
hist[0] = hist[0] / np.max(hist[0])
hist[1] = (hist[1][:-1] + hist[1][1:]) / 2
return np.interp(xs, hist[1], hist[0])
def _uniformize(data, col_name):
# type: (NumpyFrame, str) -> np.ndarray
"""
Convert to quantiles.
:param data: NumpyFrame
:param col_name: string containing a column name
:returns: quantile values of individual points in the column
"""
if col_name not in data.columns or data.isfactor(col_name):
res = data[col_name]
diff = (np.nanmax(res) - np.nanmin(res))
if diff <= 0 or np.isnan(diff):
return res
res = (res - np.nanmin(res)) / diff
return res
col = data[col_name]
xs = np.linspace(0, 1, 100)
quantiles = np.nanquantile(col, xs)
res = np.interp(col, quantiles, xs)
res = (res - np.nanmin(res)) / (np.nanmax(res) - np.nanmin(res))
return res
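# Illustrative: for a numeric column the values are mapped through ~100
# empirical quantiles, so e.g. the column's median lands near 0.5 after the
# final rescaling; factor columns are only min-max scaled.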
# PLOTS
def shap_summary_plot(
model, # type: h2o.model.ModelBase
frame, # type: h2o.H2OFrame
columns=None, # type: Optional[Union[List[int], List[str]]]
top_n_features=20, # type: int
samples=1000, # type: int
colorize_factors=True, # type: bool
alpha=1, # type: float
colormap=None, # type: str
figsize=(12, 12), # type: Union[Tuple[float], List[float]]
jitter=0.35 # type: float
): # type: (...) -> plt.Figure
"""
SHAP summary plot
    SHAP summary plot shows the contribution of the features for each instance. The sum
    of the feature contributions and the bias term is equal to the raw prediction
    of the model, i.e., the prediction before applying the inverse link function.
:param model: h2o tree model, such as DRF, XRT, GBM, XGBoost
:param frame: H2OFrame
:param columns: either a list of columns or column indices to show. If specified
parameter top_n_features will be ignored.
:param top_n_features: a number of columns to pick using variable importance (where applicable).
:param samples: maximum number of observations to use; if lower than number of rows in the
frame, take a random sample
:param colorize_factors: if True, use colors from the colormap to colorize the factors;
otherwise all levels will have same color
:param alpha: transparency of the points
:param colormap: colormap to use instead of the default blue to red colormap
:param figsize: figure size; passed directly to matplotlib
:param jitter: amount of jitter used to show the point density
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create SHAP summary plot
>>> gbm.shap_summary_plot(test)
"""
import matplotlib.colors
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
blue_to_red = matplotlib.colors.LinearSegmentedColormap.from_list("blue_to_red",
["#00AAEE", "#FF1166"])
if colormap is None:
colormap = blue_to_red
else:
colormap = plt.get_cmap(colormap)
if top_n_features < 0:
top_n_features = float("inf")
# to prevent problems with data sorted in some logical way
    # (overplotting with the latest result, which might have different values
    # than the rest of the data in a given region)
permutation = list(range(frame.nrow))
random.shuffle(permutation)
if samples is not None:
permutation = sorted(permutation[:min(len(permutation), samples)])
frame = frame[permutation, :]
permutation = list(range(frame.nrow))
random.shuffle(permutation)
with no_progress():
contributions = NumpyFrame(model.predict_contributions(frame))
frame = NumpyFrame(frame)
contribution_names = contributions.columns
feature_importance = sorted(
{k: np.abs(v).mean() for k, v in contributions.items() if "BiasTerm" != k}.items(),
key=lambda kv: kv[1])
if columns is None:
top_n = min(top_n_features, len(feature_importance))
top_n_features = [fi[0] for fi in feature_importance[-top_n:]]
else:
picked_cols = []
columns = [frame.columns[col] if isinstance(col, int) else col for col in columns]
for feature in columns:
if feature in contribution_names:
picked_cols.append(feature)
else:
for contrib in contribution_names:
if contrib.startswith(feature + "."):
picked_cols.append(contrib)
top_n_features = picked_cols
plt.figure(figsize=figsize)
plt.grid(True)
plt.axvline(0, c="black")
for i in range(len(top_n_features)):
col_name = top_n_features[i]
col = contributions[permutation, col_name]
dens = _density(col)
plt.scatter(
col,
i + dens * np.random.uniform(-jitter, jitter, size=len(col)),
alpha=alpha,
c=_uniformize(frame, col_name)[permutation]
if colorize_factors or not frame.isfactor(col_name)
else np.full(frame.nrow, 0.5),
cmap=colormap
)
plt.clim(0, 1)
cbar = plt.colorbar()
cbar.set_label('Normalized feature value', rotation=270)
cbar.ax.get_yaxis().labelpad = 15
plt.yticks(range(len(top_n_features)), top_n_features)
plt.xlabel("SHAP value")
plt.ylabel("Feature")
plt.title("SHAP Summary plot for \"{}\"".format(model.model_id))
plt.tight_layout()
fig = plt.gcf()
return fig
def shap_explain_row_plot(
model, # type: h2o.model.ModelBase
frame, # type: h2o.H2OFrame
row_index, # type: int
columns=None, # type: Optional[Union[List[int], List[str]]]
top_n_features=10, # type: int
figsize=(16, 9), # type: Union[List[float], Tuple[float]]
plot_type="barplot", # type: str
contribution_type="both" # type: str
): # type: (...) -> plt.Figure
"""
SHAP local explanation
    SHAP explanation shows the contribution of the features for a given instance. The sum
    of the feature contributions and the bias term is equal to the raw prediction
    of the model, i.e., the prediction before applying the inverse link function. H2O implements
    TreeSHAP which, when the features are correlated, can increase the contribution of a feature
    that had no influence on the prediction.
:param model: h2o tree model, such as DRF, XRT, GBM, XGBoost
:param frame: H2OFrame
:param row_index: row index of the instance to inspect
:param columns: either a list of columns or column indices to show. If specified
parameter top_n_features will be ignored.
:param top_n_features: a number of columns to pick using variable importance (where applicable).
When plot_type="barplot", then top_n_features will be chosen for each contribution_type.
:param figsize: figure size; passed directly to matplotlib
:param plot_type: either "barplot" or "breakdown"
:param contribution_type: One of "positive", "negative", or "both".
Used only for plot_type="barplot".
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create SHAP row explanation plot
>>> gbm.shap_explain_row_plot(test, row_index=0)
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if top_n_features < 0:
top_n_features = float("inf")
row = frame[row_index, :]
with no_progress():
contributions = NumpyFrame(model.predict_contributions(row))
contribution_names = contributions.columns
prediction = float(contributions.sum(axis=1))
bias = float(contributions["BiasTerm"])
contributions = sorted(filter(lambda pair: pair[0] != "BiasTerm", contributions.items()),
key=lambda pair: -abs(pair[1]))
if plot_type == "barplot":
with no_progress():
prediction = model.predict(row)[0, "predict"]
row = NumpyFrame(row)
if contribution_type == "both":
contribution_type = ["positive", "negative"]
else:
contribution_type = [contribution_type]
if columns is None:
picked_features = []
if "positive" in contribution_type:
positive_features = sorted(filter(lambda pair: pair[1] >= 0, contributions),
key=lambda pair: pair[1])
picked_features.extend(positive_features[-min(top_n_features, len(positive_features)):])
if "negative" in contribution_type:
negative_features = sorted(filter(lambda pair: pair[1] < 0, contributions),
key=lambda pair: pair[1])
picked_features.extend(negative_features[:min(top_n_features, len(negative_features))])
else:
columns = [frame.columns[col] if isinstance(col, int) else col for col in columns]
picked_cols = []
for feature in columns:
if feature in contribution_names:
picked_cols.append(feature)
else:
for contrib in contribution_names:
if contrib.startswith(feature + "."):
picked_cols.append(contrib)
picked_features = [pair for pair in contributions if pair[0] in picked_cols]
picked_features = sorted(picked_features, key=lambda pair: pair[1])
if len(picked_features) < len(contributions):
contribution_subset_note = " using {} out of {} contributions".format(
len(picked_features), len(contributions))
else:
contribution_subset_note = ""
contributions = dict(
feature=np.array(
["{}={}".format(pair[0], str(row.get(pair[0])[0])) for pair in picked_features]),
value=np.array([pair[1][0] for pair in picked_features])
)
plt.figure(figsize=figsize)
plt.barh(range(contributions["feature"].shape[0]), contributions["value"], fc="#b3ddf2")
plt.grid(True)
plt.axvline(0, c="black")
plt.xlabel("SHAP value")
plt.ylabel("Feature")
plt.yticks(range(contributions["feature"].shape[0]), contributions["feature"])
plt.title("SHAP explanation for \"{}\" on row {}{}\nprediction: {}".format(
model.model_id,
row_index,
contribution_subset_note,
prediction
))
plt.gca().set_axisbelow(True)
plt.tight_layout()
fig = plt.gcf()
return fig
elif plot_type == "breakdown":
if columns is None:
if top_n_features + 1 < len(contributions):
contributions = contributions[:top_n_features] + [
("Remaining Features", sum(map(lambda pair: pair[1], contributions[top_n_features:])))]
else:
picked_cols = []
columns = [frame.columns[col] if isinstance(col, int) else col for col in columns]
for feature in columns:
if feature in contribution_names:
picked_cols.append(feature)
else:
for contrib in contribution_names:
if contrib.startswith(feature + "."):
picked_cols.append(contrib)
rest = np.array(sum(pair[1] for pair in contributions if pair[0] not in picked_cols))
contributions = [pair for pair in contributions if pair[0] in picked_cols]
if len(contribution_names) - 1 > len(picked_cols): # Contribution names contain "BiasTerm" as well
contributions += [("Remaining Features", rest)]
contributions = contributions[::-1]
contributions = dict(
feature=np.array([pair[0] for pair in contributions]),
value=np.array([pair[1][0] for pair in contributions]),
color=np.array(["g" if pair[1] >= 0 else "r" for pair in contributions])
)
contributions["cummulative_value"] = [bias] + list(
contributions["value"].cumsum()[:-1] + bias)
plt.figure(figsize=figsize)
plt.barh(contributions["feature"], contributions["value"],
left=contributions["cummulative_value"],
color=contributions["color"])
plt.axvline(prediction, label="Prediction")
plt.axvline(bias, linestyle="dotted", color="gray", label="Bias")
plt.vlines(contributions["cummulative_value"][1:],
ymin=[y - 0.4 for y in range(contributions["value"].shape[0] - 1)],
ymax=[y + 1.4 for y in range(contributions["value"].shape[0] - 1)],
color="k")
plt.legend()
plt.grid(True)
xlim = plt.xlim()
xlim_diff = xlim[1] - xlim[0]
plt.xlim((xlim[0] - 0.02 * xlim_diff, xlim[1] + 0.02 * xlim_diff))
plt.xlabel("SHAP value")
plt.ylabel("Feature")
plt.gca().set_axisbelow(True)
plt.tight_layout()
fig = plt.gcf()
return fig
def _get_top_n_levels(column, top_n):
# type: (h2o.H2OFrame, int) -> List[str]
"""
    Get the top_n levels from a factor column based on their frequency.
    :param column: a single-column H2OFrame containing the factor column
    :param top_n: maximum number of levels to be returned
:returns: list of levels
"""
counts = column.table().sort("Count", ascending=[False])[:, 0]
return [
level[0]
for level in counts[:min(counts.nrow, top_n), :].as_data_frame(
use_pandas=False, header=False)
]
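# Illustrative sketch for _get_top_n_levels (hypothetical data, not executed here):
# for a factor column with level counts {"red": 500, "white": 300, "rose": 2},
# _get_top_n_levels(frame["color"], top_n=2) would return ["red", "white"],
# since levels are taken in descending order of their frequency.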
def _factor_mapper(mapping):
# type: (Dict) -> Callable
"""
Helper higher order function returning function that applies mapping to each element.
:param mapping: dictionary that maps factor names to floats (for NaN; other values are integers)
:returns: function to be applied on iterable
"""
def _(column):
return [mapping.get(entry, float("nan")) for entry in column]
return _
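# Illustrative sketch for _factor_mapper (hypothetical values):
# _factor_mapper({"cat": 0, "dog": 1})(["dog", "cat", "fish"]) -> [1, 0, nan]
# Entries missing from the mapping fall back to NaN, so unseen factor levels
# simply disappear from the plot instead of raising a KeyError.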
def _add_histogram(frame, column, add_rug=True, add_histogram=True, levels_order=None):
    # type: (H2OFrame, str, bool, bool, Optional[List[str]]) -> None
"""
Helper function to add rug and/or histogram to a plot
:param frame: H2OFrame
:param column: string containing column name
:param add_rug: if True, adds rug
    :param add_histogram: if True, adds histogram
    :param levels_order: if specified, the factor levels are displayed in this order
    :returns: None
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
ylims = plt.ylim()
nf = NumpyFrame(frame[column])
if nf.isfactor(column) and levels_order is not None:
new_mapping = dict(zip(levels_order, range(len(levels_order))))
mapping = _factor_mapper({k: new_mapping[v] for k, v in nf.from_num_to_factor(column).items()})
else:
def mapping(x):
return x
if add_rug:
plt.plot(mapping(nf[column]),
[ylims[0] for _ in range(frame.nrow)],
"|", color="k", alpha=0.2, ms=20)
if add_histogram:
if nf.isfactor(column):
cnt = Counter(nf[column][np.isfinite(nf[column])])
hist_x = np.array(list(cnt.keys()), dtype=float)
hist_y = np.array(list(cnt.values()), dtype=float)
width = 1
else:
hist_y, hist_x = np.histogram(
mapping(nf[column][np.isfinite(nf[column])]),
bins=20)
hist_x = hist_x[:-1].astype(float)
hist_y = hist_y.astype(float)
width = hist_x[1] - hist_x[0]
plt.bar(mapping(hist_x),
hist_y / hist_y.max() * ((ylims[1] - ylims[0]) / 1.618), # ~ golden ratio
bottom=ylims[0],
align="center" if nf.isfactor(column) else "edge",
width=width, color="gray", alpha=0.2)
if nf.isfactor(column):
plt.xticks(mapping(range(nf.nlevels(column))), nf.levels(column))
plt.ylim(ylims)
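# Note on _add_histogram: the histogram is rescaled so that its tallest bar occupies
# roughly 1/1.618 of the y-range (an aesthetic choice), and both the rug and the
# histogram are anchored at the bottom of the axes with the original y-limits restored
# afterwards, so the overlay does not disturb the main plot.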
def pd_plot(
model, # type: h2o.model.model_base.ModelBase
frame, # type: h2o.H2OFrame
column, # type: str
row_index=None, # type: Optional[int]
target=None, # type: Optional[str]
max_levels=30, # type: int
figsize=(16, 9), # type: Union[Tuple[float], List[float]]
colormap="Dark2", # type: str
):
"""
Plot partial dependence plot.
    Partial dependence plot (PDP) gives a graphical depiction of the marginal effect of a variable
    on the response. The effect of a variable is measured as the change in the mean response.
    PDP assumes independence between the feature for which the PDP is computed and the rest.
:param model: H2O Model object
:param frame: H2OFrame
:param column: string containing column name
:param row_index: if None, do partial dependence, if integer, do individual
conditional expectation for the row specified by this integer
:param target: (only for multinomial classification) for what target should the plot be done
:param max_levels: maximum number of factor levels to show
:param figsize: figure size; passed directly to matplotlib
    :param colormap: colormap name; used to get just the first color to keep the API and color scheme similar to
                     pd_multi_plot
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create partial dependence plot
>>> gbm.pd_plot(test, column="alcohol")
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
is_factor = frame[column].isfactor()[0]
if is_factor:
if frame[column].nlevels()[0] > max_levels:
levels = _get_top_n_levels(frame[column], max_levels)
if row_index is not None:
levels = list(set(levels + [frame[row_index, column]]))
frame = frame[(frame[column].isin(levels)), :]
# decrease the number of levels to the actual number of levels in the subset
frame[column] = frame[column].ascharacter().asfactor()
if target is not None and not isinstance(target, list):
target = [target]
if frame.type(column) == "string":
raise ValueError("String columns are not supported!")
color = plt.get_cmap(colormap)(0)
with no_progress():
plt.figure(figsize=figsize)
is_factor = frame[column].isfactor()[0]
if is_factor:
factor_map = _factor_mapper(NumpyFrame(frame[column]).from_factor_to_num(column))
tmp = NumpyFrame(
model.partial_plot(frame, cols=[column], plot=False,
row_index=row_index, targets=target,
nbins=20 if not is_factor else 1 + frame[column].nlevels()[0])[0])
encoded_col = tmp.columns[0]
if is_factor:
plt.errorbar(factor_map(tmp.get(encoded_col)), tmp["mean_response"],
yerr=tmp["stddev_response"], fmt='o', color=color,
ecolor=color, elinewidth=3, capsize=0, markersize=10)
else:
plt.plot(tmp[encoded_col], tmp["mean_response"], color=color)
plt.fill_between(tmp[encoded_col], tmp["mean_response"] - tmp["stddev_response"],
tmp["mean_response"] + tmp["stddev_response"], color=color, alpha=0.2)
_add_histogram(frame, column)
if row_index is None:
plt.title("Partial Dependence plot for \"{}\"{}".format(
column,
" with target = \"{}\"".format(target[0]) if target else ""
))
plt.ylabel("Mean Response")
else:
if is_factor:
plt.axvline(factor_map([frame[row_index, column]]), c="k", linestyle="dotted",
label="Instance value")
else:
plt.axvline(frame[row_index, column], c="k", linestyle="dotted",
label="Instance value")
plt.title("Individual Conditional Expectation for column \"{}\" and row {}{}".format(
column,
row_index,
" with target = \"{}\"".format(target[0]) if target else ""
))
plt.ylabel("Response")
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
plt.xlabel(column)
plt.grid(True)
if is_factor:
plt.xticks(rotation=45, rotation_mode="anchor", ha="right")
plt.tight_layout()
fig = plt.gcf()
return fig
def pd_multi_plot(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.model_base]]
frame, # type: h2o.H2OFrame
column, # type: str
best_of_family=True, # type: bool
row_index=None, # type: Optional[int]
target=None, # type: Optional[str]
max_levels=30, # type: int
figsize=(16, 9), # type: Union[Tuple[float], List[float]]
colormap="Dark2", # type: str
markers=["o", "v", "s", "P", "*", "D", "X", "^", "<", ">", "."] # type: List[str]
): # type: (...) -> plt.Figure
"""
Plot partial dependencies of a variable across multiple models.
    Partial dependence plot (PDP) gives a graphical depiction of the marginal effect of a variable
    on the response. The effect of a variable is measured as the change in the mean response.
    PDP assumes independence between the feature for which the PDP is computed and the rest.
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
:param frame: H2OFrame
:param column: string containing column name
:param best_of_family: if True, show only the best models per family
:param row_index: if None, do partial dependence, if integer, do individual
conditional expectation for the row specified by this integer
:param target: (only for multinomial classification) for what target should the plot be done
:param max_levels: maximum number of factor levels to show
:param figsize: figure size; passed directly to matplotlib
:param colormap: colormap name
    :param markers: list of markers to use for factors; when the possible markers run out, the last marker in
                    this list gets reused
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train an H2OAutoML
>>> aml = H2OAutoML(max_models=10)
>>> aml.train(y=response, training_frame=train)
>>>
>>> # Create a partial dependence plot
>>> aml.pd_multi_plot(test, column="alcohol")
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if target is not None:
if isinstance(target, (list, tuple)):
if len(target) > 1:
raise ValueError("Only one target can be specified!")
target = target[0]
target = [target]
if frame.type(column) == "string":
raise ValueError("String columns are not supported!")
if _is_automl_or_leaderboard(models):
all_models = _get_model_ids_from_automl_or_leaderboard(models)
else:
all_models = models
is_factor = frame[column].isfactor()[0]
if is_factor:
if frame[column].nlevels()[0] > max_levels:
levels = _get_top_n_levels(frame[column], max_levels)
if row_index is not None:
levels = list(set(levels + [frame[row_index, column]]))
frame = frame[(frame[column].isin(levels)), :]
# decrease the number of levels to the actual number of levels in the subset
frame[column] = frame[column].ascharacter().asfactor()
if best_of_family:
models = _first_of_family(all_models)
else:
models = all_models
models = [m if isinstance(m, h2o.model.ModelBase) else h2o.get_model(m) for m in models]
colors = plt.get_cmap(colormap, len(models))(list(range(len(models))))
with no_progress():
plt.figure(figsize=figsize)
is_factor = frame[column].isfactor()[0]
if is_factor:
factor_map = _factor_mapper(NumpyFrame(frame[column]).from_factor_to_num(column))
marker_map = dict(zip(range(len(markers) - 1), markers[:-1]))
model_ids = _shorten_model_ids([model.model_id for model in models])
for i, model in enumerate(models):
tmp = NumpyFrame(
model.partial_plot(frame, cols=[column], plot=False,
row_index=row_index, targets=target,
nbins=20 if not is_factor else 1 + frame[column].nlevels()[0])[0])
encoded_col = tmp.columns[0]
if is_factor:
plt.scatter(factor_map(tmp.get(encoded_col)), tmp["mean_response"],
color=[colors[i]], label=model_ids[i],
marker=marker_map.get(i, markers[-1]))
else:
plt.plot(tmp[encoded_col], tmp["mean_response"], color=colors[i],
label=model_ids[i])
_add_histogram(frame, column)
if row_index is None:
plt.title("Partial Dependence plot for \"{}\"{}".format(
column,
" with target = \"{}\"".format(target[0]) if target else ""
))
plt.ylabel("Mean Response")
else:
if is_factor:
plt.axvline(factor_map([frame[row_index, column]]), c="k", linestyle="dotted",
label="Instance value")
else:
plt.axvline(frame[row_index, column], c="k", linestyle="dotted",
label="Instance value")
plt.title("Individual Conditional Expectation for column \"{}\" and row {}{}".format(
column,
row_index,
" with target = \"{}\"".format(target[0]) if target else ""
))
plt.ylabel("Response")
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel(column)
plt.grid(True)
if is_factor:
plt.xticks(rotation=45, rotation_mode="anchor", ha="right")
plt.tight_layout(rect=[0, 0, 0.8, 1])
fig = plt.gcf()
return fig
def ice_plot(
model, # type: h2o.model.ModelBase
frame, # type: h2o.H2OFrame
column, # type: str
target=None, # type: Optional[str]
max_levels=30, # type: int
figsize=(16, 9), # type: Union[Tuple[float], List[float]]
colormap="plasma", # type: str
): # type: (...) -> plt.Figure
"""
Plot Individual Conditional Expectations (ICE) for each decile
    Individual conditional expectations (ICE) plot gives a graphical depiction of the marginal
    effect of a variable on the response. The ICE plot is similar to the partial dependence plot (PDP):
    a PDP shows the average effect of a feature, while an ICE plot shows the effect for a single
    instance. The following plot shows the effect for each decile. In contrast to the partial
    dependence plot, the ICE plot can provide more insight, especially when there is a stronger feature interaction.
:param model: H2OModel
:param frame: H2OFrame
:param column: string containing column name
:param target: (only for multinomial classification) for what target should the plot be done
:param max_levels: maximum number of factor levels to show
:param figsize: figure size; passed directly to matplotlib
:param colormap: colormap name
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create the individual conditional expectations plot
>>> gbm.ice_plot(test, column="alcohol")
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if target is not None:
if isinstance(target, (list, tuple)):
if len(target) > 1:
raise ValueError("Only one target can be specified!")
target = target[0]
target = [target]
if frame.type(column) == "string":
raise ValueError("String columns are not supported!")
with no_progress():
frame = frame.sort(model.actual_params["response_column"])
is_factor = frame[column].isfactor()[0]
if is_factor:
if frame[column].nlevels()[0] > max_levels:
levels = _get_top_n_levels(frame[column], max_levels)
frame = frame[(frame[column].isin(levels)), :]
# decrease the number of levels to the actual number of levels in the subset
frame[column] = frame[column].ascharacter().asfactor()
factor_map = _factor_mapper(NumpyFrame(frame[column]).from_factor_to_num(column))
        # use nrow - 1 so that the 100th percentile maps to the last valid (0-based) row index
        deciles = [int(round((frame.nrow - 1) * dec / 10)) for dec in range(11)]
colors = plt.get_cmap(colormap, 11)(list(range(11)))
plt.figure(figsize=figsize)
for i, index in enumerate(deciles):
tmp = NumpyFrame(
model.partial_plot(
frame,
cols=[column],
plot=False,
row_index=index,
targets=target,
nbins=20 if not is_factor else 1 + frame[column].nlevels()[0]
)[0]
)
encoded_col = tmp.columns[0]
if is_factor:
plt.scatter(factor_map(tmp.get(encoded_col)), tmp["mean_response"],
color=[colors[i]],
label="{}th Percentile".format(i * 10))
else:
plt.plot(tmp[encoded_col], tmp["mean_response"], color=colors[i],
label="{}th Percentile".format(i * 10))
tmp = NumpyFrame(
model.partial_plot(
frame,
cols=[column],
plot=False,
targets=target,
nbins=20 if not is_factor else 1 + frame[column].nlevels()[0]
)[0]
)
if is_factor:
plt.scatter(factor_map(tmp.get(encoded_col)), tmp["mean_response"], color="k",
label="Partial Dependence")
else:
plt.plot(tmp[encoded_col], tmp["mean_response"], color="k", linestyle="dashed",
label="Partial Dependence")
_add_histogram(frame, column)
plt.title("Individual Conditional Expectation for \"{}\"\non column \"{}\"{}".format(
model.model_id,
column,
" with target = \"{}\"".format(target[0]) if target else ""
))
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid(True)
if is_factor:
plt.xticks(rotation=45, rotation_mode="anchor", ha="right")
plt.tight_layout(rect=[0, 0, 0.85, 1])
fig = plt.gcf()
return fig
def _has_varimp(model):
# type: (h2o.model.ModelBase) -> bool
"""
Does model have varimp?
    :param model: an H2O model
:returns: bool
"""
assert isinstance(model, h2o.model.ModelBase)
# check for cases when variable importance is disabled or
# when a model is stopped sooner than calculating varimp (xgboost can rarely have no varimp).
output = model._model_json["output"]
return output.get("variable_importances") is not None
def _is_automl_or_leaderboard(obj):
# type: (object) -> bool
"""
Is obj an H2OAutoML object or a leaderboard?
:param obj: object to test
:return: bool
"""
return (
isinstance(obj, h2o.automl._base.H2OAutoMLBaseMixin) or
(isinstance(obj, h2o.H2OFrame) and "model_id" in obj.columns)
)
def _get_model_ids_from_automl_or_leaderboard(automl_or_leaderboard, filter_=lambda _: True):
# type: (object) -> List[str]
"""
    Get model ids from an H2OAutoML object or leaderboard.
    :param automl_or_leaderboard: an H2OAutoML object or a leaderboard frame
    :param filter_: a predicate used to filter model ids. Signature of the filter is (model_id) -> bool.
    :return: List[str]
"""
leaderboard = (automl_or_leaderboard.leaderboard
if isinstance(automl_or_leaderboard, h2o.automl._base.H2OAutoMLBaseMixin)
else automl_or_leaderboard)
return [model_id[0] for model_id in leaderboard[:, "model_id"].as_data_frame(use_pandas=False, header=False)
if filter_(model_id[0])]
def _get_models_from_automl_or_leaderboard(automl_or_leaderboard, filter_=lambda _: True):
# type: (object) -> Generator[h2o.model.ModelBase, None, None]
"""
    Get models from an H2OAutoML object or leaderboard.
    :param automl_or_leaderboard: an H2OAutoML object or a leaderboard frame
    :param filter_: a predicate used to filter models. Signature of the filter is (model) -> bool.
:return: Generator[h2o.model.ModelBase, None, None]
"""
models = (h2o.get_model(model_id) for model_id in _get_model_ids_from_automl_or_leaderboard(automl_or_leaderboard))
return (model for model in models if filter_(model))
def _get_xy(model):
# type: (h2o.model.ModelBase) -> Tuple[List[str], str]
"""
Get features (x) and the response column (y).
:param model: H2O Model
:returns: tuple (x, y)
"""
names = model._model_json["output"]["original_names"] or model._model_json["output"]["names"]
y = model.actual_params["response_column"]
not_x = [
y,
# if there is no fold column, fold_column is set to None, thus using "or {}" instead of the second argument of dict.get
(model.actual_params.get("fold_column") or {}).get("column_name"),
(model.actual_params.get("weights_column") or {}).get("column_name"),
(model.actual_params.get("offset_column") or {}).get("column_name"),
] + (model.actual_params.get("ignored_columns") or [])
x = [feature for feature in names if feature not in not_x]
return x, y
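# Illustrative sketch for _get_xy (hypothetical model): for a model trained with
# y="quality", weights_column="w", and ignored_columns=["id"], _get_xy returns
# (every remaining training column, "quality") -- the response, fold/weights/offset
# columns, and ignored columns are all excluded from the feature list x.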
def _consolidate_varimps(model):
    # type: (h2o.model.ModelBase) -> Dict
"""
    Get variable importances just for the columns that are present in the data set, i.e.,
    when encoded variables such as "column_name.level_name" are encountered, those variable
    importances are summed into the "column_name" variable.
:param model: H2O Model
:returns: dictionary with variable importances
"""
x, y = _get_xy(model)
varimp = {line[0]: line[3] for line in model.varimp()}
consolidated_varimps = {k: v for k, v in varimp.items() if k in x}
to_process = {k: v for k, v in varimp.items() if k not in x}
domain_mapping = _get_domain_mapping(model)
encoded_cols = ["{}.{}".format(name, domain)
for name, domains in domain_mapping.items()
if domains is not None
for domain in domains + ["missing(NA)"]]
if len(encoded_cols) > len(set(encoded_cols)):
        # use a separate loop variable so that the feature list ``x`` (from _get_xy above) is not clobbered
        duplicates = encoded_cols[:]
        for enc in set(encoded_cols):
            duplicates.remove(enc)
        warnings.warn("Ambiguous encoding of the column x category pairs: {}".format(set(duplicates)))
varimp_to_col = {"{}.{}".format(name, domain): name
for name, domains in domain_mapping.items()
if domains is not None
for domain in domains + ["missing(NA)"]
}
for feature in to_process.keys():
if feature in varimp_to_col:
column = varimp_to_col[feature]
consolidated_varimps[column] = consolidated_varimps.get(column, 0) + to_process[feature]
else:
raise RuntimeError("Cannot find feature {}".format(feature))
total_value = sum(consolidated_varimps.values())
if total_value != 1:
consolidated_varimps = {k: v / total_value for k, v in consolidated_varimps.items()}
for col in x:
if col not in consolidated_varimps:
consolidated_varimps[col] = 0
return consolidated_varimps
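# Illustrative sketch for _consolidate_varimps (hypothetical varimps): given
# {"color.red": 0.2, "color.white": 0.1, "alcohol": 0.7}, the one-hot encoded entries
# are folded back into their parent column, yielding {"color": 0.3, "alcohol": 0.7};
# the result is then normalized to sum to 1 (if needed) and any feature column
# without an importance entry is filled in with 0.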
# This plot is meant to be used only in the explain module.
# It provides the same capabilities as `model.varimp_plot` but without
# either forcing "Agg" backend or showing the plot.
# It also mimics the look and feel of the rest of the explain plots.
def _varimp_plot(model, figsize, num_of_features=None):
    # type: (h2o.model.ModelBase, Tuple[float, float], Optional[int]) -> matplotlib.pyplot.Figure
"""
Variable importance plot.
:param model: H2O model
:param figsize: Figure size
:param num_of_features: Maximum number of variables to plot. Defaults to 10.
    :return: a matplotlib figure object
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
importances = model.varimp(use_pandas=False)
feature_labels = [tup[0] for tup in importances]
val = [tup[2] for tup in importances]
pos = range(len(feature_labels))[::-1]
if num_of_features is None:
num_of_features = min(len(val), 10)
plt.figure(figsize=figsize)
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
plt.ylim([min(pos[0:num_of_features]) - 1, max(pos[0:num_of_features]) + 1])
plt.title("Variable Importance for \"{}\"".format(model.model_id))
plt.xlabel("Variable Importance")
plt.ylabel("Variable")
plt.grid()
plt.gca().set_axisbelow(True)
plt.tight_layout()
fig = plt.gcf()
return fig
def _interpretable(model):
# type: (Union[str, h2o.model.ModelBase]) -> bool
"""
Returns True if model_id is easily interpretable.
:param model: model or a string containing a model_id
:returns: bool
"""
return _get_algorithm(model) in ["glm", "gam", "rulefit"]
def _flatten_list(items):
# type: (list) -> Generator[Any, None, None]
"""
Flatten nested lists.
    :param items: a list potentially containing other lists
:returns: flattened list
"""
for x in items:
if isinstance(x, list):
for xx in _flatten_list(x):
yield xx
else:
yield x
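# Illustrative sketch: list(_flatten_list([1, [2, [3, [4]]], 5])) == [1, 2, 3, 4, 5]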
def _calculate_clustering_indices(matrix):
# type: (np.ndarray) -> list
"""
    Get the leaf order of a hierarchical clustering computed over the columns of the matrix.
:param matrix: numpy.ndarray
:returns: list of indices of columns
"""
cols = matrix.shape[1]
dist = np.zeros((cols, cols))
for x in range(cols):
for y in range(cols):
if x < y:
dist[x, y] = np.sum(np.power(matrix[:, x] - matrix[:, y], 2))
dist[y, x] = dist[x, y]
elif x == y:
dist[x, x] = float("inf")
indices = [[i] for i in range(cols)]
for i in range(cols - 1):
idx = np.argmin(dist)
x = idx % cols
y = idx // cols
assert x != y
indices[x].append(indices[y])
indices[y] = []
dist[x, :] = np.min(dist[[x, y], :], axis=0)
dist[y, :] = float("inf")
dist[:, y] = float("inf")
dist[x, x] = float("inf")
result = list(_flatten_list(indices))
assert len(result) == cols
return result
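# Sketch of the scheme used by _calculate_clustering_indices: columns are greedily merged
# by the smallest pairwise squared Euclidean distance, with a single-linkage style update
# (the merged cluster keeps the minimum of its members' distances), and the nested merge
# lists are flattened into the final leaf order. Two identical columns therefore always
# end up next to each other in the returned ordering.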
def varimp_heatmap(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.ModelBase]]
        top_n=None,  # type: Optional[int]
figsize=(16, 9), # type: Tuple[float]
cluster=True, # type: bool
colormap="RdYlBu_r" # type: str
):
# type: (...) -> plt.Figure
"""
Variable Importance Heatmap across a group of models
Variable importance heatmap shows variable importance across multiple models.
Some models in H2O return variable importance for one-hot (binary indicator)
encoded versions of categorical columns (e.g. Deep Learning, XGBoost). In order
for the variable importance of categorical columns to be compared across all model
types we compute a summarization of the the variable importance across all one-hot
encoded features and return a single variable importance for the original categorical
feature. By default, the models and variables are ordered by their similarity.
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
    :param top_n: DEPRECATED. Use just the top n models (applies only when used with H2OAutoML)
    :param figsize: figure size; passed directly to matplotlib
:param cluster: if True, cluster the models and variables
:param colormap: colormap to use
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train an H2OAutoML
>>> aml = H2OAutoML(max_models=10)
>>> aml.train(y=response, training_frame=train)
>>>
>>> # Create the variable importance heatmap
>>> aml.varimp_heatmap()
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if isinstance(models, h2o.automl._base.H2OAutoMLBaseMixin):
models = _check_deprecated_top_n_argument(models, top_n)
varimps, model_ids, x = varimp(models=models, cluster=cluster, use_pandas=False)
plt.figure(figsize=figsize)
plt.imshow(varimps, cmap=plt.get_cmap(colormap))
plt.xticks(range(len(model_ids)), model_ids,
rotation=45, rotation_mode="anchor", ha="right")
plt.yticks(range(len(x)), x)
plt.colorbar()
plt.xlabel("Model Id")
plt.ylabel("Feature")
plt.title("Variable Importance Heatmap")
plt.grid(False)
fig = plt.gcf()
return fig
def varimp(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.ModelBase]]
cluster=True, # type: bool
use_pandas=True # type: bool
):
# type: (...) -> Union[pandas.DataFrame, Tuple[numpy.ndarray, List[str], List[str]]]
"""
Get data that are used to build varimp_heatmap plot.
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
:param cluster: if True, cluster the models and variables
:param use_pandas: if True, try to return pandas DataFrame. Otherwise return a triple (varimps, model_ids, variable_names)
:returns: either pandas DataFrame (if use_pandas == True) or a triple (varimps, model_ids, variable_names)
"""
if _is_automl_or_leaderboard(models):
models = list(_get_models_from_automl_or_leaderboard(models, filter_=_has_varimp))
else:
# Filter out models that don't have varimp
models = [model for model in models if _has_varimp(model)]
if len(models) == 0:
raise RuntimeError("No model with variable importance")
varimps = [_consolidate_varimps(model) for model in models]
x, y = _get_xy(models[0])
varimps = np.array([[varimp[col] for col in x] for varimp in varimps])
if cluster and len(models) > 2:
order = _calculate_clustering_indices(varimps)
x = [x[i] for i in order]
varimps = varimps[:, order]
varimps = varimps.transpose()
order = _calculate_clustering_indices(varimps)
models = [models[i] for i in order]
varimps = varimps[:, order]
else:
varimps = varimps.transpose()
model_ids = _shorten_model_ids([model.model_id for model in models])
if use_pandas:
import pandas
return pandas.DataFrame(varimps, columns=model_ids, index=x)
return varimps, model_ids, x
def model_correlation_heatmap(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.ModelBase]]
frame, # type: h2o.H2OFrame
top_n=None, # type: Optional[int]
cluster_models=True, # type: bool
triangular=True, # type: bool
figsize=(13, 13), # type: Tuple[float]
colormap="RdYlBu_r" # type: str
):
# type: (...) -> plt.Figure
"""
Model Prediction Correlation Heatmap
    This plot shows the correlation between the predictions of the models.
    For classification, the frequency of identical predictions is used. By default, models
    are ordered by their similarity (as computed by hierarchical clustering).
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
:param frame: H2OFrame
    :param top_n: DEPRECATED. Show just the top n models (applies only when used with H2OAutoML).
:param cluster_models: if True, cluster the models
:param triangular: make the heatmap triangular
    :param figsize: figure size; passed directly to matplotlib
:param colormap: colormap to use
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train an H2OAutoML
>>> aml = H2OAutoML(max_models=10)
>>> aml.train(y=response, training_frame=train)
>>>
>>> # Create the model correlation heatmap
>>> aml.model_correlation_heatmap(test)
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
if isinstance(models, h2o.automl._base.H2OAutoMLBaseMixin):
models = _check_deprecated_top_n_argument(models, top_n)
corr, model_ids = model_correlation(models, frame, cluster_models, use_pandas=False)
if triangular:
corr = np.where(np.triu(np.ones_like(corr), k=1).astype(bool), float("nan"), corr)
plt.figure(figsize=figsize)
plt.imshow(corr, cmap=plt.get_cmap(colormap), clim=(0.5, 1))
plt.xticks(range(len(model_ids)), model_ids, rotation=45, rotation_mode="anchor", ha="right")
plt.yticks(range(len(model_ids)), model_ids)
plt.colorbar()
plt.title("Model Correlation")
plt.xlabel("Model Id")
plt.ylabel("Model Id")
plt.grid(False)
for t in plt.gca().xaxis.get_ticklabels():
if _interpretable(t.get_text()):
t.set_color("red")
for t in plt.gca().yaxis.get_ticklabels():
if _interpretable(t.get_text()):
t.set_color("red")
fig = plt.gcf()
return fig
def _check_deprecated_top_n_argument(models, top_n):
if top_n is not None:
import warnings
from h2o.exceptions import H2ODeprecationWarning
warnings.warn("Setting the `top_n` parameter is deprecated, use a leaderboard (sub)frame "
"instead, e.g., aml.leaderboard.head({}).".format(top_n), category=H2ODeprecationWarning)
models = models.leaderboard.head(top_n)
else:
models = models.leaderboard.head(20)
return models
def model_correlation(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.ModelBase]]
frame, # type: h2o.H2OFrame
cluster_models=True, # type: bool
use_pandas=True # type: bool
):
# type: (...) -> Union[pandas.DataFrame, Tuple[numpy.ndarray, List[str]]]
"""
Get data that are used to build model_correlation_heatmap plot.
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
:param frame: H2OFrame
:param cluster_models: if True, cluster the models
:param use_pandas: if True, try to return pandas DataFrame. Otherwise return a tuple (correlation_matrix, model_ids)
:returns: either pandas DataFrame (if use_pandas == True) or a tuple (correlation_matrix, model_ids)
"""
if _is_automl_or_leaderboard(models):
models = list(_get_models_from_automl_or_leaderboard(models))
is_classification = frame[models[0].actual_params["response_column"]].isfactor()[0]
predictions = []
with no_progress():
        for model in models:
            predictions.append(model.predict(frame)["predict"])
if is_classification:
corr = np.zeros((len(models), len(models)))
for i in range(len(models)):
for j in range(len(models)):
if i <= j:
corr[i, j] = (predictions[i] == predictions[j]).mean()[0]
corr[j, i] = corr[i, j]
else:
corr = np.genfromtxt(StringIO(predictions[0].cbind(predictions[1:]).cor().get_frame_data()),
delimiter=",", missing_values="", skip_header=True)
if cluster_models:
order = _calculate_clustering_indices(corr)
corr = corr[order, :]
corr = corr[:, order]
models = [models[i] for i in order]
model_ids = _shorten_model_ids([model.model_id for model in models])
if use_pandas:
import pandas
return pandas.DataFrame(corr, columns=model_ids, index=model_ids)
return corr, model_ids
def residual_analysis_plot(
model, # type: h2o.model.ModelBase
frame, # type: h2o.H2OFrame
figsize=(16, 9) # type: Tuple[float]
):
# type: (...) -> plt.Figure
"""
Residual Analysis
Do Residual Analysis and plot the fitted values vs residuals on a test dataset.
    Ideally, residuals should be randomly distributed. Patterns in this plot can indicate
    potential problems with the model selection, e.g., using a simpler model than necessary,
    not accounting for heteroscedasticity, autocorrelation, etc. If you notice "striped"
    lines of residuals, that is just an indication that your response variable was
    integer-valued instead of real-valued.
:param model: H2OModel
:param frame: H2OFrame
:param figsize: figure size; passed directly to matplotlib
:returns: a matplotlib figure object
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create the residual analysis plot
>>> gbm.residual_analysis_plot(test)
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
_, y = _get_xy(model)
with no_progress():
predicted = NumpyFrame(model.predict(frame)["predict"])
actual = NumpyFrame(frame[y])
residuals = predicted["predict"] - actual[y]
plt.figure(figsize=figsize)
plt.axhline(y=0, c="k")
plt.scatter(predicted["predict"], residuals)
plt.grid(True)
plt.xlabel("Fitted")
plt.ylabel("Residuals")
plt.title("Residual Analysis for \"{}\"".format(model.model_id))
# Rugs
xlims = plt.xlim()
ylims = plt.ylim()
plt.plot([xlims[0] for _ in range(frame.nrow)], residuals,
"_", color="k", alpha=0.2, ms=20)
plt.plot(predicted.get("predict"),
[ylims[0] for _ in range(frame.nrow)],
"|", color="k", alpha=0.2, ms=20)
# Fit line
X = np.vstack([predicted["predict"], np.ones(frame.nrow)]).T
slope, const = np.linalg.lstsq(X, residuals, rcond=-1)[0]
plt.plot(xlims, [xlims[0] * slope + const, xlims[1] * slope + const], c="b")
plt.xlim(xlims)
plt.ylim(ylims)
plt.tight_layout()
fig = plt.gcf()
return fig
def learning_curve_plot(
model, # type: h2o.model.ModelBase
metric="AUTO", # type: str
cv_ribbon=None, # type: Optional[bool]
cv_lines=None, # type: Optional[bool]
figsize=(16,9), # type: Tuple[float]
colormap=None # type: Optional[str]
):
# type: (...) -> plt.Figure
"""
Learning curve
Create learning curve plot for an H2O Model. Learning curves show error metric dependence on
learning progress, e.g., RMSE vs number of trees trained so far in GBM. There can be up to 4 curves
showing Training, Validation, Training on CV Models, and Cross-validation error.
:param model: an H2O model
:param metric: a stopping metric
    :param cv_ribbon: if True, plot the CV mean as a line and the CV standard deviation as a ribbon around the mean;
                      if None, it will attempt to automatically determine if this is a suitable visualisation
    :param cv_lines: if True, plot the scoring history for the individual CV models; if None, it will attempt to
                     automatically determine if this is a suitable visualisation
:param figsize: figure size; passed directly to matplotlib
:param colormap: colormap to use
:return: a matplotlib figure
:examples:
>>> import h2o
>>> from h2o.estimators import H2OGradientBoostingEstimator
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train a GBM
>>> gbm = H2OGradientBoostingEstimator()
>>> gbm.train(y=response, training_frame=train)
>>>
>>> # Create the learning curve plot
>>> gbm.learning_curve_plot()
"""
if model.algo == "stackedensemble":
model = model.metalearner()
if model.algo not in ("stackedensemble", "glm", "gam", "glrm", "deeplearning",
"drf", "gbm", "xgboost", "coxph", "isolationforest"):
raise H2OValueError("Algorithm {} doesn't support learning curve plot!".format(model.algo))
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
metric_mapping = {'anomaly_score': 'mean_anomaly_score',
'custom': 'custom',
'custom_increasing': 'custom',
'deviance': 'deviance',
'logloss': 'logloss',
'rmse': 'rmse',
'mae': 'mae',
'auc': 'auc',
'aucpr': 'pr_auc',
'lift_top_group': 'lift',
'misclassification': 'classification_error',
'objective': 'objective',
'convergence': 'convergence',
'negative_log_likelihood': 'negative_log_likelihood',
'sumetaieta02': 'sumetaieta02'}
    inverse_metric_mapping = {v: k for k, v in metric_mapping.items()}
    inverse_metric_mapping["custom"] = "custom, custom_increasing"
# Using the value from output to keep it simple - only one version required - (no need to use pandas for small data)
scoring_history = model._model_json["output"]["scoring_history"] or model._model_json["output"].get("glm_scoring_history")
if scoring_history is None:
raise RuntimeError("Could not retrieve scoring history for {}".format(model.algo))
scoring_history = _preprocess_scoring_history(model, scoring_history)
allowed_metrics = []
allowed_timesteps = []
if model.algo in ("glm", "gam"):
if model.actual_params["lambda_search"]:
import h2o.two_dim_table
allowed_timesteps = ["iteration"]
elif model.actual_params.get("HGLM"):
allowed_timesteps = ["iterations", "duration"]
else:
allowed_timesteps = ["iterations", "duration"]
allowed_metrics = ["deviance", "objective", "negative_log_likelihood", "convergence", "sumetaieta02",
"logloss", "auc", "classification_error", "rmse", "lift", "pr_auc", "mae"]
allowed_metrics = [m for m in allowed_metrics
if m in scoring_history.col_header or
"training_{}".format(m) in scoring_history.col_header or
"{}_train".format(m) in scoring_history.col_header]
elif model.algo == "glrm":
allowed_metrics = ["objective"]
allowed_timesteps = ["iterations"]
elif model.algo in ("deeplearning", "drf", "gbm", "xgboost"):
model_category = model._model_json["output"]["model_category"]
if "Binomial" == model_category:
allowed_metrics = ["logloss", "auc", "classification_error", "rmse", "lift", "pr_auc"]
elif model_category in ["Multinomial", "Ordinal"]:
allowed_metrics = ["logloss", "classification_error", "rmse", "pr_auc", "auc"]
elif "Regression" == model_category:
allowed_metrics = ["rmse", "deviance", "mae"]
if model.algo in ["drf", "gbm"]:
allowed_metrics += ["custom"]
elif model.algo == "coxph":
allowed_metrics = ["loglik"]
allowed_timesteps = ["iterations"]
elif model.algo == "isolationforest":
allowed_timesteps = ["number_of_trees"]
allowed_metrics = ["mean_anomaly_score"]
if model.algo == "deeplearning":
allowed_timesteps = ["epochs", "iterations", "samples"]
elif model.algo in ["drf", "gbm", "xgboost"]:
allowed_timesteps = ["number_of_trees"]
if metric.lower() == "auto":
metric = allowed_metrics[0]
else:
metric = metric_mapping.get(metric.lower())
if metric not in allowed_metrics:
raise H2OValueError("for {}, metric must be one of: {}".format(
model.algo.upper(),
", ".join(inverse_metric_mappping[m.lower()] for m in allowed_metrics)
))
timestep = allowed_timesteps[0]
if "deviance" == metric and model.algo in ["glm", "gam"] and not model.actual_params.get("HGLM", False):
training_metric = "deviance_train"
validation_metric = "deviance_test"
elif metric in ("objective", "convergence", "loglik", "mean_anomaly_score"):
training_metric = metric
validation_metric = "UNDEFINED"
else:
training_metric = "training_{}".format(metric)
validation_metric = "validation_{}".format(metric)
selected_timestep_value = None
if "number_of_trees" == timestep:
selected_timestep_value = model.actual_params["ntrees"]
elif timestep in ["iteration", "iterations"]:
if "coxph" == model.algo:
selected_timestep_value = model._model_json["output"]["iter"]
else:
selected_timestep_value = model.summary()["number_of_iterations"][0]
elif "epochs" == timestep:
selected_timestep_value = model.actual_params["epochs"]
if colormap is None:
col_train, col_valid = "#785ff0", "#ff6000"
col_cv_train, col_cv_valid = "#648fff", "#ffb000"
else:
col_train, col_valid, col_cv_train, col_cv_valid = plt.get_cmap(colormap, 4)(list(range(4)))
# Get scoring history with only filled in entries
scoring_history = _preprocess_scoring_history(model, scoring_history, training_metric)
plt.figure(figsize=figsize)
plt.grid(True)
if model._model_json["output"].get("cv_scoring_history"):
if cv_ribbon or cv_ribbon is None:
cvsh_train = defaultdict(list)
cvsh_valid = defaultdict(list)
for cvsh in model._model_json["output"]["cv_scoring_history"]:
cvsh = _preprocess_scoring_history(model, cvsh, training_metric)
for i in range(len(cvsh[timestep])):
cvsh_train[cvsh[timestep][i]].append(cvsh[training_metric][i])
if validation_metric in cvsh.col_header:
for i in range(len(cvsh[timestep])):
cvsh_valid[cvsh[timestep][i]].append(cvsh[validation_metric][i])
mean_train = np.array(sorted(
[(k, np.mean(v)) for k, v in cvsh_train.items()],
key=lambda k: k[0]
))
sd_train = np.array(sorted(
[(k, np.std(v)) for k, v in cvsh_train.items()],
key=lambda k: k[0]
))[:, 1]
len_train = np.array(sorted(
[(k, len(v)) for k, v in cvsh_train.items()],
key=lambda k: k[0]
))[:, 1]
if len(len_train) > 1 and (cv_ribbon or len_train.mean() > 2 and np.mean(len_train[:-1] == len_train[1:]) >= 0.5):
plt.plot(mean_train[:, 0], mean_train[:, 1], c=col_cv_train,
label="Training (CV Models)")
plt.fill_between(mean_train[:, 0],
mean_train[:, 1] - sd_train,
mean_train[:, 1] + sd_train,
color=col_cv_train, alpha=0.25)
if len(cvsh_valid) > 0:
mean_valid = np.array(sorted(
[(k, np.mean(v)) for k, v in cvsh_valid.items()],
key=lambda k: k[0]
))
sd_valid = np.array(sorted(
[(k, np.std(v)) for k, v in cvsh_valid.items()],
key=lambda k: k[0]
))[:, 1]
plt.plot(mean_valid[:, 0], mean_valid[:, 1], c=col_cv_valid,
label="Cross-validation")
plt.fill_between(mean_valid[:, 0],
mean_valid[:, 1] - sd_valid,
mean_valid[:, 1] + sd_valid,
color=col_cv_valid, alpha=0.25)
else:
cv_lines = cv_lines is None or cv_lines
if cv_lines:
for cvsh in model._model_json["output"]["cv_scoring_history"]:
cvsh = _preprocess_scoring_history(model, cvsh, training_metric)
plt.plot(cvsh[timestep],
cvsh[training_metric],
label="Training (CV Models)",
c=col_cv_train,
linestyle="dotted")
if validation_metric in cvsh.col_header:
plt.plot(cvsh[timestep],
cvsh[validation_metric],
label="Cross-validation",
c=col_cv_valid,
linestyle="dotted"
)
plt.plot(scoring_history[timestep],
scoring_history[training_metric],
"o-",
label="Training",
c=col_train)
if validation_metric in scoring_history.col_header:
plt.plot(scoring_history[timestep],
scoring_history[validation_metric],
"o-",
label="Validation",
c=col_valid)
if selected_timestep_value is not None:
plt.axvline(x=selected_timestep_value, label="Selected\n{}".format(timestep), c="#2FBB24")
plt.title("Learning Curve\nfor {}".format(_shorten_model_ids([model.model_id])[0]))
plt.xlabel(timestep)
plt.ylabel(metric)
handles, labels = plt.gca().get_legend_handles_labels()
labels_and_handles = dict(zip(labels, handles))
labels_and_handles_ordered = OrderedDict()
for lbl in ["Training", "Training (CV Models)", "Validation", "Cross-validation", "Selected\n{}".format(timestep)]:
if lbl in labels_and_handles:
labels_and_handles_ordered[lbl] = labels_and_handles[lbl]
plt.legend(list(labels_and_handles_ordered.values()), list(labels_and_handles_ordered.keys()))
return plt.gcf()
def _preprocess_scoring_history(model, scoring_history, training_metric=None):
empty_columns = [all(row[col_idx] == "" for row in scoring_history.cell_values)
for col_idx in range(len(scoring_history.col_header))]
scoring_history = h2o.two_dim_table.H2OTwoDimTable(
table_header=scoring_history._table_header,
table_description=scoring_history._table_description,
col_header=[ch for i, ch in enumerate(scoring_history.col_header) if not empty_columns[i]],
col_types=[ct for i, ct in enumerate(scoring_history.col_types) if not empty_columns[i]],
cell_values=[[v for i, v in enumerate(vals) if not empty_columns[i]]
for vals in scoring_history.cell_values])
if model.algo in ("glm", "gam") and model.actual_params["lambda_search"]:
alpha_best = model._model_json["output"]["alpha_best"]
alpha_idx = scoring_history.col_header.index("alpha")
iteration_idx = scoring_history.col_header.index("iteration")
scoring_history = h2o.two_dim_table.H2OTwoDimTable(
table_header=scoring_history._table_header,
table_description=scoring_history._table_description,
col_header=scoring_history.col_header,
col_types=scoring_history.col_types,
cell_values=sorted([list(v) for v in scoring_history.cell_values if v[alpha_idx] == alpha_best],
key=lambda row: row[iteration_idx]))
if training_metric is not None:
# Remove empty metric values, e.g., from GLM when score_each_iteration = False
training_metric_idx = scoring_history.col_header.index(training_metric)
scoring_history = h2o.two_dim_table.H2OTwoDimTable(
table_header=scoring_history._table_header,
table_description=scoring_history._table_description,
col_header=scoring_history.col_header,
col_types=scoring_history.col_types,
cell_values=[list(v) for v in scoring_history.cell_values if v[training_metric_idx] != ""])
return scoring_history
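# Illustrative sketch for _preprocess_scoring_history (hypothetical GLM): with
# lambda_search=True the scoring history can contain one block of rows per alpha value;
# the preprocessing above keeps only the rows matching alpha_best and sorts them by
# iteration, so the learning curve is monotone in its timestep. Columns that are empty
# in every row (and, when training_metric is given, rows with an empty metric value)
# are dropped as well.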
def _is_tree_model(model):
# type: (Union[str, h2o.model.ModelBase]) -> bool
"""
    Is the model a tree-based model?
:param model: model or a string containing a model_id
:returns: bool
"""
return _get_algorithm(model) in ["drf", "gbm", "xgboost"]
def _get_tree_models(
models, # type: Union[h2o.H2OFrame, List[h2o.model.ModelBase]]
top_n=float("inf") # type: Union[float, int]
):
# type: (...) -> List[h2o.model.ModelBase]
"""
Get list of top_n tree models.
    :param models: an H2OAutoML object, a leaderboard frame, or a list of H2O models
:param top_n: maximum number of tree models to return
:returns: list of tree models
"""
if _is_automl_or_leaderboard(models):
model_ids = _get_model_ids_from_automl_or_leaderboard(models, filter_=_is_tree_model)
return [
h2o.get_model(model_id)
for model_id in model_ids[:min(top_n, len(model_ids))]
]
elif isinstance(models, h2o.model.ModelBase):
if _is_tree_model(models):
return [models]
else:
return []
models = [
model
for model in models
if _is_tree_model(model)
]
return models[:min(len(models), top_n)]
def _get_leaderboard(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, List[h2o.model.ModelBase]]
frame, # type: h2o.H2OFrame
row_index=None, # type: Optional[int]
top_n=20 # type: int
):
# type: (...) -> h2o.H2OFrame
"""
Get leaderboard either from AutoML or list of models.
:param models: H2OAutoML object or list of models
:param frame: H2OFrame used for calculating prediction when row_index is specified
:param row_index: if specified, calculated prediction for the given row
:param top_n: show just top n models in the leaderboard
:returns: H2OFrame
"""
if _is_automl_or_leaderboard(models):
leaderboard = models if isinstance(models, h2o.H2OFrame) else h2o.automl.get_leaderboard(models, extra_columns="ALL")
leaderboard = leaderboard.head(rows=min(leaderboard.nrow, top_n))
if row_index is not None:
model_ids = [m[0] for m in
leaderboard["model_id"].as_data_frame(use_pandas=False, header=False)]
with no_progress():
preds = h2o.get_model(model_ids[0]).predict(frame[row_index, :])
for model_id in model_ids[1:]:
preds = preds.rbind(h2o.get_model(model_id).predict(frame[row_index, :]))
leaderboard = leaderboard.cbind(preds)
return leaderboard
else:
METRICS = [
"AUC",
"mean_residual_deviance",
"mean_per_class_error",
"logloss",
"pr_auc",
"RMSE",
"MSE",
"mae",
"rmsle",
]
import math
from collections import defaultdict
task = None
result = defaultdict(list)
predictions = []
with no_progress():
for model in models:
result["model_id"].append(model.model_id)
perf = model.model_performance(frame)
task = perf._metric_json.get("model_category")
for metric in METRICS:
result[metric.lower()].append(perf._metric_json.get(metric))
if row_index is not None:
predictions.append(model.predict(frame[row_index, :]))
for metric in METRICS:
if not any(result[metric.lower()]) or not all([not math.isnan(float(m)) for m in result[metric.lower()]]):
del result[metric.lower()]
leaderboard = h2o.H2OFrame(result)[["model_id"] + [m.lower()
for m in METRICS
if m.lower() in result]]
if row_index is not None:
preds = predictions[0]
for pr in predictions[1:]:
preds = preds.rbind(pr)
leaderboard = leaderboard.cbind(preds)
sort_metric = "mse" if task is None else \
"auc" if task.lower() == "binomial" else \
"logloss" if task.lower() == "multinomial" else \
"mean_residual_deviance"
return leaderboard.sort(sort_metric).head(rows=min(top_n, leaderboard.nrow))
def _process_explanation_lists(
exclude_explanations, # type: Union[str, List[str]]
include_explanations, # type: Union[str, List[str]]
possible_explanations # type: List[str]
):
# type: (...) -> List[str]
"""
Helper function to process explanation lists.
:param exclude_explanations: list of model explanations to exclude
:param include_explanations: list of model explanations to include
:param possible_explanations: list of all possible explanations
:returns: list of actual explanations
"""
if not isinstance(include_explanations, list):
include_explanations = [include_explanations]
if not isinstance(exclude_explanations, list):
exclude_explanations = [exclude_explanations]
include_explanations = [exp.lower() for exp in include_explanations]
exclude_explanations = [exp.lower() for exp in exclude_explanations]
if len(exclude_explanations) == 0:
explanations = possible_explanations if "all" in include_explanations \
else include_explanations
else:
if "all" not in include_explanations:
raise RuntimeError(
"Only one of include_explanations or exclude_explanation should be specified!")
for exp in exclude_explanations:
if exp not in possible_explanations:
raise RuntimeError("Unknown explanation \"{}\". Please use one of: {}".format(
exp, possible_explanations))
explanations = [exp for exp in possible_explanations if exp not in exclude_explanations]
return explanations
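# Illustrative sketch for _process_explanation_lists (argument order: exclude, include, possible):
#   _process_explanation_lists([], "ALL", ["pdp", "ice"])    -> ["pdp", "ice"]
#   _process_explanation_lists([], "pdp", ["pdp", "ice"])    -> ["pdp"]
#   _process_explanation_lists("ice", "ALL", ["pdp", "ice"]) -> ["pdp"]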
def _process_models_input(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List, h2o.model.ModelBase]
frame, # type: h2o.H2OFrame
):
    # type: (...) -> Tuple[bool, List, bool, bool, bool, List, List, List]
"""
Helper function to get basic information about models/H2OAutoML.
:param models: H2OAutoML/List of models/H2O Model
:param frame: H2O Frame
    :returns: tuple (is_aml, models_to_show, classification, multinomial_classification,
                     multiple_models, targets, tree_models_to_show, models_with_varimp)
"""
is_aml = False
if _is_automl_or_leaderboard(models):
is_aml = True
if isinstance(models, h2o.automl._base.H2OAutoMLBaseMixin):
models_to_show = [models.leader]
models = models.leaderboard
else:
models_to_show = [h2o.get_model(models[0, "model_id"])]
if _has_varimp(models_to_show[0]):
models_with_varimp = models_to_show
else:
model_with_varimp = next(_get_models_from_automl_or_leaderboard(models, filter_=_has_varimp), None)
models_with_varimp = [] if model_with_varimp is None else [model_with_varimp]
multiple_models = models.nrow > 1
elif isinstance(models, h2o.model.ModelBase):
models_to_show = [models]
multiple_models = False
models_with_varimp = [models] if _has_varimp(models) else []
else:
models_to_show = models
multiple_models = len(models) > 1
models_with_varimp = [model for model in models if _has_varimp(model)]
tree_models_to_show = _get_tree_models(models, 1 if is_aml else float("inf"))
y = _get_xy(models_to_show[0])[1]
classification = frame[y].isfactor()[0]
multinomial_classification = classification and frame[y].nlevels()[0] > 2
targets = [None]
if multinomial_classification:
targets = [[t] for t in frame[y].levels()[0]]
return is_aml, models_to_show, classification, multinomial_classification, \
multiple_models, targets, tree_models_to_show, models_with_varimp
def _custom_args(user_specified, **kwargs):
# type: (Optional[Dict], **Any) -> Dict
"""
Helper function to make customization of arguments easier.
:param user_specified: dictionary of user specified overrides or None
:param kwargs: default values, such as, `top_n=5`
:returns: dictionary of actual arguments to use
"""
if user_specified is None:
user_specified = dict()
result = dict(**kwargs)
result.update(user_specified)
return result
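# Illustrative sketch for _custom_args (hypothetical values):
# _custom_args(dict(figsize=(8, 8)), figsize=(16, 9), top_n=5)
# returns {"figsize": (8, 8), "top_n": 5} -- user-specified overrides win over the defaults.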
def explain(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, h2o.H2OFrame, List[h2o.model.ModelBase]]
frame, # type: h2o.H2OFrame
columns=None, # type: Optional[Union[List[int], List[str]]]
top_n_features=5, # type: int
include_explanations="ALL", # type: Union[str, List[str]]
exclude_explanations=[], # type: Union[str, List[str]]
plot_overrides=dict(), # type: Dict
        figsize=(16, 9),  # type: Tuple[float, float]
render=True, # type: bool
qualitative_colormap="Dark2", # type: str
sequential_colormap="RdYlBu_r" # type: str
):
# type: (...) -> H2OExplanation
"""
    Generate model explanations on the frame data set.
    The H2O Explainability Interface is a convenient wrapper around a number of explainability
    methods and visualizations in H2O. The function can be applied to a single model or to a
    group of models and returns an object containing explanations, such as a partial dependence
    plot or a variable importance plot. Most of the explanations are visual (plots); each can
    also be created individually by the corresponding utility function or method.
:param models: a list of H2O models, an H2O AutoML instance, or an H2OFrame with a 'model_id' column (e.g. H2OAutoML leaderboard)
:param frame: H2OFrame
:param columns: either a list of columns or column indices to show. If specified
parameter top_n_features will be ignored.
:param top_n_features: a number of columns to pick using variable importance (where applicable).
:param include_explanations: if specified, return only the specified model explanations
(Mutually exclusive with exclude_explanations)
:param exclude_explanations: exclude specified model explanations
:param plot_overrides: overrides for individual model explanations
:param figsize: figure size; passed directly to matplotlib
    :param render: if True, render the model explanations; otherwise model explanations are just returned
    :param qualitative_colormap: used for setting qualitative values in plots
    :param sequential_colormap: used for setting sequential values in plots
    :returns: H2OExplanation containing the model explanations including headers and descriptions
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train an H2OAutoML
>>> aml = H2OAutoML(max_models=10)
>>> aml.train(y=response, training_frame=train)
>>>
>>> # Create the H2OAutoML explanation
>>> aml.explain(test)
>>>
>>> # Create the leader model explanation
>>> aml.leader.explain(test)
"""
plt = get_matplotlib_pyplot(False, raise_if_not_available=True)
(is_aml, models_to_show, classification, multinomial_classification, multiple_models, targets,
tree_models_to_show, models_with_varimp) = _process_models_input(models, frame)
if top_n_features < 0:
top_n_features = float("inf")
if columns is not None and isinstance(columns, list):
columns_of_interest = [frame.columns[col] if isinstance(col, int) else col for col in columns]
else:
columns_of_interest = None
possible_explanations = [
"leaderboard",
"confusion_matrix",
"residual_analysis",
"varimp",
"varimp_heatmap",
"model_correlation_heatmap",
"shap_summary",
"pdp",
"ice"
]
explanations = _process_explanation_lists(
exclude_explanations=exclude_explanations,
include_explanations=include_explanations,
possible_explanations=possible_explanations
)
if render:
display = _display
else:
display = _dont_display
result = H2OExplanation()
if multiple_models and "leaderboard" in explanations:
result["leaderboard"] = H2OExplanation()
result["leaderboard"]["header"] = display(Header("Leaderboard"))
result["leaderboard"]["description"] = display(Description("leaderboard"))
result["leaderboard"]["data"] = display(_get_leaderboard(models, frame))
if classification:
if "confusion_matrix" in explanations:
result["confusion_matrix"] = H2OExplanation()
result["confusion_matrix"]["header"] = display(Header("Confusion Matrix"))
result["confusion_matrix"]["description"] = display(Description("confusion_matrix"))
result["confusion_matrix"]["subexplanations"] = H2OExplanation()
for model in models_to_show:
result["confusion_matrix"]["subexplanations"][model.model_id] = H2OExplanation()
result["confusion_matrix"]["subexplanations"][model.model_id]["header"] = display(
Header(model.model_id, 2))
result["confusion_matrix"]["subexplanations"][model.model_id]["plots"] = H2OExplanation()
result["confusion_matrix"]["subexplanations"][model.model_id]["plots"][model.model_id] = display(
model.model_performance(
**_custom_args(plot_overrides.get("confusion_matrix"), test_data=frame)
).confusion_matrix()
)
else:
if "residual_analysis" in explanations:
result["residual_analysis"] = H2OExplanation()
result["residual_analysis"]["header"] = display(Header("Residual Analysis"))
result["residual_analysis"]["description"] = display(Description("residual_analysis"))
result["residual_analysis"]["plots"] = H2OExplanation()
for model in models_to_show:
result["residual_analysis"]["plots"][model.model_id] = display(
residual_analysis_plot(model,
frame,
**_custom_args(
plot_overrides.get(
"residual_analysis"),
figsize=figsize)))
if len(models_with_varimp) > 0 and "varimp" in explanations:
result["varimp"] = H2OExplanation()
result["varimp"]["header"] = display(Header("Variable Importance"))
result["varimp"]["description"] = display(Description("variable_importance"))
result["varimp"]["plots"] = H2OExplanation()
for model in models_with_varimp:
varimp_plot = _varimp_plot(model, figsize, **plot_overrides.get("varimp_plot", dict()))
result["varimp"]["plots"][model.model_id] = display(varimp_plot)
if columns_of_interest is None:
varimps = _consolidate_varimps(models_with_varimp[0])
columns_of_interest = sorted(varimps.keys(), key=lambda k: -varimps[k])[
:min(top_n_features, len(varimps))]
else:
if columns_of_interest is None:
columns_of_interest = _get_xy(models_to_show[0])[0]
# Make sure that there are no string columns to explain as they are not supported by pdp
# Usually those columns would not be used by algos so this just makes sure to exclude them
# if user specifies top_n=float('inf') or columns_of_interest=x etc.
dropped_string_columns = [col for col in columns_of_interest if frame.type(col) == "string"]
if len(dropped_string_columns) > 0:
warnings.warn("Dropping string columns as they are not supported: {}".format(dropped_string_columns))
columns_of_interest = [col for col in columns_of_interest if frame.type(col) != "string"]
if is_aml or len(models_to_show) > 1:
if "varimp_heatmap" in explanations:
result["varimp_heatmap"] = H2OExplanation()
result["varimp_heatmap"]["header"] = display(
Header("Variable Importance Heatmap"))
result["varimp_heatmap"]["description"] = display(
Description("varimp_heatmap"))
result["varimp_heatmap"]["plots"] = display(varimp_heatmap(
models,
**_custom_args(plot_overrides.get("varimp_heatmap"),
colormap=sequential_colormap,
figsize=figsize)))
if "model_correlation_heatmap" in explanations:
result["model_correlation_heatmap"] = H2OExplanation()
result["model_correlation_heatmap"]["header"] = display(Header("Model Correlation"))
result["model_correlation_heatmap"]["description"] = display(Description(
"model_correlation_heatmap"))
result["model_correlation_heatmap"]["plots"] = display(model_correlation_heatmap(
models, **_custom_args(plot_overrides.get("model_correlation_heatmap"),
frame=frame,
colormap=sequential_colormap,
figsize=figsize)))
# SHAP Summary
if len(tree_models_to_show) > 0 and not multinomial_classification \
and "shap_summary" in explanations:
result["shap_summary"] = H2OExplanation()
result["shap_summary"]["header"] = display(Header("SHAP Summary"))
result["shap_summary"]["description"] = display(Description("shap_summary"))
result["shap_summary"]["plots"] = H2OExplanation()
for tree_model in tree_models_to_show:
result["shap_summary"]["plots"][tree_model.model_id] = display(shap_summary_plot(
tree_model,
**_custom_args(
plot_overrides.get("shap_summary_plot"),
frame=frame,
figsize=figsize
)))
# PDP
if "pdp" in explanations:
if is_aml or multiple_models:
result["pdp"] = H2OExplanation()
result["pdp"]["header"] = display(Header("Partial Dependence Plots"))
result["pdp"]["description"] = display(Description("pdp"))
result["pdp"]["plots"] = H2OExplanation()
for column in columns_of_interest:
result["pdp"]["plots"][column] = H2OExplanation()
for target in targets:
pdp = display(pd_multi_plot(
models, column=column, target=target,
**_custom_args(plot_overrides.get("pdp"),
frame=frame,
figsize=figsize,
colormap=qualitative_colormap)))
if target is None:
result["pdp"]["plots"][column] = pdp
else:
result["pdp"]["plots"][column][target[0]] = pdp
else:
result["pdp"] = H2OExplanation()
result["pdp"]["header"] = display(Header("Partial Dependence Plots"))
result["pdp"]["description"] = display(Description("pdp"))
result["pdp"]["plots"] = H2OExplanation()
for column in columns_of_interest:
result["pdp"]["plots"][column] = H2OExplanation()
for target in targets:
fig = pd_plot(models_to_show[0], column=column, target=target,
**_custom_args(plot_overrides.get("pdp"),
frame=frame,
figsize=figsize,
colormap=qualitative_colormap))
if target is None:
result["pdp"]["plots"][column] = display(fig)
else:
result["pdp"]["plots"][column][target[0]] = display(fig)
# ICE
if "ice" in explanations and not classification:
result["ice"] = H2OExplanation()
result["ice"]["header"] = display(Header("Individual Conditional Expectation"))
result["ice"]["description"] = display(Description("ice"))
result["ice"]["plots"] = H2OExplanation()
for column in columns_of_interest:
result["ice"]["plots"][column] = H2OExplanation()
for model in models_to_show:
result["ice"]["plots"][column][model.model_id] = H2OExplanation()
for target in targets:
ice = display(
ice_plot(
model, column=column,
target=target,
**_custom_args(
plot_overrides.get("ice_plot"),
frame=frame,
figsize=figsize,
colormap=sequential_colormap
)))
if target is None:
result["ice"]["plots"][column][model.model_id] = ice
else:
result["ice"]["plots"][column][model.model_id][target[0]] = ice
return result
def explain_row(
models, # type: Union[h2o.automl._base.H2OAutoMLBaseMixin, List[h2o.model.ModelBase]]
frame, # type: h2o.H2OFrame
row_index, # type: int
columns=None, # type: Optional[Union[List[int], List[str]]]
top_n_features=5, # type: int
include_explanations="ALL", # type: Union[str, List[str]]
exclude_explanations=[], # type: Union[str, List[str]]
plot_overrides=dict(), # type: Dict
qualitative_colormap="Dark2", # type: str
        figsize=(16, 9),  # type: Tuple[float, float]
render=True, # type: bool
):
# type: (...) -> H2OExplanation
"""
    Generate model explanations on the frame data set for a given instance.
    Explain the behavior of a model or group of models with respect to a single row of data.
    The function returns an object containing explanations, such as a partial dependence plot
    or a variable importance plot. Most of the explanations are visual (plots); each can also
    be created individually by the corresponding utility function or method.
:param models: H2OAutoML object, supervised H2O model, or list of supervised H2O models
:param frame: H2OFrame
:param row_index: row index of the instance to inspect
:param columns: either a list of columns or column indices to show. If specified
parameter top_n_features will be ignored.
:param top_n_features: a number of columns to pick using variable importance (where applicable).
:param include_explanations: if specified, return only the specified model explanations
(Mutually exclusive with exclude_explanations)
:param exclude_explanations: exclude specified model explanations
:param plot_overrides: overrides for individual model explanations
:param qualitative_colormap: a colormap name
:param figsize: figure size; passed directly to matplotlib
:param render: if True, render the model explanations; otherwise model explanations are just returned
:returns: H2OExplanation containing the model explanations including headers and descriptions
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>>
>>> h2o.init()
>>>
>>> # Import the wine dataset into H2O:
>>> f = "https://h2o-public-test-data.s3.amazonaws.com/smalldata/wine/winequality-redwhite-no-BOM.csv"
>>> df = h2o.import_file(f)
>>>
>>> # Set the response
>>> response = "quality"
>>>
>>> # Split the dataset into a train and test set:
>>> train, test = df.split_frame([0.8])
>>>
>>> # Train an H2OAutoML
>>> aml = H2OAutoML(max_models=10)
>>> aml.train(y=response, training_frame=train)
>>>
>>> # Create the H2OAutoML explanation
>>> aml.explain_row(test, row_index=0)
>>>
>>> # Create the leader model explanation
>>> aml.leader.explain_row(test, row_index=0)
"""
(is_aml, models_to_show, _, multinomial_classification, multiple_models,
targets, tree_models_to_show, models_with_varimp) = _process_models_input(models, frame)
if columns is not None and isinstance(columns, list):
columns_of_interest = [frame.columns[col] if isinstance(col, int) else col for col in columns]
else:
if len(models_with_varimp) > 0:
varimps = _consolidate_varimps(models_with_varimp[0])
columns_of_interest = sorted(varimps.keys(), key=lambda k: -varimps[k])[
:min(top_n_features, len(varimps))]
else:
            warnings.warn("No model with variable importance. Selecting all features to explain.")
columns_of_interest = _get_xy(models_to_show[0])[0]
# Make sure that there are no string columns to explain as they are not supported by pdp
# Usually those columns would not be used by algos so this just makes sure to exclude them
# if user specifies top_n=float('inf') or columns_of_interest=x etc.
dropped_string_columns = [col for col in columns_of_interest if frame.type(col) == "string"]
if len(dropped_string_columns) > 0:
warnings.warn("Dropping string columns as they are not supported: {}".format(dropped_string_columns))
columns_of_interest = [col for col in columns_of_interest if frame.type(col) != "string"]
possible_explanations = ["leaderboard", "shap_explain_row", "ice"]
explanations = _process_explanation_lists(
exclude_explanations=exclude_explanations,
include_explanations=include_explanations,
possible_explanations=possible_explanations
)
if render:
display = _display
else:
display = _dont_display
result = H2OExplanation()
if multiple_models and "leaderboard" in explanations:
result["leaderboard"] = H2OExplanation()
result["leaderboard"]["header"] = display(Header("Leaderboard"))
result["leaderboard"]["description"] = display(Description("leaderboard_row"))
result["leaderboard"]["data"] = display(_get_leaderboard(models, row_index=row_index,
**_custom_args(
plot_overrides.get("leaderboard"),
frame=frame)))
if len(tree_models_to_show) > 0 and not multinomial_classification and \
"shap_explain_row" in explanations:
result["shap_explain_row"] = H2OExplanation()
result["shap_explain_row"]["header"] = display(Header("SHAP Explanation"))
result["shap_explain_row"]["description"] = display(Description("shap_explain_row"))
for tree_model in tree_models_to_show:
result["shap_explain_row"][tree_model.model_id] = display(shap_explain_row_plot(
tree_model, row_index=row_index,
**_custom_args(plot_overrides.get("shap_explain_row"),
frame=frame, figsize=figsize)))
if "ice" in explanations and not multiple_models:
result["ice"] = H2OExplanation()
result["ice"]["header"] = display(Header("Individual Conditional Expectation"))
result["ice"]["description"] = display(Description("ice_row"))
result["ice"]["plots"] = H2OExplanation()
for column in columns_of_interest:
result["ice"]["plots"][column] = H2OExplanation()
for target in targets:
ice = display(pd_plot(
models_to_show[0], column=column,
row_index=row_index,
target=target,
**_custom_args(
plot_overrides.get("ice"),
frame=frame,
figsize=figsize,
colormap=qualitative_colormap
)))
if target is None:
result["ice"]["plots"][column] = ice
else:
result["ice"]["plots"][column][target[0]] = ice
return result
| 53,082 |
7,482 | /*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2019-01-25 winner micro
*/
#ifndef DRV_ONESHOT_H__
#define DRV_ONESHOT_H__
typedef enum{
WM_UDP,
WM_APSOCKET,
WM_APWEB
}WM_ONESHOT_MODE;
typedef void (*wm_oneshot_callback)(int state, unsigned char *ssid, unsigned char *key);
extern int wm_oneshot_start(WM_ONESHOT_MODE mode, wm_oneshot_callback callback);
extern int wm_oneshot_stop(void);
extern int wm_oneshot_get(void);
#endif
| 243 |
348 | {"nom":"Ousse","circ":"2ème circonscription","dpt":"Pyrénées-Atlantiques","inscrits":1257,"abs":528,"votants":729,"blancs":21,"nuls":5,"exp":703,"res":[{"nuance":"MDM","nom":"<NAME>","voix":309},{"nuance":"SOC","nom":"Mme <NAME>","voix":99},{"nuance":"FI","nom":"M. <NAME>","voix":75},{"nuance":"FN","nom":"M. <NAME>","voix":69},{"nuance":"LR","nom":"M. <NAME>","voix":69},{"nuance":"ECO","nom":"M. <NAME>","voix":27},{"nuance":"ECO","nom":"Mme <NAME>","voix":24},{"nuance":"DVD","nom":"M. <NAME>","voix":17},{"nuance":"EXG","nom":"M. <NAME>","voix":6},{"nuance":"DVD","nom":"M. <NAME>","voix":4},{"nuance":"DIV","nom":"Mme <NAME>","voix":3},{"nuance":"DIV","nom":"M. <NAME>","voix":1}]} | 283 |
16,989 | // Copyright 2018 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.skyframe.serialization;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.CodedOutputStream;
import java.io.IOException;
import java.io.Serializable;
import java.lang.invoke.SerializedLambda;
import java.lang.reflect.Method;
/**
* A codec for Java 8 serializable lambdas. Lambdas that are tagged as {@link Serializable} have a
* generated method, {@code writeReplace}, that converts them into a {@link SerializedLambda}, which
* can then be serialized like a normal object. On deserialization, we call {@link
* SerializedLambda#readResolve}, which converts the object back into a lambda.
*
* <p>Since lambdas do not share a common base class, choosing this codec for serializing them must
* be special-cased in {@link ObjectCodecRegistry}. We must also make a somewhat arbitrary choice
* around the generic parameter. Since all of our lambdas are {@link Serializable}, we use that.
* Because {@link Serializable} is an interface, not a class, this codec will never be chosen for
* any object without special-casing.
*/
class LambdaCodec implements ObjectCodec<Serializable> {
private final Method readResolveMethod;
LambdaCodec() {
try {
this.readResolveMethod = SerializedLambda.class.getDeclaredMethod("readResolve");
} catch (NoSuchMethodException e) {
throw new IllegalStateException(e);
}
readResolveMethod.setAccessible(true);
}
static boolean isProbablyLambda(Class<?> type) {
return type.isSynthetic() && !type.isLocalClass() && !type.isAnonymousClass();
}
@Override
public Class<? extends Serializable> getEncodedClass() {
return Serializable.class;
}
@Override
public void serialize(SerializationContext context, Serializable obj, CodedOutputStream codedOut)
throws SerializationException, IOException {
Class<?> objClass = obj.getClass();
if (!isProbablyLambda(objClass)) {
throw new SerializationException(obj + " is not a lambda: " + objClass);
}
Method writeReplaceMethod;
try {
// TODO(janakr): We could cache these methods if retrieval shows up as a hotspot.
writeReplaceMethod = objClass.getDeclaredMethod("writeReplace");
} catch (NoSuchMethodException e) {
throw new SerializationException(
"No writeReplace method for " + obj + " with " + objClass, e);
}
writeReplaceMethod.setAccessible(true);
SerializedLambda serializedLambda;
try {
serializedLambda = (SerializedLambda) writeReplaceMethod.invoke(obj);
} catch (ReflectiveOperationException e) {
throw new SerializationException(
"Exception invoking writeReplace for " + obj + " with " + objClass, e);
}
context.serialize(serializedLambda, codedOut);
}
@Override
public Serializable deserialize(DeserializationContext context, CodedInputStream codedIn)
throws SerializationException, IOException {
SerializedLambda serializedLambda = context.deserialize(codedIn);
try {
return (Serializable) readResolveMethod.invoke(serializedLambda);
} catch (ReflectiveOperationException e) {
throw new IllegalStateException("Error read-resolving " + serializedLambda, e);
}
}
}
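// Illustrative round trip (hypothetical; not part of the original file): given a lambda such as
//   Runnable r = (Runnable & Serializable) () -> {};
// serialize() reflectively invokes the compiler-generated r.writeReplace() to obtain a
// SerializedLambda, and deserialize() later invokes SerializedLambda.readResolve() to
// reconstitute an equivalent lambda instance.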
| 1,183 |
5,272 | <reponame>JiangShoudong/iOS-
//
// LMJElementsFlowLayout.h
//  Waterfall-flow layout (refined interface)
//
// Created by apple on 16/7/31.
//  Copyright © 2016 NJHu. All rights reserved.
//
#import <UIKit/UIKit.h>
@class LMJElementsFlowLayout;
@protocol LMJElementsFlowLayoutDelegate <NSObject>
@required
/**
 * Required.
 *
 * @param waterflowLayout the layout asking its delegate for an item size
 * @param indexPath the indexPath of the corresponding cell (indexPath.section is always 0)
 *
 * @return the size of the cell at the given indexPath
*/
- (CGSize)waterflowLayout:(LMJElementsFlowLayout *)waterflowLayout collectionView:(UICollectionView *)collectionView sizeForItemAtIndexPath:(NSIndexPath *)indexPath;
@optional
/**
 * Column spacing; defaults to 10.
*/
- (CGFloat)waterflowLayout:(LMJElementsFlowLayout *)waterflowLayout collectionView:(UICollectionView *)collectionView columnsMarginForItemAtIndexPath:(NSIndexPath *)indexPath;
/**
 * Row spacing; defaults to 10.
*/
- (CGFloat)waterflowLayout:(LMJElementsFlowLayout *)waterflowLayout collectionView:(UICollectionView *)collectionView linesMarginForItemAtIndexPath:(NSIndexPath *)indexPath;
/**
 * Insets from the collectionView's edges; defaults to {20, 10, 10, 10}.
*/
- (UIEdgeInsets)waterflowLayout:(LMJElementsFlowLayout *)waterflowLayout edgeInsetsInCollectionView:(UICollectionView *)collectionView;
@end
@interface LMJElementsFlowLayout : UICollectionViewLayout
/** The layout's delegate */
- (instancetype)initWithDelegate:(id<LMJElementsFlowLayoutDelegate>)delegate;
+ (instancetype)flowLayoutWithDelegate:(id<LMJElementsFlowLayoutDelegate>)delegate;
@end
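// Illustrative usage (hypothetical): create the layout with a delegate and attach it to a
// collection view, e.g.
//   LMJElementsFlowLayout *layout = [LMJElementsFlowLayout flowLayoutWithDelegate:self];
//   UICollectionView *collectionView =
//       [[UICollectionView alloc] initWithFrame:frame collectionViewLayout:layout];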
| 613 |
879 | package org.zstack.test.utils;
import junit.framework.Assert;
import org.junit.Test;
import org.zstack.utils.network.NetworkUtils;
import java.util.concurrent.TimeUnit;
public class TestRemotePort {
@Test
public void test() {
boolean open = NetworkUtils.isRemotePortOpen("127.0.0.1", 22, (int) TimeUnit.SECONDS.toMillis(1));
Assert.assertTrue(open);
open = NetworkUtils.isRemotePortOpen("127.0.0.1", 3, (int) TimeUnit.SECONDS.toMillis(1));
Assert.assertFalse(open);
}
}
| 212 |
374 | /* Copyright (c) 2008-2009 <NAME>
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#import <CoreFoundation/CFBase.h>
#import <CoreFoundation/CFString.h>
#import <CoreFoundation/CFDictionary.h>
typedef struct __NSMutableAttributedString *CFAttributedStringRef;
typedef struct __NSMutableAttributedString *CFMutableAttributedStringRef;
COREFOUNDATION_EXPORT CFTypeID CFAttributedStringGetTypeID(void);
COREFOUNDATION_EXPORT CFAttributedStringRef CFAttributedStringCreate(CFAllocatorRef allocator, CFStringRef string, CFDictionaryRef attributes);
COREFOUNDATION_EXPORT CFAttributedStringRef CFAttributedStringCreateWithSubstring(CFAllocatorRef allocator, CFAttributedStringRef self, CFRange range);
COREFOUNDATION_EXPORT CFAttributedStringRef CFAttributedStringCreateCopy(CFAllocatorRef allocator, CFAttributedStringRef self);
COREFOUNDATION_EXPORT CFIndex CFAttributedStringGetLength(CFAttributedStringRef self);
COREFOUNDATION_EXPORT CFStringRef CFAttributedStringGetString(CFAttributedStringRef self);
COREFOUNDATION_EXPORT CFTypeRef CFAttributedStringGetAttribute(CFAttributedStringRef self, CFIndex location, CFStringRef name, CFRange *effectiveRange);
COREFOUNDATION_EXPORT CFTypeRef CFAttributedStringGetAttributeAndLongestEffectiveRange(CFAttributedStringRef self, CFIndex location, CFStringRef name, CFRange range, CFRange *longestEffectiveRange);
COREFOUNDATION_EXPORT CFDictionaryRef CFAttributedStringGetAttributes(CFAttributedStringRef self, CFIndex location, CFRange *effectiveRange);
COREFOUNDATION_EXPORT CFDictionaryRef CFAttributedStringGetAttributesAndLongestEffectiveRange(CFAttributedStringRef self, CFIndex location, CFRange range, CFRange *longestEffectiveRange);
// mutable
COREFOUNDATION_EXPORT CFMutableAttributedStringRef CFAttributedStringCreateMutable(CFAllocatorRef allocator, CFIndex maxLength);
COREFOUNDATION_EXPORT CFMutableAttributedStringRef CFAttributedStringCreateMutableCopy(CFAllocatorRef allocator, CFIndex maxLength, CFAttributedStringRef self);
COREFOUNDATION_EXPORT CFMutableStringRef CFAttributedStringGetMutableString(CFMutableAttributedStringRef self);
COREFOUNDATION_EXPORT void CFAttributedStringRemoveAttribute(CFMutableAttributedStringRef self, CFRange range, CFStringRef name);
COREFOUNDATION_EXPORT void CFAttributedStringReplaceAttributedString(CFMutableAttributedStringRef self, CFRange range, CFAttributedStringRef replacement);
COREFOUNDATION_EXPORT void CFAttributedStringReplaceString(CFMutableAttributedStringRef self, CFRange range, CFStringRef replacement);
COREFOUNDATION_EXPORT void CFAttributedStringSetAttribute(CFMutableAttributedStringRef self, CFRange range, CFStringRef name, CFTypeRef value);
COREFOUNDATION_EXPORT void CFAttributedStringSetAttributes(CFMutableAttributedStringRef self, CFRange range, CFDictionaryRef replacement, Boolean clearPreviousAttributes);
COREFOUNDATION_EXPORT void CFAttributedStringBeginEditing(CFMutableAttributedStringRef self);
COREFOUNDATION_EXPORT void CFAttributedStringEndEditing(CFMutableAttributedStringRef self);
| 1,121 |
461 | <reponame>t3zeng/mynewt-nimble
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "syscfg/syscfg.h"
#if MYNEWT_VAL(BLE_STORE_CONFIG_PERSIST)
#include <inttypes.h>
#include <string.h>
#include "sysinit/sysinit.h"
#include "host/ble_hs.h"
#include "config/config.h"
#include "base64/base64.h"
#include "store/config/ble_store_config.h"
#include "ble_store_config_priv.h"
static int
ble_store_config_conf_set(int argc, char **argv, char *val);
static int
ble_store_config_conf_export(void (*func)(char *name, char *val),
enum conf_export_tgt tgt);
static struct conf_handler ble_store_config_conf_handler = {
.ch_name = "ble_hs",
.ch_get = NULL,
.ch_set = ble_store_config_conf_set,
.ch_commit = NULL,
.ch_export = ble_store_config_conf_export
};
#define BLE_STORE_CONFIG_SEC_ENCODE_SZ \
BASE64_ENCODE_SIZE(sizeof (struct ble_store_value_sec))
#define BLE_STORE_CONFIG_SEC_SET_ENCODE_SZ \
(MYNEWT_VAL(BLE_STORE_MAX_BONDS) * BLE_STORE_CONFIG_SEC_ENCODE_SZ + 1)
#define BLE_STORE_CONFIG_CCCD_ENCODE_SZ \
BASE64_ENCODE_SIZE(sizeof (struct ble_store_value_cccd))
#define BLE_STORE_CONFIG_CCCD_SET_ENCODE_SZ \
(MYNEWT_VAL(BLE_STORE_MAX_CCCDS) * BLE_STORE_CONFIG_CCCD_ENCODE_SZ + 1)
static void
ble_store_config_serialize_arr(const void *arr, int obj_sz, int num_objs,
char *out_buf, int buf_sz)
{
int arr_size;
arr_size = obj_sz * num_objs;
assert(arr_size <= buf_sz);
base64_encode(arr, arr_size, out_buf, 1);
}
static int
ble_store_config_deserialize_arr(const char *enc,
void *out_arr,
int obj_sz,
int *out_num_objs)
{
int len;
len = base64_decode(enc, out_arr);
if (len < 0) {
return OS_EINVAL;
}
*out_num_objs = len / obj_sz;
return 0;
}
static int
ble_store_config_conf_set(int argc, char **argv, char *val)
{
int rc;
if (argc == 1) {
if (strcmp(argv[0], "our_sec") == 0) {
rc = ble_store_config_deserialize_arr(
val,
ble_store_config_our_secs,
sizeof *ble_store_config_our_secs,
&ble_store_config_num_our_secs);
return rc;
} else if (strcmp(argv[0], "peer_sec") == 0) {
rc = ble_store_config_deserialize_arr(
val,
ble_store_config_peer_secs,
sizeof *ble_store_config_peer_secs,
&ble_store_config_num_peer_secs);
return rc;
} else if (strcmp(argv[0], "cccd") == 0) {
rc = ble_store_config_deserialize_arr(
val,
ble_store_config_cccds,
sizeof *ble_store_config_cccds,
&ble_store_config_num_cccds);
return rc;
}
}
return OS_ENOENT;
}
static int
ble_store_config_conf_export(void (*func)(char *name, char *val),
enum conf_export_tgt tgt)
{
union {
char sec[BLE_STORE_CONFIG_SEC_SET_ENCODE_SZ];
char cccd[BLE_STORE_CONFIG_CCCD_SET_ENCODE_SZ];
} buf;
ble_store_config_serialize_arr(ble_store_config_our_secs,
sizeof *ble_store_config_our_secs,
ble_store_config_num_our_secs,
buf.sec,
sizeof buf.sec);
func("ble_hs/our_sec", buf.sec);
ble_store_config_serialize_arr(ble_store_config_peer_secs,
sizeof *ble_store_config_peer_secs,
ble_store_config_num_peer_secs,
buf.sec,
sizeof buf.sec);
func("ble_hs/peer_sec", buf.sec);
ble_store_config_serialize_arr(ble_store_config_cccds,
sizeof *ble_store_config_cccds,
ble_store_config_num_cccds,
buf.cccd,
sizeof buf.cccd);
func("ble_hs/cccd", buf.cccd);
return 0;
}
static int
ble_store_config_persist_sec_set(const char *setting_name,
const struct ble_store_value_sec *secs,
int num_secs)
{
char buf[BLE_STORE_CONFIG_SEC_SET_ENCODE_SZ];
int rc;
ble_store_config_serialize_arr(secs, sizeof *secs, num_secs,
buf, sizeof buf);
rc = conf_save_one(setting_name, buf);
if (rc != 0) {
return BLE_HS_ESTORE_FAIL;
}
return 0;
}
int
ble_store_config_persist_our_secs(void)
{
int rc;
rc = ble_store_config_persist_sec_set("ble_hs/our_sec",
ble_store_config_our_secs,
ble_store_config_num_our_secs);
if (rc != 0) {
return rc;
}
return 0;
}
int
ble_store_config_persist_peer_secs(void)
{
int rc;
rc = ble_store_config_persist_sec_set("ble_hs/peer_sec",
ble_store_config_peer_secs,
ble_store_config_num_peer_secs);
if (rc != 0) {
return rc;
}
return 0;
}
int
ble_store_config_persist_cccds(void)
{
char buf[BLE_STORE_CONFIG_CCCD_SET_ENCODE_SZ];
int rc;
ble_store_config_serialize_arr(ble_store_config_cccds,
sizeof *ble_store_config_cccds,
ble_store_config_num_cccds,
buf,
sizeof buf);
rc = conf_save_one("ble_hs/cccd", buf);
if (rc != 0) {
return BLE_HS_ESTORE_FAIL;
}
return 0;
}
void
ble_store_config_conf_init(void)
{
int rc;
rc = conf_register(&ble_store_config_conf_handler);
SYSINIT_PANIC_ASSERT_MSG(rc == 0,
"Failed to register ble_store_config conf");
}
#endif /* MYNEWT_VAL(BLE_STORE_CONFIG_PERSIST) */
| 3,704 |
463 | #!/usr/bin/env python3
import os
from t11 import gen11
from t22 import gen22
from t23 import gen23
from t36 import gen36
from t42 import gen42
from t51 import gen51
from t53 import gen53
from t54 import gen54
def make(c):
os.system('g++ %s.cpp -oa -O2; ./a; rm a' % (c))
if __name__ == '__main__':
gen11()
make('t13')
gen22()
gen23()
gen36()
gen42()
gen51()
gen53()
gen54()
| 164 |
1,338 | /*
* Copyright 2006, Haiku. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* <NAME> <<EMAIL>>
*/
#include "BitmapExporter.h"
#include <Bitmap.h>
#include <BitmapStream.h>
#include <TranslatorFormats.h>
#include <TranslatorRoster.h>
#include "Icon.h"
#include "IconRenderer.h"
// constructor
BitmapExporter::BitmapExporter(uint32 size)
: Exporter(),
fFormat(B_PNG_FORMAT),
fSize(size)
{
}
// destructor
BitmapExporter::~BitmapExporter()
{
}
// Export
status_t
BitmapExporter::Export(const Icon* icon, BPositionIO* stream)
{
if (fSize == 0)
return B_NO_INIT;
// render icon into bitmap with given size and transparent background
uint32 bitmapFlags = 0;
#if __HAIKU__
bitmapFlags |= B_BITMAP_NO_SERVER_LINK;
#endif
BBitmap bitmap(BRect(0, 0, fSize - 1, fSize - 1),
bitmapFlags, B_RGBA32);
status_t ret = bitmap.InitCheck();
if (ret < B_OK)
return ret;
IconRenderer renderer(&bitmap);
renderer.SetIcon(icon);
renderer.SetScale(fSize / 64.0);
renderer.Render();
// renderer.Demultiply(&bitmap);
// save bitmap to translator
BTranslatorRoster* roster = BTranslatorRoster::Default();
if (!roster)
return B_ERROR;
BBitmapStream bitmapStream(&bitmap);
ret = roster->Translate(&bitmapStream, NULL, NULL, stream, fFormat, 0);
BBitmap* dummy;
bitmapStream.DetachBitmap(&dummy);
return ret;
}
// MIMEType
const char*
BitmapExporter::MIMEType()
{
// TODO: ...
return "image/png";
}
| 575 |
665 | <reponame>apache/isis
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package demoapp.dom;
import org.springframework.boot.autoconfigure.domain.EntityScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Profile;
import org.apache.isis.extensions.commandlog.jpa.IsisModuleExtCommandLogJpa;
import org.apache.isis.persistence.jpa.eclipselink.IsisModulePersistenceJpaEclipselink;
import demoapp.dom.domain.actions.Action.commandPublishing.jpa.ActionCommandPublishingJpa;
import demoapp.dom.domain.actions.Action.executionPublishing.jpa.ActionExecutionPublishingJpa;
import demoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.disabled.jpa.DomainObjectEntityChangePublishingDisabledJpa;
import demoapp.dom.domain.objects.DomainObject.entityChangePublishing.annotated.enabled.jpa.DomainObjectEntityChangePublishingEnabledJpa;
import demoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnot.enabled.jpa.DomainObjectEntityChangePublishingEnabledMetaAnnotatedJpa;
import demoapp.dom.domain.objects.DomainObject.entityChangePublishing.metaAnnotOverridden.enabled.jpa.DomainObjectEntityChangePublishingEnabledMetaAnnotOverriddenJpa;
import demoapp.dom.domain.objects.DomainObject.nature.viewmodels.jaxbrefentity.jpa.JaxbRefJpa;
import demoapp.dom.domain.objects.other.embedded.jpa.NumberConstantJpa;
import demoapp.dom.domain.properties.Property.commandPublishing.jpa.PropertyCommandPublishingJpa;
import demoapp.dom.domain.properties.Property.executionPublishing.jpa.PropertyExecutionPublishingJpa;
import demoapp.dom.domain.properties.Property.projecting.jpa.PropertyProjectingChildJpa;
import demoapp.dom.services.core.wrapperFactory.jpa.WrapperFactoryJpa;
import demoapp.dom.services.extensions.secman.apptenancy.jpa.TenantedJpa;
import demoapp.dom.types.isis.blobs.jpa.IsisBlobJpa;
import demoapp.dom.types.isis.clobs.jpa.IsisClobJpa;
import demoapp.dom.types.isis.localresourcepaths.jpa.IsisLocalResourcePathJpa;
import demoapp.dom.types.isis.markups.jpa.IsisMarkupJpa;
import demoapp.dom.types.isis.passwords.jpa.IsisPasswordJpa;
import demoapp.dom.types.isisext.asciidocs.jpa.IsisAsciiDocJpa;
import demoapp.dom.types.isisext.cal.jpa.IsisCalendarEventJpa;
import demoapp.dom.types.isisext.markdowns.jpa.IsisMarkdownJpa;
import demoapp.dom.types.javaawt.images.jpa.JavaAwtBufferedImageJpa;
import demoapp.dom.types.javalang.booleans.jpa.WrapperBooleanJpa;
import demoapp.dom.types.javalang.bytes.jpa.WrapperByteJpa;
import demoapp.dom.types.javalang.characters.jpa.WrapperCharacterJpa;
import demoapp.dom.types.javalang.doubles.jpa.WrapperDoubleJpa;
import demoapp.dom.types.javalang.floats.jpa.WrapperFloatJpa;
import demoapp.dom.types.javalang.integers.jpa.WrapperIntegerJpa;
import demoapp.dom.types.javalang.longs.jpa.WrapperLongJpa;
import demoapp.dom.types.javalang.shorts.jpa.WrapperShortJpa;
import demoapp.dom.types.javalang.strings.jpa.JavaLangStringJpa;
import demoapp.dom.types.javamath.bigdecimals.jpa.JavaMathBigDecimalJpa;
import demoapp.dom.types.javamath.bigintegers.jpa.JavaMathBigIntegerJpa;
import demoapp.dom.types.javanet.urls.jpa.JavaNetUrlJpa;
import demoapp.dom.types.javasql.javasqldate.jpa.JavaSqlDateJpa;
import demoapp.dom.types.javasql.javasqltimestamp.jpa.JavaSqlTimestampJpa;
import demoapp.dom.types.javatime.javatimelocaldate.jpa.JavaTimeLocalDateJpa;
import demoapp.dom.types.javatime.javatimelocaldatetime.jpa.JavaTimeLocalDateTimeJpa;
import demoapp.dom.types.javatime.javatimeoffsetdatetime.jpa.JavaTimeOffsetDateTimeJpa;
import demoapp.dom.types.javatime.javatimeoffsettime.jpa.JavaTimeOffsetTimeJpa;
import demoapp.dom.types.javatime.javatimezoneddatetime.jpa.JavaTimeZonedDateTimeJpa;
import demoapp.dom.types.javautil.javautildate.jpa.JavaUtilDateJpa;
import demoapp.dom.types.javautil.uuids.jpa.JavaUtilUuidJpa;
import demoapp.dom.types.primitive.booleans.jpa.PrimitiveBooleanJpa;
import demoapp.dom.types.primitive.bytes.jpa.PrimitiveByteJpa;
import demoapp.dom.types.primitive.chars.jpa.PrimitiveCharJpa;
import demoapp.dom.types.primitive.doubles.jpa.PrimitiveDoubleJpa;
import demoapp.dom.types.primitive.floats.jpa.PrimitiveFloatJpa;
import demoapp.dom.types.primitive.ints.jpa.PrimitiveIntJpa;
import demoapp.dom.types.primitive.longs.jpa.PrimitiveLongJpa;
import demoapp.dom.types.primitive.shorts.jpa.PrimitiveShortJpa;
@Configuration
@Profile("demo-jpa")
@Import({
DemoModuleCommon.class,
IsisModulePersistenceJpaEclipselink.class,
IsisModuleExtCommandLogJpa.class,
})
@EntityScan(basePackageClasses = {
IsisBlobJpa.class,
IsisClobJpa.class,
IsisLocalResourcePathJpa.class,
IsisMarkupJpa.class,
IsisPasswordJpa.class,
IsisAsciiDocJpa.class,
IsisMarkdownJpa.class,
IsisCalendarEventJpa.class,
JavaAwtBufferedImageJpa.class,
JavaLangStringJpa.class,
JavaMathBigDecimalJpa.class,
JavaMathBigIntegerJpa.class,
JavaNetUrlJpa.class,
JavaSqlDateJpa.class,
JavaSqlTimestampJpa.class,
JavaTimeLocalDateJpa.class,
JavaTimeLocalDateTimeJpa.class,
JavaTimeOffsetDateTimeJpa.class,
JavaTimeOffsetTimeJpa.class,
JavaTimeZonedDateTimeJpa.class,
JavaUtilDateJpa.class,
JavaUtilUuidJpa.class,
PrimitiveBooleanJpa.class,
PrimitiveDoubleJpa.class,
PrimitiveFloatJpa.class,
PrimitiveCharJpa.class,
PrimitiveLongJpa.class,
PrimitiveIntJpa.class,
PrimitiveShortJpa.class,
PrimitiveByteJpa.class,
WrapperBooleanJpa.class,
WrapperDoubleJpa.class,
WrapperFloatJpa.class,
WrapperCharacterJpa.class,
WrapperLongJpa.class,
WrapperIntegerJpa.class,
WrapperShortJpa.class,
WrapperByteJpa.class,
TenantedJpa.class,
WrapperFactoryJpa.class,
DomainObjectEntityChangePublishingDisabledJpa.class,
DomainObjectEntityChangePublishingEnabledJpa.class,
DomainObjectEntityChangePublishingEnabledMetaAnnotatedJpa.class,
DomainObjectEntityChangePublishingEnabledMetaAnnotOverriddenJpa.class,
ActionCommandPublishingJpa.class,
ActionExecutionPublishingJpa.class,
PropertyCommandPublishingJpa.class,
PropertyExecutionPublishingJpa.class,
PropertyProjectingChildJpa.class,
JaxbRefJpa.class,
NumberConstantJpa.class,
})
public class DemoModuleJpa {
}
| 2,754 |
1,165 | /*******************************************************************************
* Copyright 2018 T Mobile, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
******************************************************************************/
package com.tmobile.pacman.api.compliance.service;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.mockito.runners.MockitoJUnitRunner;
import com.tmobile.pacman.api.commons.repo.ElasticSearchRepository;
import com.tmobile.pacman.api.compliance.repository.FAQRepository;
@RunWith(MockitoJUnitRunner.class)
public class FAQServiceImplTest {
@InjectMocks
private FAQServiceImpl faqServiceImpl;
@Mock
private ElasticSearchRepository elasticSearchRepository;
@Mock
private FAQRepository repository;
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
}
@Test
public void getFAQSByWidgetTest() throws Exception {
Map<String, Object> targetTypesMap = new HashMap<>();
targetTypesMap.put("faqid", "q5");
targetTypesMap.put("tag", "vulnerabilities");
List<Map<String, Object>> maintargetTypesList = new ArrayList<>();
maintargetTypesList.add(targetTypesMap);
when(repository.getFAQSFromEs(anyString(), anyString()))
.thenReturn(maintargetTypesList);
when(repository.getRelevantFAQSFromEs(
anyString(), anyString(), anyObject(),
anyObject(), anyObject()))
.thenReturn(maintargetTypesList);
assertThat(faqServiceImpl.getFAQSByWidget("aws-all", ""),
is(notNullValue()));
}
}
| 1,040 |
454 | package io.vertx.up.secure.bridge;
/**
* @author <a href="http://www.origin-x.cn">Lang</a>
*/
interface Info {
String AUTH_401_METHOD = "[ Auth ] Your `@Wall` class missed @Authenticate method ! {0}";
String AUTH_401_SERVICE = "[ Auth ] Your `Lee` in service-loader /META-INF/services/ is missing....";
String AUTH_401_HANDLER = "[ Auth ] You have configured secure, but the authenticate handler is null! type = {0}";
}
| 148 |
895 | <reponame>JKot-Coder/slang
//
// The file is meant to be included multiple times, to produce different
// pieces of declaration/definition code related to diagnostic messages
//
// Each diagnostic is declared here with:
//
// DIAGNOSTIC(id, severity, name, messageFormat)
//
// Where `id` is the unique diagnostic ID, `severity` is the default
// severity (from the `Severity` enum), `name` is a name used to refer
// to this diagnostic from code, and `messageFormat` is the default
// (non-localized) message for the diagnostic, with placeholders
// for any arguments.
#ifndef DIAGNOSTIC
#error Need to #define DIAGNOSTIC(...) before including "test-server-diagnostics-defs.h"
#define DIAGNOSTIC(id, severity, name, messageFormat) /* */
#endif
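// Illustrative consumer (hypothetical): clients typically expand these entries with their own
// macro definition before including this file, e.g. to build an enum of diagnostic IDs:
//   #define DIAGNOSTIC(id, severity, name, messageFormat) name = id,
//   enum class TestServerDiagnosticId { /* #include this file here */ };
//   #undef DIAGNOSTIC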
DIAGNOSTIC(100000, Error, unableToLoadSharedLibrary, "Unable to load shared library '$0'")
DIAGNOSTIC(100001, Error, unableToFindFunctionInSharedLibrary, "Unable to find function '$0' in shared library")
DIAGNOSTIC(100002, Error, unableToGetUnitTestModule, "Unable to get unit test module")
DIAGNOSTIC(100003, Error, unableToFindTest, "Unable to find test '$0'")
DIAGNOSTIC(100004, Error, unableToFindUnitTestModule, "Unable to find unit test module '$0'")
#undef DIAGNOSTIC
| 388 |
8,747 | SOC_IRAM_LOW = 0x40020000
SOC_IRAM_HIGH = 0x40070000
SOC_DRAM_LOW = 0x3ffb0000
SOC_DRAM_HIGH = 0x40000000
SOC_RTC_DRAM_LOW = 0x3ff9e000
SOC_RTC_DRAM_HIGH = 0x3ffa0000
SOC_RTC_DATA_LOW = 0x50000000
SOC_RTC_DATA_HIGH = 0x50002000
| 131 |
400 | <gh_stars>100-1000
#ifndef OS_MODULES_H
#define OS_MODULES_H
#include "interrupt.h"
extern InterruptManager interruptManager;
#endif | 54 |
521 | /**
* @file tests/utils/address_tests.cpp
* @brief Tests for the @c address module.
* @copyright (c) 2017 Avast Software, licensed under the MIT license
*/
#include <gtest/gtest.h>
#include "retdec/utils/address.h"
using namespace ::testing;
namespace retdec {
namespace utils {
namespace tests {
/**
* @brief Tests for the @c Address class.
*/
class AddressTests: public Test
{
};
TEST_F(AddressTests, UninitializedAddressIsUndefined)
{
Address a;
EXPECT_TRUE(a.isUndefined());
EXPECT_FALSE(a.isDefined());
}
TEST_F(AddressTests, InitializedAddressIsDefined)
{
Address a(1234);
EXPECT_FALSE(a.isUndefined());
EXPECT_TRUE(a.isDefined());
}
TEST_F(AddressTests, stringCtorPrefixHexa)
{
Address a("0x1234");
EXPECT_FALSE(a.isUndefined());
EXPECT_TRUE(a.isDefined());
EXPECT_EQ(0x1234, a.getValue());
}
TEST_F(AddressTests, stringCtorNoPrefixDecimal)
{
Address a("1234");
EXPECT_FALSE(a.isUndefined());
EXPECT_TRUE(a.isDefined());
EXPECT_EQ(1234, a.getValue());
}
TEST_F(AddressTests, stringCtorBadIsUndefined1)
{
Address a("");
EXPECT_TRUE(a.isUndefined());
}
TEST_F(AddressTests, stringCtorBadIsUndefined2)
{
Address a("0x");
EXPECT_TRUE(a.isUndefined());
}
TEST_F(AddressTests, stringCtorBadIsUndefined3)
{
Address a("jak55");
EXPECT_TRUE(a.isUndefined());
}
TEST_F(AddressTests, stringCtorBadIsUndefined4)
{
Address a("55jak");
EXPECT_TRUE(a.isUndefined());
}
TEST_F(AddressTests, stringCtorBadIsUndefined5)
{
Address a("0xjak55");
EXPECT_TRUE(a.isUndefined());
}
TEST_F(AddressTests, stringCtorBadIsUndefined6)
{
Address a("0x55 jak");
EXPECT_TRUE(a.isUndefined());
}
TEST_F(AddressTests, AssignmentWorks)
{
unsigned val = 1234;
Address a = val;
EXPECT_FALSE(a.isUndefined());
EXPECT_TRUE(a.isDefined());
EXPECT_TRUE(a == val);
}
TEST_F(AddressTests, IncrementationWorks)
{
unsigned val = 1234;
Address a(val);
a++;
val++;
EXPECT_TRUE(a == val);
++a;
++val;
EXPECT_TRUE(a == val);
}
TEST_F(AddressTests, DecrementationWorks)
{
unsigned val = 1234;
Address a(val);
a--;
val--;
EXPECT_TRUE(a == val);
--a;
--val;
EXPECT_TRUE(a == val);
}
TEST_F(AddressTests, toHexString)
{
EXPECT_EQ("12ab", Address(0x12ab).toHexString());
}
TEST_F(AddressTests, toHexPrefixString)
{
EXPECT_EQ("0x12ab", Address(0x12ab).toHexPrefixString());
}
/**
* @brief Tests for the @c AddressRange class.
*/
class AddressRangeTests: public Test
{
};
TEST_F(AddressRangeTests, DefaultCtorUndefValues)
{
AddressRange r;
EXPECT_TRUE(r.getStart().isUndefined());
EXPECT_TRUE(r.getEnd().isUndefined());
}
TEST_F(AddressRangeTests, CtorOnlyFirstValue)
{
AddressRange r(10);
EXPECT_EQ(10, r.getStart());
EXPECT_TRUE(r.getEnd().isUndefined());
}
TEST_F(AddressRangeTests, CtorBothValues)
{
AddressRange r(10, 20);
EXPECT_EQ(10, r.getStart());
EXPECT_EQ(20, r.getEnd());
}
TEST_F(AddressRangeTests, CtorFromString)
{
AddressRange r("0x1234-0x5678");
EXPECT_EQ(0x1234, r.getStart());
EXPECT_EQ(0x5678, r.getEnd());
}
TEST_F(AddressRangeTests, ComparisonWorks)
{
AddressRange p1(10, 20);
AddressRange p2(10, 20);
AddressRange p3(10, 30);
AddressRange p4(50, 100);
AddressRange p5(100, 200);
EXPECT_TRUE(p1 == p2);
EXPECT_FALSE(p1 != p2);
EXPECT_TRUE(p1 != p3);
EXPECT_TRUE(p2 != p3);
EXPECT_TRUE(p1 != p4);
EXPECT_FALSE(p1 == p4);
EXPECT_TRUE(p1 < p4);
EXPECT_TRUE(p4 < p5);
EXPECT_TRUE(p1 < p5);
}
TEST_F(AddressRangeTests, ContainsWorks)
{
unsigned start = 10;
unsigned end = 100;
AddressRange p(start, end);
EXPECT_FALSE( p.contains( start-1 ) );
EXPECT_TRUE( p.contains( start ) );
EXPECT_TRUE( p.contains( (start+end)/2 ) );
EXPECT_TRUE( p.contains( end-1 ) );
EXPECT_FALSE( p.contains( end ) );
EXPECT_FALSE( p.contains( end+1 ) );
}
/**
* @brief Tests for the @c AddressRangeContainer class.
*/
class AddressRangeContainerTests: public Test
{
};
TEST_F(AddressRangeContainerTests, NewContainerIsEmpty)
{
AddressRangeContainer c;
EXPECT_TRUE(c.empty()) << c;
EXPECT_EQ(0, c.size()) << c;
}
TEST_F(AddressRangeContainerTests, InsertRangeNonOverlapping)
{
AddressRangeContainer c;
auto r1 = c.insert(AddressRange(10, 20));
auto r2 = c.insert(AddressRange(30, 40));
auto r3 = c.insert(AddressRange(50, 60));
EXPECT_FALSE(c.empty()) << c;
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(r1.second) << c;
EXPECT_EQ(AddressRange(10, 20), *r1.first) << c;
EXPECT_TRUE(r2.second) << c;
EXPECT_EQ(AddressRange(30, 40), *r2.first) << c;
EXPECT_TRUE(r3.second) << c;
EXPECT_EQ(AddressRange(50, 60), *r3.first) << c;
}
TEST_F(AddressRangeContainerTests, InsertRangeFullyInOldRange)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x40);
c.insert(0x100, 0x500); // should not be affected
auto r = c.insert(0x20, 0x30);
EXPECT_EQ(3, c.size()) << c;
EXPECT_FALSE(r.second) << c;
EXPECT_EQ(AddressRange(0x10, 0x40), *r.first) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, InsertRangeFullyInNewRangeOne)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x20, 0x40);
c.insert(0x100, 0x500); // should not be affected
auto r = c.insert(0x10, 0x60);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(r.second) << c;
EXPECT_EQ(AddressRange(0x10, 0x60), *r.first) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, InsertRangeMergeWithStart)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x40);
c.insert(0x100, 0x500); // should not be affected
auto r = c.insert(0x20, 0x60);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(r.second) << c;
EXPECT_EQ(AddressRange(0x10, 0x60), *r.first) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, InsertRangeMergeWithEnd)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x20, 0x40);
c.insert(0x100, 0x500); // should not be affected
auto r = c.insert(0x10, 0x30);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(r.second) << c;
EXPECT_EQ(AddressRange(0x10, 0x40), *r.first) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, InsertRangeMergeMultiple)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x7, 0x20);
c.insert(0x30, 0x40);
c.insert(0x60, 0x70);
c.insert(0x80, 0x95);
c.insert(0x100, 0x500); // should not be affected
auto r = c.insert(0x10, 0x90);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(r.second) << c;
EXPECT_EQ(AddressRange(0x7, 0x95), *r.first) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, InsertRangeMergeMultipleInside)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x15, 0x20);
c.insert(0x30, 0x40);
c.insert(0x60, 0x70);
c.insert(0x80, 0x85);
c.insert(0x100, 0x500); // should not be affected
auto r = c.insert(0x10, 0x90);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(r.second) << c;
EXPECT_EQ(AddressRange(0x10, 0x90), *r.first) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, InsertRangeMergeBordering)
{
AddressRangeContainer c;
c.insert(0x6, 0x16);
c.insert(0x0, 0x6);
c.insert(0x16, 0x100);
EXPECT_EQ(1, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x100))) << c;
}
TEST_F(AddressRangeContainerTests, operatorEqEmpty)
{
AddressRangeContainer c;
EXPECT_EQ(AddressRangeContainer(), c);
}
TEST_F(AddressRangeContainerTests, operatorEq)
{
AddressRangeContainer c;
c.insert(0x10, 0x20);
c.insert(0x30, 0x40);
AddressRangeContainer r;
r.insert(0x10, 0x20);
r.insert(0x30, 0x40);
EXPECT_EQ(r, c);
}
TEST_F(AddressRangeContainerTests, operatorNeq)
{
AddressRangeContainer c;
c.insert(0x10, 0x20);
c.insert(0x30, 0x40);
AddressRangeContainer r;
r.insert(0x10, 0x20);
r.insert(0x35, 0x40);
EXPECT_NE(r, c);
}
TEST_F(AddressRangeContainerTests, containsGetRange)
{
AddressRangeContainer c;
c.insert(0x10, 0x20);
c.insert(0x30, 0x40);
EXPECT_TRUE(c.contains(0x10));
EXPECT_EQ(AddressRange(0x10, 0x20), *c.getRange(0x10));
EXPECT_TRUE(c.contains(0x15));
EXPECT_EQ(AddressRange(0x10, 0x20), *c.getRange(0x15));
EXPECT_TRUE(c.contains(0x19));
EXPECT_EQ(AddressRange(0x10, 0x20), *c.getRange(0x19));
EXPECT_TRUE(c.contains(0x30));
EXPECT_EQ(AddressRange(0x30, 0x40), *c.getRange(0x30));
EXPECT_FALSE(c.contains(0x0));
EXPECT_EQ(nullptr, c.getRange(0x0));
EXPECT_FALSE(c.contains(0x5));
EXPECT_EQ(nullptr, c.getRange(0x5));
EXPECT_FALSE(c.contains(0x9));
EXPECT_EQ(nullptr, c.getRange(0x9));
EXPECT_FALSE(c.contains(0x21));
EXPECT_EQ(nullptr, c.getRange(0x21));
EXPECT_FALSE(c.contains(0x29));
EXPECT_EQ(nullptr, c.getRange(0x29));
EXPECT_FALSE(c.contains(0x41));
EXPECT_EQ(nullptr, c.getRange(0x41));
}
TEST_F(AddressRangeContainerTests, containsExact)
{
AddressRangeContainer c;
c.insert(0x10, 0x20);
EXPECT_TRUE(c.containsExact(AddressRange(0x10, 0x20)));
EXPECT_FALSE(c.containsExact(AddressRange(0x10, 0x19)));
EXPECT_FALSE(c.containsExact(AddressRange(0x11, 0x20)));
EXPECT_FALSE(c.containsExact(AddressRange(0x15, 0x17)));
EXPECT_FALSE(c.containsExact(AddressRange(0x20, 0x21)));
}
TEST_F(AddressRangeContainerTests, RemoveRangeMiss)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x40); // should not be affected
c.insert(0x100, 0x500); // should not be affected
c.remove(0x50, 0x90);
EXPECT_FALSE(c.empty()) << c;
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x10, 0x40))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemoveExactlyOldRange)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x40);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x10, 0x40);
EXPECT_EQ(2, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemovedRangeFullyInNewExistingRange)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x60);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x20, 0x40);
EXPECT_EQ(4, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x10, 0x20))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x40, 0x60))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemovedRangeFullyInNewExistingRangeLeave1Ranges)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x60);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x11, 0x5f);
EXPECT_EQ(4, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x10, 0x11))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x5f, 0x60))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemoveRangeFromStart)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x40);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x10, 0x20);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x20, 0x40))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemoveRangeFromStartLeave1Range)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x40);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x10, 0x3f);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x3f, 0x40))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemoveRangeFromEnd)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x40);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x20, 0x40);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x10, 0x20))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemoveRangeFromEndLeave1Range)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x40);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x11, 0x40);
EXPECT_EQ(3, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x10, 0x11))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemoveRangeMultipleOutside)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x20);
c.insert(0x30, 0x40);
c.insert(0x60, 0x70);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x16, 0x64);
EXPECT_EQ(4, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x10, 0x16))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x64, 0x70))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemoveRangeMultipleInside)
{
AddressRangeContainer c;
c.insert(0x0, 0x5); // should not be affected
c.insert(0x10, 0x20);
c.insert(0x30, 0x40);
c.insert(0x60, 0x70);
c.insert(0x100, 0x500); // should not be affected
c.remove(0x7, 0x90);
EXPECT_EQ(2, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x0, 0x5))) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x100, 0x500))) << c;
}
TEST_F(AddressRangeContainerTests, RemoveAndGetRange)
{
AddressRangeContainer c;
c.insert(0x0, 0x100);
c.remove(0x0, 0x50);
EXPECT_EQ(1, c.size()) << c;
EXPECT_TRUE(c.containsExact(AddressRange(0x50, 0x100))) << c;
EXPECT_EQ(AddressRange(0x50, 0x100), *c.getRange(0x50));
EXPECT_EQ(AddressRange(0x50, 0x100), *c.getRange(0x60));
EXPECT_EQ(AddressRange(0x50, 0x100), *c.getRange(0xff));
}
} // namespace tests
} // namespace utils
} // namespace retdec
| 6,692 |
713 | package org.infinispan.notifications.cachelistener.event;
/**
* This event subtype is passed in to any method annotated with
* {@link org.infinispan.notifications.cachelistener.annotation.CacheEntryPassivated}.
*
* @author <NAME>
* @author <NAME>
* @since 5.0
*/
public interface CacheEntryPassivatedEvent<K, V> extends CacheEntryEvent<K, V> {
/**
* Retrieves the value of the entry being passivated.
* <p />
* @return the value of the entry being passivated, if <tt>isPre()</tt> is <tt>true</tt>. <tt>null</tt> otherwise.
*/
V getValue();
}
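// Hedged usage sketch (not part of the interface above; the listener class,
// handler name, and generic types are illustrative): a @Listener-annotated
// class receives this event through a method annotated with
// @CacheEntryPassivated.
@org.infinispan.notifications.Listener
class PassivationLogger {
   @org.infinispan.notifications.cachelistener.annotation.CacheEntryPassivated
   public void onPassivation(CacheEntryPassivatedEvent<String, String> event) {
      if (event.isPre()) {
         // Per the javadoc above, getValue() is non-null only in the pre phase.
         System.out.println("passivating value: " + event.getValue());
      }
   }
}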
| 198 |
1,382 | /*
* Copyright (c) 2007 - 2015 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
//
// autotest fft data for 10-point transform
//
#include <complex.h>
float complex fft_test_x10[] = {
-0.380648737020 + 1.003981780953*_Complex_I,
1.031511152163 + -2.625896014009*_Complex_I,
-1.083239396623 + 1.646877001105*_Complex_I,
0.951587457487 + -0.004983138281*_Complex_I,
0.407589360084 + 0.345698641918*_Complex_I,
0.549291472049 + 0.542579734652*_Complex_I,
-0.911825526748 + 1.282009726257*_Complex_I,
-0.617849040964 + 0.696673367751*_Complex_I,
1.097501043733 + 1.373947311009*_Complex_I,
0.848713422957 + -0.738252787172*_Complex_I};
float complex fft_test_y10[] = {
1.892631207117 + 3.522635624182*_Complex_I,
-1.167216826866 + -3.158947047615*_Complex_I,
-0.019614668329 + 1.291770408491*_Complex_I,
-3.842057814631 + -1.668342848977*_Complex_I,
-2.323082893679 + 1.200058008683*_Complex_I,
-3.633877720265 + 7.782393298301*_Complex_I,
0.649048218143 + 6.997144832999*_Complex_I,
1.025423884758 + 1.907666229150*_Complex_I,
0.644231811894 + -5.278801296330*_Complex_I,
2.968027431661 + -2.555759399357*_Complex_I};
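/* Hedged usage sketch (assumes liquid-dsp's fft plan API -- fft_create_plan,
 * fft_execute, fft_destroy_plan -- whose exact names may vary by version):
 * run a forward 10-point FFT on x10 and compare each output bin against the
 * expected data y10 within a small tolerance. */
#include <math.h>
#include "liquid.h"

static int check_fft10(void)
{
    float complex y[10];
    fftplan q = fft_create_plan(10, fft_test_x10, y, LIQUID_FFT_FORWARD, 0);
    fft_execute(q);
    fft_destroy_plan(q);
    for (int i = 0; i < 10; i++)
        if (cabsf(y[i] - fft_test_y10[i]) > 1e-3f)
            return 0;   /* bin i deviates from the reference */
    return 1;           /* all bins match within tolerance */
}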
| 912 |
2,003 | <filename>src/sdk/sdk_perf.h
// Copyright (c) 2015, Baidu.com, Inc. All Rights Reserved
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TERA_SDK_SDK_PERF_H_
#define TERA_SDK_SDK_PERF_H_
#include <thread>
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "common/metric/metric_counter.h"
#include "common/metric/collector_report.h"
#include "common/this_thread.h"
#include "tera.h"
DECLARE_int32(tera_sdk_perf_collect_interval);
namespace tera {
namespace sdk {
class PerfCollecter {
public:
PerfCollecter() : stopped_(false) {}
~PerfCollecter() {}
void Run() { thread_ = std::thread{&PerfCollecter::ScheduleCollect, this}; }
void Stop() {
stopped_ = true;
thread_.join();
}
private:
void ScheduleCollect() {
while (!stopped_) {
CollectorReportPublisher::GetInstance().Refresh();
DumpLog();
ThisThread::Sleep(FLAGS_tera_sdk_perf_collect_interval);
}
}
void DumpLog();
private:
std::thread thread_;
bool stopped_;
};
} // namespace sdk
} // namespace tera
#endif // TERA_SDK_SDK_PERF_H_
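// Hedged usage sketch (not part of the header above): Run() spawns the
// schedule thread; Stop() must be called exactly once after Run(), since it
// sets the stop flag and then joins, and destroying a still-joinable
// std::thread terminates the process.
inline void ExamplePerfCollection() {
  tera::sdk::PerfCollecter collecter;  // spelling follows the header
  collecter.Run();   // periodic CollectorReportPublisher refresh + DumpLog()
  // ... perform Tera SDK operations here ...
  collecter.Stop();  // signal the stop flag and join the schedule thread
}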
| 448 |
2,542 | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
using namespace Reliability;
using namespace Reliability::ReconfigurationAgentComponent;
using namespace Common;
using namespace std;
using namespace ReliabilityUnitTest;
using namespace Infrastructure;
namespace
{
std::wstring DefaultActivityIdPrefix(L"foo");
}
class TimerHolder
{
DENY_COPY(TimerHolder);
public:
TimerHolder(unique_ptr<RetryTimer> && timer)
: timer_(move(timer))
{
}
~TimerHolder()
{
if (timer_)
{
timer_->Close();
}
}
__declspec(property(get=get_Timer)) RetryTimer & Timer;
RetryTimer & get_Timer() { return *timer_; }
private:
unique_ptr<RetryTimer> timer_;
};
DWORD ShortTimerDurationInMs = 10;
DWORD MiddleTimerDurationInMs = 100;
class TestRetryTimer
{
protected:
TestRetryTimer() { BOOST_REQUIRE(TestSetup()); }
TEST_METHOD_SETUP(TestSetup);
~TestRetryTimer() { BOOST_REQUIRE(TestCleanup()); }
TEST_METHOD_CLEANUP(TestCleanup);
unique_ptr<TimerHolder> CreateTimerHolderEx(DWORD intervalInMs, RetryTimer::RetryTimerCallback const & callback)
{
testContext_->Config.FMMessageRetryInterval = TimeSpan::FromMilliseconds(intervalInMs);
auto timer = make_unique<RetryTimer>(
L"a",
testContext_->RA,
testContext_->Config.FMMessageRetryIntervalEntry,
DefaultActivityIdPrefix,
callback);
return make_unique<TimerHolder>(move(timer));
}
unique_ptr<TimerHolder> CreateTimerHolder(DWORD intervalInMs, std::function<void ()> const & callback)
{
return CreateTimerHolderEx(intervalInMs, [=] (wstring const&, ReconfigurationAgent&) { callback(); });
}
unique_ptr<UnitTestContext> testContext_;
};
bool TestRetryTimer::TestSetup()
{
testContext_ = UnitTestContext::Create();
return true;
}
bool TestRetryTimer::TestCleanup()
{
testContext_->Cleanup();
return true;
}
BOOST_AUTO_TEST_SUITE(Functional)
BOOST_FIXTURE_TEST_SUITE(TestRetryTimerSuite,TestRetryTimer)
BOOST_AUTO_TEST_CASE(OnConstructSequenceNumberIsZero)
{
auto holder = CreateTimerHolder(ShortTimerDurationInMs, [] { });
RetryTimer & timer = holder->Timer;
VERIFY_ARE_EQUAL(0, timer.SequenceNumber);
timer.Close();
}
BOOST_AUTO_TEST_CASE(OnConstructTimerIsNotSet)
{
auto holder = CreateTimerHolder(ShortTimerDurationInMs, [] { });
RetryTimer & timer = holder->Timer;
VERIFY_IS_FALSE(timer.IsSet);
timer.Close();
}
BOOST_AUTO_TEST_CASE(SetTimerInvokesCallback)
{
Common::atomic_bool isInvoked(false);
auto holder = CreateTimerHolder(ShortTimerDurationInMs, [&isInvoked]
{
isInvoked.store(true);
});
RetryTimer & timer = holder->Timer;
timer.Set();
BusyWaitUntil([&isInvoked] { return isInvoked.load(); });
timer.Close();
}
BOOST_AUTO_TEST_CASE(SetTimerIncrementsSequenceNumber)
{
auto holder = CreateTimerHolder(ShortTimerDurationInMs, [] {});
RetryTimer & timer = holder->Timer;
auto initialSequenceNumber = timer.SequenceNumber;
timer.Set();
auto finalSequenceNumber = timer.SequenceNumber;
timer.Close();
VERIFY_ARE_EQUAL(1, finalSequenceNumber - initialSequenceNumber);
}
BOOST_AUTO_TEST_CASE(SetTimerSetsIsSetToTrue)
{
auto holder = CreateTimerHolder(ShortTimerDurationInMs, [] {});
RetryTimer & timer = holder->Timer;
timer.Set();
VERIFY_IS_TRUE(timer.IsSet);
timer.Close();
}
BOOST_AUTO_TEST_CASE(TimerIsNotSetInCallback)
{
RetryTimer* retryTimer = nullptr;
bool timerSetValueInCallback = false;
Common::atomic_bool isInvoked(false);
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, [&timerSetValueInCallback, &retryTimer, &isInvoked]
{
timerSetValueInCallback = retryTimer->IsSet;
isInvoked.store(true);
});
RetryTimer & timer = holder->Timer;
retryTimer = &timer;
timer.Set();
BusyWaitUntil([&] () { return isInvoked.load(); });
VERIFY_IS_FALSE(timerSetValueInCallback);
timer.Close();
}
BOOST_AUTO_TEST_CASE(CallbackIsInvokedOnlyOnceRegardlessOfSetCount)
{
Common::atomic_long value(0);
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, [&value]
{
value++;
});
RetryTimer & timer = holder->Timer;
timer.Set();
timer.Set();
BusyWaitUntil([&] () { return 1 == value.load(); });
VERIFY_ARE_EQUAL(1, value.load());
timer.Close();
}
BOOST_AUTO_TEST_CASE(TryCancelWithEqualSequenceNumberCancelsCallback)
{
Common::atomic_bool isInvoked(false);
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, [&isInvoked]
{
isInvoked.store(true);
});
RetryTimer & timer = holder->Timer;
timer.Set();
VERIFY_IS_TRUE(timer.IsSet);
VERIFY_IS_TRUE(timer.TryCancel(timer.SequenceNumber));
VERIFY_IS_FALSE(timer.IsSet);
Sleep(2 * MiddleTimerDurationInMs);
    // isInvoked must still be false
VERIFY_IS_FALSE(isInvoked.load());
timer.Close();
}
BOOST_AUTO_TEST_CASE(TryCancelWithLowerSequenceNumberDoesNotCancelCallback)
{
Common::atomic_bool isInvoked(false);
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, [&isInvoked]
{
isInvoked.store(true);
});
RetryTimer & timer = holder->Timer;
timer.Set();
VERIFY_IS_TRUE(timer.IsSet);
VERIFY_IS_FALSE(timer.TryCancel(timer.SequenceNumber - 1));
VERIFY_IS_TRUE(timer.IsSet);
BusyWaitUntil([&isInvoked] { return isInvoked.load(); }, 20, MiddleTimerDurationInMs * 10);
timer.Close();
}
BOOST_AUTO_TEST_CASE(SettingTheTimerInTheCallbackWorks)
{
RetryTimer* timerPtr = nullptr;
Common::atomic_long count(0);
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, [&count, &timerPtr]
{
count++;
if (count.load() == 1)
{
timerPtr->Set();
}
});
RetryTimer & timer = holder->Timer;
timerPtr = &timer;
timer.Set();
BusyWaitUntil([&count] { return count.load() == 2; });
timer.Close();
}
BOOST_AUTO_TEST_CASE(SetTimerThenCancelAndThenSetFiresTimer)
{
Common::atomic_bool isInvoked(false);
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, [&isInvoked]
{
isInvoked.store(true);
});
RetryTimer & timer = holder->Timer;
// Set the timer
timer.Set();
// Cancel the timer
timer.TryCancel(timer.SequenceNumber);
    // the callback can't have been invoked yet
VERIFY_IS_FALSE(isInvoked.load());
// Set the timer again
timer.Set();
// callback should be invoked
BusyWaitUntil([&isInvoked] { return isInvoked.load();});
timer.Close();
}
BOOST_AUTO_TEST_CASE(SetIsNoOpAfterClosingTimer)
{
Common::atomic_bool isInvoked(false);
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, [&isInvoked]
{
isInvoked.store(true);
});
RetryTimer & timer = holder->Timer;
// Close the timer
timer.Close();
// set
timer.Set();
VERIFY_IS_FALSE(timer.IsSet);
// wait
Sleep(MiddleTimerDurationInMs * 4);
// assert that cb was not invoked
VERIFY_IS_FALSE(isInvoked.load());
}
BOOST_AUTO_TEST_CASE(CloseSetsTimerToNull)
{
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, []
{
});
RetryTimer & timer = holder->Timer;
timer.Close();
VERIFY_IS_NULL(timer.Test_GetRawTimer());
}
BOOST_AUTO_TEST_CASE(SuccessfulTryCancelAfterCloseDoesNotRecreateTimer)
{
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, []
{
});
RetryTimer & timer = holder->Timer;
timer.Close();
timer.TryCancel(timer.SequenceNumber);
VERIFY_IS_FALSE(timer.IsSet);
VERIFY_IS_NULL(timer.Test_GetRawTimer());
}
BOOST_AUTO_TEST_CASE(MultipleCloseIsNoOp)
{
auto holder = CreateTimerHolder(MiddleTimerDurationInMs, []
{
});
RetryTimer & timer = holder->Timer;
timer.Close();
timer.Close();
VERIFY_IS_FALSE(timer.IsSet);
}
BOOST_AUTO_TEST_CASE(CloseTimerWhileCallbackIsRunningDoesNotBlowUp)
{
ManualResetEvent ev;
Common::atomic_bool hasCallbackStarted(false);
Common::atomic_bool hasCallbackEnded(false);
auto holder = CreateTimerHolder(ShortTimerDurationInMs, [&]
{
hasCallbackStarted.store(true);
ev.WaitOne();
hasCallbackEnded.store(true);
});
RetryTimer & timer = holder->Timer;
// reset the event
ev.Reset();
// Set the timer
timer.Set();
// Wait for the callback to start and it to wait on the event
BusyWaitUntil([&] { return hasCallbackStarted.load(); });
// Close the timer while callback is running
timer.Close();
// Timer should be null
VERIFY_IS_TRUE(nullptr == timer.Test_GetRawTimer());
// Set the event - this resumes the cb
ev.Set();
// Wait for callback to end
BusyWaitUntil([&] { return hasCallbackEnded.load(); });
// Set again - this should not AV either
timer.Set();
}
BOOST_AUTO_TEST_CASE(ActivityIdPrefixTestHelper)
{
ExclusiveLock lock;
vector<wstring> activityIds;
size_t totalTries = 3;
auto holder = CreateTimerHolderEx(ShortTimerDurationInMs, [&] (wstring const & aid, ReconfigurationAgent&)
{
AcquireExclusiveLock grab(lock);
activityIds.push_back(aid);
});
RetryTimer & timer = holder->Timer;
// let callbacks happen
for(size_t i = 0; i < totalTries; i++)
{
size_t expected = i + 1;
timer.Set();
BusyWaitUntil([&] () -> bool
{
AcquireExclusiveLock grab(lock);
return expected == activityIds.size();
});
}
// close timer
timer.Close();
// assert three callbacks
VERIFY_ARE_EQUAL(totalTries, activityIds.size());
// each aid should start with the correct prefix
for(size_t i = 0; i < activityIds.size(); i++)
{
VERIFY_ARE_EQUAL(0, activityIds[i].compare(0, DefaultActivityIdPrefix.size(), DefaultActivityIdPrefix.c_str()));
}
// each aid should be unique
for(size_t i = 0; i < activityIds.size(); i++)
{
for(size_t j = 0; j < activityIds.size(); j++)
{
if (i == j)
{
continue;
}
VERIFY_ARE_NOT_EQUAL(activityIds[i], activityIds[j]);
}
}
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
| 5,876 |
1,144 | package de.metas.handlingunits;
/*
* #%L
* de.metas.handlingunits.base
* %%
* Copyright (C) 2015 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
import de.metas.handlingunits.model.I_M_HU;
import de.metas.handlingunits.model.I_M_HU_PI;
/**
* Builds the display name of a {@link I_M_HU}.
*/
public interface IHUDisplayNameBuilder
{
/**
* @return display name of underlying HU
*/
public String build();
/**
* @return
* <ul>
* <li>the packing instructions name of current HU
* <li>or if it's an aggregate HU - the name of the PI that is represented.
* </ul>
*/
String getPIName();
/**
* @return true if included HU count shall be displayed
*/
public boolean isShowIncludedHUCount();
/**
* @param showIncludedHUCount true if included HU count shall be displayed
*/
public IHUDisplayNameBuilder setShowIncludedHUCount(boolean showIncludedHUCount);
/**
* @return included HU suffix (i.e TU, VHU etc)
*/
public String getIncludedHUCountSuffix();
/**
* @param includedHUCountSuffix included HU suffix (i.e TU, VHU etc)
*/
public IHUDisplayNameBuilder setIncludedHUCountSuffix(String includedHUCountSuffix);
/**
* Gets included HUs count for current HU.
*
* NOTE this method does not care about {@link #isShowIncludedHUCount()}.
*
* @return included HUs count for current HU
*/
int getIncludedHUsCount();
/**
* @return true if {@link I_M_HU_PI#getName()} shall be displayed below value
*/
public boolean isShowHUPINameNextLine();
/**
* @param showHUPINameNextLine if {@link I_M_HU_PI#getName()} shall be displayed below value
*/
public IHUDisplayNameBuilder setShowHUPINameNextLine(boolean showHUPINameNextLine);
/**
* Sets if we shall display a "Destroyed" marker in case the HU is destroyed.
*
	 * NOTE: usually this "tag" is not displayed, because our business logic normally does not carry or care about destroyed HUs.
	 * But if it does happen, displaying it is important, because in most cases it indicates an issue.
*/
public IHUDisplayNameBuilder setShowIfDestroyed(boolean showIfDestroyed);
}
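// Hedged usage sketch (how a concrete builder instance is obtained is an
// assumption; only the fluent interface above is given):
//
// IHUDisplayNameBuilder builder = ...; // e.g. from a display-name service for a given I_M_HU
// String displayName = builder
// 		.setShowIncludedHUCount(true)
// 		.setIncludedHUCountSuffix("TU")
// 		.setShowHUPINameNextLine(false)
// 		.setShowIfDestroyed(true)
// 		.build();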
| 916 |
755 | <gh_stars>100-1000
/**
* @project: Overload
* @author: <NAME>.
* @licence: MIT
*/
#include <utility>
#include <stdexcept>
#include <cmath>
#include "OvMaths/FVector3.h"
const OvMaths::FVector3 OvMaths::FVector3::One(1.0f, 1.0f, 1.0f);
const OvMaths::FVector3 OvMaths::FVector3::Zero(0.0f, 0.0f, 0.0f);
const OvMaths::FVector3 OvMaths::FVector3::Forward(0.0f, 0.0f, 1.0f);
const OvMaths::FVector3 OvMaths::FVector3::Right(1.0f, 0.0f, 0.0f);
const OvMaths::FVector3 OvMaths::FVector3::Up(0.0f, 1.0f, 0.0f);
OvMaths::FVector3::FVector3(float p_x, float p_y, float p_z) : x(p_x), y(p_y), z(p_z)
{
}
OvMaths::FVector3::FVector3(const FVector3& p_toCopy) : x(p_toCopy.x), y(p_toCopy.y), z(p_toCopy.z)
{
}
OvMaths::FVector3 OvMaths::FVector3::operator-() const
{
return operator*(-1);
}
OvMaths::FVector3 OvMaths::FVector3::operator=(const FVector3& p_other)
{
this->x = p_other.x;
this->y = p_other.y;
this->z = p_other.z;
return *this;
}
OvMaths::FVector3 OvMaths::FVector3::operator+(const FVector3& p_other) const
{
return Add(*this, p_other);
}
OvMaths::FVector3& OvMaths::FVector3::operator+=(const FVector3& p_other)
{
*this = Add(*this, p_other);
return *this;
}
OvMaths::FVector3 OvMaths::FVector3::operator-(const FVector3& p_other) const
{
return Substract(*this, p_other);
}
OvMaths::FVector3& OvMaths::FVector3::operator-=(const FVector3& p_other)
{
*this = Substract(*this, p_other);
return *this;
}
OvMaths::FVector3 OvMaths::FVector3::operator*(float p_scalar) const
{
return Multiply(*this, p_scalar);
}
OvMaths::FVector3& OvMaths::FVector3::operator*=(float p_scalar)
{
*this = Multiply(*this, p_scalar);
return *this;
}
OvMaths::FVector3 OvMaths::FVector3::operator/(float p_scalar) const
{
return Divide(*this, p_scalar);
}
OvMaths::FVector3& OvMaths::FVector3::operator/=(float p_scalar)
{
*this = Divide(*this, p_scalar);
return *this;
}
bool OvMaths::FVector3::operator==(const FVector3 & p_other)
{
return
this->x == p_other.x &&
this->y == p_other.y &&
this->z == p_other.z;
}
bool OvMaths::FVector3::operator!=(const FVector3 & p_other)
{
return !operator==(p_other);
}
OvMaths::FVector3 OvMaths::FVector3::Add(const FVector3& p_left, const FVector3& p_right)
{
return FVector3
(
p_left.x + p_right.x,
p_left.y + p_right.y,
p_left.z + p_right.z
);
}
OvMaths::FVector3 OvMaths::FVector3::Substract(const FVector3& p_left, const FVector3& p_right)
{
return FVector3
(
p_left.x - p_right.x,
p_left.y - p_right.y,
p_left.z - p_right.z
);
}
OvMaths::FVector3 OvMaths::FVector3::Multiply(const FVector3& p_target, float p_scalar)
{
return FVector3
(
p_target.x * p_scalar,
p_target.y * p_scalar,
p_target.z * p_scalar
);
}
OvMaths::FVector3 OvMaths::FVector3::Divide(const FVector3& p_left, float p_scalar)
{
FVector3 result(p_left);
if (p_scalar == 0)
throw std::logic_error("Division by 0");
result.x /= p_scalar;
result.y /= p_scalar;
result.z /= p_scalar;
return result;
}
float OvMaths::FVector3::Length(const FVector3& p_target)
{
return std::sqrt(p_target.x * p_target.x + p_target.y * p_target.y + p_target.z * p_target.z);
}
float OvMaths::FVector3::Dot(const FVector3& p_left, const FVector3& p_right)
{
return p_left.x * p_right.x + p_left.y * p_right.y + p_left.z * p_right.z;
}
float OvMaths::FVector3::Distance(const FVector3 & p_left, const FVector3 & p_right)
{
return std::sqrt
(
(p_left.x - p_right.x) * (p_left.x - p_right.x) +
(p_left.y - p_right.y) * (p_left.y - p_right.y) +
(p_left.z - p_right.z) * (p_left.z - p_right.z)
);
}
OvMaths::FVector3 OvMaths::FVector3::Cross(const FVector3 & p_left, const FVector3 & p_right)
{
return FVector3
(
p_left.y * p_right.z - p_left.z * p_right.y,
p_left.z * p_right.x - p_left.x * p_right.z,
p_left.x * p_right.y - p_left.y * p_right.x
);
}
OvMaths::FVector3 OvMaths::FVector3::Normalize(const FVector3 & p_target)
{
float length = Length(p_target);
if (length > 0.0f)
{
float targetLength = 1.0f / length;
return FVector3
(
p_target.x * targetLength,
p_target.y * targetLength,
p_target.z * targetLength
);
}
else
{
return FVector3::Zero;
}
}
OvMaths::FVector3 OvMaths::FVector3::Lerp(const FVector3& p_start, const FVector3& p_end, float p_alpha)
{
return (p_start + (p_end - p_start) * p_alpha);
}
float OvMaths::FVector3::AngleBetween(const FVector3& p_from, const FVector3& p_to)
{
float lengthProduct = Length(p_from) * Length(p_to);
if (lengthProduct > 0.0f)
{
float fractionResult = Dot(p_from, p_to) / lengthProduct;
if (fractionResult >= -1.0f && fractionResult <= 1.0f)
return acosf(fractionResult);
}
return 0.0f;
}
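// Hedged usage sketch exercising the API above; the asserted values follow
// directly from the implementations (e.g. Cross(Right, Up) yields Forward).
#include <cassert>

int main()
{
	using OvMaths::FVector3;
	FVector3 v = FVector3::Right + FVector3::Up;             // (1, 1, 0)
	assert(FVector3::Length(v) > 1.414f && FVector3::Length(v) < 1.415f);
	FVector3 n = FVector3::Normalize(v);                     // unit length
	assert(FVector3::Dot(n, n) > 0.999f && FVector3::Dot(n, n) < 1.001f);
	FVector3 f = FVector3::Cross(FVector3::Right, FVector3::Up);
	assert(f == FVector3::Forward);                          // (0, 0, 1)
	return 0;
}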
| 2,169 |
2,296 | package com.github.appintro.example.ui.java;
import android.os.Bundle;
import androidx.annotation.Nullable;
import androidx.fragment.app.Fragment;
import com.github.appintro.AppIntro;
import com.github.appintro.AppIntroFragment;
import com.github.appintro.AppIntroPageTransformerType;
import com.github.appintro.example.R;
public class JavaIntro extends AppIntro {
@Override
public void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
addSlide(AppIntroFragment.newInstance("Welcome!",
"This is a demo example in java of AppIntro library, with a custom background on each slide!",
R.drawable.ic_slide1));
addSlide(AppIntroFragment.newInstance(
"Clean App Intros",
"This library offers developers the ability to add clean app intros at the start of their apps.",
R.drawable.ic_slide2
));
addSlide(AppIntroFragment.newInstance(
"Simple, yet Customizable",
"The library offers a lot of customization, while keeping it simple for those that like simple.",
R.drawable.ic_slide3
));
addSlide(AppIntroFragment.newInstance(
"Explore",
"Feel free to explore the rest of the library demo!",
R.drawable.ic_slide4
));
// Fade Transition
setTransformer(AppIntroPageTransformerType.Fade.INSTANCE);
// Show/hide status bar
showStatusBar(true);
        //Speed up or slow down scrolling
setScrollDurationFactor(2);
//Enable the color "fade" animation between two slides (make sure the slide implements SlideBackgroundColorHolder)
setColorTransitionsEnabled(true);
//Prevent the back button from exiting the slides
setSystemBackButtonLocked(true);
//Activate wizard mode (Some aesthetic changes)
setWizardMode(true);
//Show/hide skip button
setSkipButtonEnabled(true);
//Enable immersive mode (no status and nav bar)
setImmersiveMode();
//Enable/disable page indicators
setIndicatorEnabled(true);
        //Show/hide ALL buttons
setButtonsEnabled(true);
}
@Override
protected void onSkipPressed(Fragment currentFragment) {
super.onSkipPressed(currentFragment);
finish();
}
@Override
protected void onDonePressed(Fragment currentFragment) {
super.onDonePressed(currentFragment);
finish();
}
}
| 1,122 |
6,451 | <gh_stars>1000+
class TemporaryNoOpSummarizer:
def render(self, input):
return input
class DropAllVowelsSummarizer:
    def render(self, input):
        # Drop every ASCII vowel (both cases) from the input string.
        return "".join(ch for ch in input if ch.lower() not in "aeiou")
| 71 |
6,034 | <reponame>ssSlowDown/onemall<filename>moved/product/product-service-impl/src/main/java/cn/iocoder/mall/product/convert/ProductSpuConvert.java
package cn.iocoder.mall.product.convert;
import cn.iocoder.common.framework.util.StringUtil;
import cn.iocoder.mall.product.api.bo.*;
import cn.iocoder.mall.product.api.dto.ProductSkuAddOrUpdateDTO;
import cn.iocoder.mall.product.api.dto.ProductSpuAddDTO;
import cn.iocoder.mall.product.api.dto.ProductSpuUpdateDTO;
import cn.iocoder.mall.product.dataobject.ProductCategoryDO;
import cn.iocoder.mall.product.dataobject.ProductSkuDO;
import cn.iocoder.mall.product.dataobject.ProductSpuDO;
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;
import org.mapstruct.Mappings;
import org.mapstruct.Named;
import org.mapstruct.factory.Mappers;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@Mapper
public interface ProductSpuConvert {
ProductSpuConvert INSTANCE = Mappers.getMapper(ProductSpuConvert.class);
@Mappings({
@Mapping(source = "picUrls", target = "picUrls", qualifiedByName = "translatePicUrlsFromString")
})
ProductSpuBO convert(ProductSpuDO spu);
@Named("translatePicUrlsFromString")
default List<String> translatePicUrlsFromString(String picUrls) {
return StringUtil.split(picUrls, ",");
}
@Mappings({})
List<ProductSpuBO> convert(List<ProductSpuDO> spus);
@Mappings({
@Mapping(source = "picUrls", target = "picUrls", ignore = true)
})
ProductSpuDO convert(ProductSpuAddDTO productSpuAddDTO);
@Mappings({
@Mapping(source = "attrs", target = "attrs", ignore = true)
})
ProductSkuDO convert(ProductSkuAddOrUpdateDTO productSkuAddDTO);
@Mappings({
@Mapping(source = "picUrls", target = "picUrls", ignore = true)
})
ProductSpuDO convert(ProductSpuUpdateDTO productSpuUpdateDTO);
@Mappings({})
ProductSpuDetailBO convert(ProductSpuBO spu);
@Mappings({
@Mapping(source = "picUrls", target = "picUrls", ignore = true)
})
ProductSpuDetailBO convert2(ProductSpuDO spu);
@Mappings({
@Mapping(source = "picUrls", target = "picUrls", ignore = true)
})
ProductSkuDetailBO.Spu convert3(ProductSpuDO spu);
@Mappings({
@Mapping(source = "attrs", target = "attrs", ignore = true)
})
ProductSpuDetailBO.Sku convert2(ProductSkuDO sku);
@Mappings({
@Mapping(source = "attrs", target = "attrs", ignore = true)
})
ProductSkuDetailBO convert3(ProductSkuDO sku);
@Mappings({
// @Mapping(source = "attrs", target = "attrs", ignore = true) // TODO 芋艿 后续补充
})
ProductSkuBO convert4(ProductSkuDO sku);
    @Mappings({}) // TODO 芋艿: take a closer look at the mapstruct API later and optimize this part
default List<ProductSkuDetailBO> convert3(List<ProductSkuDO> skus, List<ProductSpuDO> spus, List<ProductAttrAndValuePairBO> productAttrDetailBOs) {
        // Build a map of ProductAttrDetailBO, where the KEY is ProductAttrDetailBO.attrValueId, i.e. the id of the attribute value
Map<Integer, ProductAttrAndValuePairBO> productAttrDetailBOMap = productAttrDetailBOs.stream().collect(
Collectors.toMap(ProductAttrAndValuePairBO::getAttrValueId, productAttrDetailBO -> productAttrDetailBO));
        // Build a map of ProductSpuDO
Map<Integer, ProductSkuDetailBO.Spu> spuMap = spus.stream().collect(
Collectors.toMap(ProductSpuDO::getId, spu -> ProductSpuConvert.this.convert3(spu).setPicUrls(StringUtil.split(spu.getPicUrls(), ","))));
        // Assemble the result
List<ProductSkuDetailBO> spuDetailList = new ArrayList<>(skus.size());
for (ProductSkuDO sku : skus) {
            // Create the ProductSkuDetailBO object
ProductSkuDetailBO skuDetail = ProductSpuConvert.this.convert3(sku)
.setAttrs(new ArrayList<>())
.setSpu(spuMap.get(sku.getSpuId()));
spuDetailList.add(skuDetail);
            // Set the attrs (specification attributes) of the ProductSkuDetailBO
List<String> attrs = StringUtil.split(sku.getAttrs(), ",");
attrs.forEach(attr -> skuDetail.getAttrs().add(productAttrDetailBOMap.get(Integer.valueOf(attr))));
}
        // Return the result
return spuDetailList;
}
}
| 1,958 |
669 | <gh_stars>100-1000
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
BLOCK_SPREAD = 1
# BLOCK_TO_PICKUPS = 2
# BREWING_COMPLETED = 3
# BREWING_COMPLETING = 4
CHAT = 5
CHUNK_AVAILABLE = 6
# CHUNK_GENERATED = 7
# CHUNK_GENERATING = 8
# CHUNK_UNLOADED = 9
# CHUNK_UNLOADING = 10
COLLECTING_PICKUP = 11
# CRAFTING_NO_RECIPE = 12
# DISCONNECT = 13
# ENTITY_ADD_EFFECT = 14
# ENTITY_CHANGED_WORLD = 15
# ENTITY_CHANGING_WORLD = 16
# ENTITY_TELEPORT = 17
# EXECUTE_COMMAND = 18
EXPLODED = 19
# EXPLODING = 20
# HANDSHAKE = 21
# HOPPER_PULLING_ITEM = 22
# HOPPER_PUSHING_ITEM = 23
KILLED = 24
# KILLING = 25
# LOGIN = 26
# LOGIN_FORGE = 27
# PLAYER_ANIMATION = 28
# PLAYER_BREAKING_BLOCK = 29
PLAYER_BROKEN_BLOCK = 30
PLAYER_DESTROYED = 31
PLAYER_EATING = 32
# PLAYER_FISHED = 33
# PLAYER_FISHING = 34
PLAYER_FOOD_LEVEL_CHANGE = 35
# PLAYER_JOINED = 36
# PLAYER_LEFT_CLICK = 37
PLAYER_MOVING = 38
# PLAYER_OPENING_WINDOW = 39
PLAYER_PLACED_BLOCK = 40
# PLAYER_PLACING_BLOCK = 41
# PLAYER_RIGHT_CLICK = 42
# PLAYER_RIGHT_CLICKING_ENTITY = 43
PLAYER_SHOOTING = 44
PLAYER_SPAWNED = 45
# PLAYER_TOSSING_ITEM = 46
PLAYER_USED_BLOCK = 47
PLAYER_USED_ITEM = 48
# PLAYER_USING_BLOCK = 49
# PLAYER_USING_ITEM = 50
# PLUGINS_LOADED = 51
# PLUGIN_MESSAGE = 52
POST_CRAFTING = 53
PRE_CRAFTING = 54
PROJECTILE_HIT_BLOCK = 55
PROJECTILE_HIT_ENTITY = 56
# SERVER_PING = 57
SPAWNED_ENTITY = 58
SPAWNED_MONSTER = 59
# SPAWNING_ENTITY = 60
# SPAWNING_MONSTER = 61
TAKE_DAMAGE = 62
# TICK = 63
UPDATED_SIGN = 64
# UPDATING_SIGN = 65
WEATHER_CHANGED = 66
# WEATHER_CHANGING = 67
WORLD_STARTED = 68
WORLD_TICK = 69
MONSTER_MOVED = 70
PLAYER_LOOK = 71
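# Hedged usage sketch (handler bodies are illustrative): the numeric event ids
# above can key a dispatch table on the Python side.
_HANDLERS = {
    CHAT: lambda *args: print("chat event:", args),
    PLAYER_SPAWNED: lambda *args: print("player spawned:", args),
}

def dispatch(event_id, *args):
    # Look up the handler for this event id; unknown ids are ignored.
    handler = _HANDLERS.get(event_id)
    if handler is not None:
        handler(*args)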
| 753 |
2,151 | /*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef COMPAT_ATOMICS_WIN32_STDATOMIC_H
#define COMPAT_ATOMICS_WIN32_STDATOMIC_H
#define WIN32_LEAN_AND_MEAN
#include <stddef.h>
#include <stdint.h>
#include <windows.h>
#define ATOMIC_FLAG_INIT 0
#define ATOMIC_VAR_INIT(value) (value)
#define atomic_init(obj, value) \
do { \
*(obj) = (value); \
} while(0)
#define kill_dependency(y) ((void)0)
#define atomic_thread_fence(order) \
MemoryBarrier();
#define atomic_signal_fence(order) \
((void)0)
#define atomic_is_lock_free(obj) 0
typedef intptr_t atomic_flag;
typedef intptr_t atomic_bool;
typedef intptr_t atomic_char;
typedef intptr_t atomic_schar;
typedef intptr_t atomic_uchar;
typedef intptr_t atomic_short;
typedef intptr_t atomic_ushort;
typedef intptr_t atomic_int;
typedef intptr_t atomic_uint;
typedef intptr_t atomic_long;
typedef intptr_t atomic_ulong;
typedef intptr_t atomic_llong;
typedef intptr_t atomic_ullong;
typedef intptr_t atomic_wchar_t;
typedef intptr_t atomic_int_least8_t;
typedef intptr_t atomic_uint_least8_t;
typedef intptr_t atomic_int_least16_t;
typedef intptr_t atomic_uint_least16_t;
typedef intptr_t atomic_int_least32_t;
typedef intptr_t atomic_uint_least32_t;
typedef intptr_t atomic_int_least64_t;
typedef intptr_t atomic_uint_least64_t;
typedef intptr_t atomic_int_fast8_t;
typedef intptr_t atomic_uint_fast8_t;
typedef intptr_t atomic_int_fast16_t;
typedef intptr_t atomic_uint_fast16_t;
typedef intptr_t atomic_int_fast32_t;
typedef intptr_t atomic_uint_fast32_t;
typedef intptr_t atomic_int_fast64_t;
typedef intptr_t atomic_uint_fast64_t;
typedef intptr_t atomic_intptr_t;
typedef intptr_t atomic_uintptr_t;
typedef intptr_t atomic_size_t;
typedef intptr_t atomic_ptrdiff_t;
typedef intptr_t atomic_intmax_t;
typedef intptr_t atomic_uintmax_t;
#define atomic_store(object, desired) \
do { \
*(object) = (desired); \
MemoryBarrier(); \
} while (0)
#define atomic_store_explicit(object, desired, order) \
atomic_store(object, desired)
#define atomic_load(object) \
(MemoryBarrier(), *(object))
#define atomic_load_explicit(object, order) \
atomic_load(object)
#define atomic_exchange(object, desired) \
InterlockedExchangePointer(object, desired);
#define atomic_exchange_explicit(object, desired, order) \
atomic_exchange(object, desired)
// Chromium: commented out since it doesn't compile on Windows, and also isn't
// used anywhere.
#if 0
static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *expected,
intptr_t desired)
{
intptr_t old = *expected;
*expected = (intptr_t)InterlockedCompareExchangePointer(
(PVOID *)object, (PVOID)desired, (PVOID)old);
return *expected == old;
}
#endif
#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \
atomic_compare_exchange_strong(object, expected, desired)
#define atomic_compare_exchange_weak(object, expected, desired) \
atomic_compare_exchange_strong(object, expected, desired)
#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \
atomic_compare_exchange_weak(object, expected, desired)
#ifdef _WIN64
#define atomic_fetch_add(object, operand) \
InterlockedExchangeAdd64(object, operand)
#define atomic_fetch_sub(object, operand) \
InterlockedExchangeAdd64(object, -(operand))
#define atomic_fetch_or(object, operand) \
InterlockedOr64(object, operand)
#define atomic_fetch_xor(object, operand) \
InterlockedXor64(object, operand)
#define atomic_fetch_and(object, operand) \
InterlockedAnd64(object, operand)
#else
#define atomic_fetch_add(object, operand) \
InterlockedExchangeAdd(object, operand)
#define atomic_fetch_sub(object, operand) \
InterlockedExchangeAdd(object, -(operand))
#define atomic_fetch_or(object, operand) \
InterlockedOr(object, operand)
#define atomic_fetch_xor(object, operand) \
InterlockedXor(object, operand)
#define atomic_fetch_and(object, operand) \
InterlockedAnd(object, operand)
#endif /* _WIN64 */
#define atomic_fetch_add_explicit(object, operand, order) \
atomic_fetch_add(object, operand)
#define atomic_fetch_sub_explicit(object, operand, order) \
atomic_fetch_sub(object, operand)
#define atomic_fetch_or_explicit(object, operand, order) \
atomic_fetch_or(object, operand)
#define atomic_fetch_xor_explicit(object, operand, order) \
atomic_fetch_xor(object, operand)
#define atomic_fetch_and_explicit(object, operand, order) \
atomic_fetch_and(object, operand)
#define atomic_flag_test_and_set(object) \
atomic_exchange(object, 1)
#define atomic_flag_test_and_set_explicit(object, order) \
atomic_flag_test_and_set(object)
#define atomic_flag_clear(object) \
atomic_store(object, 0)
#define atomic_flag_clear_explicit(object, order) \
atomic_flag_clear(object)
#endif /* COMPAT_ATOMICS_WIN32_STDATOMIC_H */
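/* Hedged usage sketch (illustrative, not part of the compatibility header):
 * with this header standing in for <stdatomic.h> on Win32, plain loads and
 * stores pair with MemoryBarrier() and the read-modify-write macros expand
 * to the Interlocked* family above. */
static atomic_int example_counter = ATOMIC_VAR_INIT(0);

static int atomic_example(void)
{
    atomic_store(&example_counter, 5);         /* store + MemoryBarrier()    */
    atomic_fetch_add(&example_counter, 2);     /* InterlockedExchangeAdd(64) */
    atomic_fetch_sub(&example_counter, 1);     /* adds the negated operand   */
    return (int)atomic_load(&example_counter); /* barrier, then read: 6      */
}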
| 2,271 |
575 | <filename>ios/chrome/browser/ui/authentication/signin/user_signin/user_policy_signout_coordinator.h
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_UI_AUTHENTICATION_SIGNIN_USER_SIGNIN_USER_POLICY_SIGNOUT_COORDINATOR_H_
#define IOS_CHROME_BROWSER_UI_AUTHENTICATION_SIGNIN_USER_SIGNIN_USER_POLICY_SIGNOUT_COORDINATOR_H_
#import "ios/chrome/browser/ui/coordinators/chrome_coordinator.h"
@protocol ApplicationCommands;
@protocol PolicySignoutPromptCommands;
// Coordinates the user sign-out prompt when the user is signed out due to
// the BrowserSignin policy disabling browser sign-in.
@interface UserPolicySignoutCoordinator : ChromeCoordinator
// Handler for commands related to the sign-out prompt for this coordinator.
@property(nonatomic, weak) id<PolicySignoutPromptCommands> signoutPromptHandler;
// Handler for application commands for this coordinator.
@property(nonatomic, weak) id<ApplicationCommands> applicationHandler;
@end
#endif // IOS_CHROME_BROWSER_UI_AUTHENTICATION_SIGNIN_USER_SIGNIN_USER_POLICY_SIGNOUT_COORDINATOR_H_
| 382 |
628 | <filename>External/FEXCore/include/FEXCore/Utils/Allocator.h
#pragma once
#include <FEXCore/Utils/CompilerDefs.h>
#include <cstdint>
#include <functional>
namespace FEXCore::Allocator {
using MMAP_Hook = void*(*)(void*, size_t, int, int, int, off_t);
using MUNMAP_Hook = int(*)(void*, size_t);
using MALLOC_Hook = void*(*)(size_t);
using REALLOC_Hook = void*(*)(void*, size_t);
using FREE_Hook = void(*)(void*);
FEX_DEFAULT_VISIBILITY extern MMAP_Hook mmap;
FEX_DEFAULT_VISIBILITY extern MUNMAP_Hook munmap;
FEX_DEFAULT_VISIBILITY extern MALLOC_Hook malloc;
FEX_DEFAULT_VISIBILITY extern REALLOC_Hook realloc;
FEX_DEFAULT_VISIBILITY extern FREE_Hook free;
FEX_DEFAULT_VISIBILITY void SetupHooks();
FEX_DEFAULT_VISIBILITY void ClearHooks();
}
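// Hedged usage sketch (the logging wrapper is an assumption; only the hook
// pointers and SetupHooks/ClearHooks come from the interface above). The
// hooks are plain mutable function pointers, so a caller can chain a wrapper:
#include <cstdio>

namespace {
FEXCore::Allocator::MALLOC_Hook RealMalloc = nullptr;

void* LoggingMalloc(size_t Size) {
  void* Ptr = RealMalloc(Size);                 // delegate to the saved hook
  std::fprintf(stderr, "malloc(%zu) -> %p\n", Size, Ptr);
  return Ptr;
}
} // namespace

inline void InstallLoggingMalloc() {
  RealMalloc = FEXCore::Allocator::malloc;      // save the current hook
  FEXCore::Allocator::malloc = LoggingMalloc;   // route allocations through the wrapper
}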
| 316 |
25,360 | package org.jeecg.boot.starter.lock.test;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.IntStream;
@RunWith(SpringRunner.class)
@SpringBootTest(classes = LockTestApplication.class)
public class LockTest {
@Autowired
LockService lockService;
/**
     * Test the distributed lock (simulating a seckill / flash sale)
*/
@Test
public void test1() throws Exception {
ExecutorService executorService = Executors.newFixedThreadPool(6);
IntStream.range(0, 30).forEach(i -> executorService.submit(() -> {
try {
lockService.seckill("20120508784");
} catch (Exception e) {
e.printStackTrace();
}
}));
executorService.awaitTermination(30, TimeUnit.SECONDS);
}
/**
     * Test the distributed lock (simulating a seckill / flash sale)
*/
@Test
public void test2() throws Exception {
ExecutorService executorService = Executors.newFixedThreadPool(6);
IntStream.range(0, 30).forEach(i -> executorService.submit(() -> {
try {
lockService.seckill2("20120508784");
} catch (Exception e) {
e.printStackTrace();
}
}));
executorService.awaitTermination(30, TimeUnit.SECONDS);
}
/**
     * Test the distributed lock (simulating duplicate submission)
*/
@Test
public void test3() throws Exception {
ExecutorService executorService = Executors.newFixedThreadPool(6);
IntStream.range(0, 20).forEach(i -> executorService.submit(() -> {
try {
lockService.reSubmit("test");
} catch (Exception e) {
e.printStackTrace();
}
}));
executorService.awaitTermination(30, TimeUnit.SECONDS);
}
}
| 964 |
2,571 | # ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
from jittor import nn
class TestOptimizer(unittest.TestCase):
def test_param_groups(self):
pa = jt.ones((1,))
pb = jt.ones((1,))
data = jt.ones((1,))
opt = nn.SGD([
{"params":[pa], "lr":0.1},
{"params":[pb]},
], 1)
opt.step(pa*data+pb*data)
assert pa.data == 0.9 and pb.data == 0, (pa, pb)
def test_clip_grad_norm(self):
a = jt.ones(2)
opt = jt.optim.SGD([a], 0.1)
loss = a*a
opt.zero_grad()
opt.backward(loss)
opt.clip_grad_norm(0.01, 2)
assert np.allclose(opt.param_groups[0]['grads'][0].norm(), 0.01)
opt.step()
def test_state_dict(self):
a = jt.ones(2)
opt = jt.optim.SGD([a], 0.1)
s = opt.state_dict()
# print(s)
opt.load_state_dict(s)
def test_opt_grad(self):
a = jt.ones(2)
opt = jt.optim.SGD([a], 0.1)
opt.backward(a**2)
g = a.opt_grad(opt)
np.testing.assert_allclose(g.data, 2)
if __name__ == "__main__":
unittest.main() | 726 |
2,111 | #include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdio.h>
#include <assert.h>
#include "htslib/tbx.h"
#include "htslib/bgzf.h"
#include "htslib/khash.h"
KHASH_DECLARE(s2i, kh_cstr_t, int64_t)
tbx_conf_t tbx_conf_gff = { 0, 1, 4, 5, '#', 0 };
tbx_conf_t tbx_conf_bed = { TBX_UCSC, 1, 2, 3, '#', 0 };
tbx_conf_t tbx_conf_psltbl = { TBX_UCSC, 15, 17, 18, '#', 0 };
tbx_conf_t tbx_conf_sam = { TBX_SAM, 3, 4, 0, '@', 0 };
tbx_conf_t tbx_conf_vcf = { TBX_VCF, 1, 2, 0, '#', 0 };
typedef struct {
int64_t beg, end;
char *ss, *se;
int tid;
} tbx_intv_t;
static inline int get_tid(tbx_t *tbx, const char *ss, int is_add)
{
khint_t k;
khash_t(s2i) *d;
if (tbx->dict == 0) tbx->dict = kh_init(s2i);
d = (khash_t(s2i)*)tbx->dict;
if (is_add) {
int absent;
k = kh_put(s2i, d, ss, &absent);
if (absent) {
kh_key(d, k) = strdup(ss);
kh_val(d, k) = kh_size(d) - 1;
}
} else k = kh_get(s2i, d, ss);
return k == kh_end(d)? -1 : kh_val(d, k);
}
int tbx_name2id(tbx_t *tbx, const char *ss)
{
return get_tid(tbx, ss, 0);
}
int tbx_parse1(const tbx_conf_t *conf, int len, char *line, tbx_intv_t *intv)
{
int i, b = 0, id = 1, ncols = 0;
char *s;
intv->ss = intv->se = 0; intv->beg = intv->end = -1;
for (i = 0; i <= len; ++i) {
if (line[i] == '\t' || line[i] == 0) {
++ncols;
if (id == conf->sc) {
intv->ss = line + b; intv->se = line + i;
} else if (id == conf->bc) {
// here ->beg is 0-based.
intv->beg = intv->end = strtol(line + b, &s, 0);
if ( s==line+b ) return -1; // expected int
if (!(conf->preset&TBX_UCSC)) --intv->beg;
else ++intv->end;
if (intv->beg < 0) intv->beg = 0;
if (intv->end < 1) intv->end = 1;
} else {
if ((conf->preset&0xffff) == TBX_GENERIC) {
if (id == conf->ec)
{
intv->end = strtol(line + b, &s, 0);
if ( s==line+b ) return -1; // expected int
}
} else if ((conf->preset&0xffff) == TBX_SAM) {
if (id == 6) { // CIGAR
int l = 0, op;
char *t;
for (s = line + b; s < line + i;) {
long x = strtol(s, &t, 10);
op = toupper(*t);
if (op == 'M' || op == 'D' || op == 'N') l += x;
s = t + 1;
}
if (l == 0) l = 1;
intv->end = intv->beg + l;
}
} else if ((conf->preset&0xffff) == TBX_VCF) {
if (id == 4) {
if (b < i) intv->end = intv->beg + (i - b);
} else if (id == 8) { // look for "END="
int c = line[i];
line[i] = 0;
s = strstr(line + b, "END=");
if (s == line + b) s += 4;
else if (s) {
s = strstr(line + b, ";END=");
if (s) s += 5;
}
if (s) intv->end = strtol(s, &s, 0);
line[i] = c;
}
}
}
b = i + 1;
++id;
}
}
if (intv->ss == 0 || intv->se == 0 || intv->beg < 0 || intv->end < 0) return -1;
return 0;
}
static inline int get_intv(tbx_t *tbx, kstring_t *str, tbx_intv_t *intv, int is_add)
{
if (tbx_parse1(&tbx->conf, str->l, str->s, intv) == 0) {
int c = *intv->se;
*intv->se = '\0'; intv->tid = get_tid(tbx, intv->ss, is_add); *intv->se = c;
return (intv->tid >= 0 && intv->beg >= 0 && intv->end >= 0)? 0 : -1;
} else {
char *type = NULL;
switch (tbx->conf.preset&0xffff)
{
case TBX_SAM: type = "TBX_SAM"; break;
case TBX_VCF: type = "TBX_VCF"; break;
case TBX_UCSC: type = "TBX_UCSC"; break;
default: type = "TBX_GENERIC"; break;
}
fprintf(stderr, "[E::%s] failed to parse %s, was wrong -p [type] used?\nThe offending line was: \"%s\"\n", __func__, type, str->s);
return -1;
}
}
int tbx_readrec(BGZF *fp, void *tbxv, void *sv, int *tid, int *beg, int *end)
{
tbx_t *tbx = (tbx_t *) tbxv;
kstring_t *s = (kstring_t *) sv;
int ret;
if ((ret = bgzf_getline(fp, '\n', s)) >= 0) {
tbx_intv_t intv;
get_intv(tbx, s, &intv, 0);
*tid = intv.tid; *beg = intv.beg; *end = intv.end;
}
return ret;
}
void tbx_set_meta(tbx_t *tbx)
{
int i, l = 0, l_nm;
uint32_t x[7];
char **name;
uint8_t *meta;
khint_t k;
khash_t(s2i) *d = (khash_t(s2i)*)tbx->dict;
memcpy(x, &tbx->conf, 24);
name = (char**)malloc(sizeof(char*) * kh_size(d));
for (k = kh_begin(d), l = 0; k != kh_end(d); ++k) {
if (!kh_exist(d, k)) continue;
name[kh_val(d, k)] = (char*)kh_key(d, k);
l += strlen(kh_key(d, k)) + 1; // +1 to include '\0'
}
l_nm = x[6] = l;
meta = (uint8_t*)malloc(l_nm + 28);
if (ed_is_big())
for (i = 0; i < 7; ++i)
x[i] = ed_swap_4(x[i]);
memcpy(meta, x, 28);
for (l = 28, i = 0; i < (int)kh_size(d); ++i) {
int x = strlen(name[i]) + 1;
memcpy(meta + l, name[i], x);
l += x;
}
free(name);
hts_idx_set_meta(tbx->idx, l, meta, 0);
}
tbx_t *tbx_index(BGZF *fp, int min_shift, const tbx_conf_t *conf)
{
tbx_t *tbx;
kstring_t str;
int ret, first = 0, n_lvls, fmt;
int64_t lineno = 0;
uint64_t last_off = 0;
tbx_intv_t intv;
str.s = 0; str.l = str.m = 0;
tbx = (tbx_t*)calloc(1, sizeof(tbx_t));
tbx->conf = *conf;
if (min_shift > 0) n_lvls = (TBX_MAX_SHIFT - min_shift + 2) / 3, fmt = HTS_FMT_CSI;
else min_shift = 14, n_lvls = 5, fmt = HTS_FMT_TBI;
while ((ret = bgzf_getline(fp, '\n', &str)) >= 0) {
++lineno;
if (lineno <= tbx->conf.line_skip || str.s[0] == tbx->conf.meta_char) {
last_off = bgzf_tell(fp);
continue;
}
if (first == 0) {
tbx->idx = hts_idx_init(0, fmt, last_off, min_shift, n_lvls);
first = 1;
}
get_intv(tbx, &str, &intv, 1);
ret = hts_idx_push(tbx->idx, intv.tid, intv.beg, intv.end, bgzf_tell(fp), 1);
if (ret < 0)
{
free(str.s);
tbx_destroy(tbx);
return NULL;
}
}
if ( !tbx->idx ) tbx->idx = hts_idx_init(0, fmt, last_off, min_shift, n_lvls); // empty file
if ( !tbx->dict ) tbx->dict = kh_init(s2i);
hts_idx_finish(tbx->idx, bgzf_tell(fp));
tbx_set_meta(tbx);
free(str.s);
return tbx;
}
void tbx_destroy(tbx_t *tbx)
{
khash_t(s2i) *d = (khash_t(s2i)*)tbx->dict;
if (d != NULL)
{
khint_t k;
for (k = kh_begin(d); k != kh_end(d); ++k)
if (kh_exist(d, k)) free((char*)kh_key(d, k));
}
hts_idx_destroy(tbx->idx);
kh_destroy(s2i, d);
free(tbx);
}
int tbx_index_build(const char *fn, int min_shift, const tbx_conf_t *conf)
{
tbx_t *tbx;
BGZF *fp;
if ( bgzf_is_bgzf(fn)!=1 ) { fprintf(stderr,"Not a BGZF file: %s\n", fn); return -1; }
if ((fp = bgzf_open(fn, "r")) == 0) return -1;
if ( !fp->is_compressed ) { bgzf_close(fp); return -1; }
tbx = tbx_index(fp, min_shift, conf);
bgzf_close(fp);
if ( !tbx ) return -1;
hts_idx_save(tbx->idx, fn, min_shift > 0? HTS_FMT_CSI : HTS_FMT_TBI);
tbx_destroy(tbx);
return 0;
}
tbx_t *tbx_index_load(const char *fn)
{
tbx_t *tbx;
uint8_t *meta;
char *nm, *p;
uint32_t x[7];
int l_meta, l_nm;
tbx = (tbx_t*)calloc(1, sizeof(tbx_t));
tbx->idx = hts_idx_load(fn, HTS_FMT_TBI);
if ( !tbx->idx )
{
free(tbx);
return NULL;
}
meta = hts_idx_get_meta(tbx->idx, &l_meta);
memcpy(x, meta, 28);
memcpy(&tbx->conf, x, 24);
p = nm = (char*)meta + 28;
l_nm = x[6];
for (; p - nm < l_nm; p += strlen(p) + 1) get_tid(tbx, p, 1);
return tbx;
}
const char **tbx_seqnames(tbx_t *tbx, int *n)
{
khash_t(s2i) *d = (khash_t(s2i)*)tbx->dict;
if (d == NULL)
{
*n = 0;
return NULL;
}
int tid, m = kh_size(d);
const char **names = (const char**) calloc(m,sizeof(const char*));
khint_t k;
for (k=kh_begin(d); k<kh_end(d); k++)
{
if ( !kh_exist(d,k) ) continue;
tid = kh_val(d,k);
assert( tid<m );
names[tid] = kh_key(d,k);
}
// sanity check: there should be no gaps
for (tid=0; tid<m; tid++)
assert(names[tid]);
*n = m;
return names;
}
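/* Hedged usage sketch (file and contig names are placeholders): build a
 * classic .tbi index for a bgzip-compressed VCF with the preset config
 * defined above, reload it, and resolve a contig name to its numeric id. */
static int example_index_vcf(void)
{
    /* min_shift == 0 selects the TBI format (shift 14, 5 levels). */
    if (tbx_index_build("calls.vcf.gz", 0, &tbx_conf_vcf) != 0)
        return -1;
    tbx_t *tbx = tbx_index_load("calls.vcf.gz");
    if (tbx == NULL)
        return -1;
    int tid = tbx_name2id(tbx, "chr1");  /* -1 when the contig is absent */
    tbx_destroy(tbx);
    return tid;
}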
| 4,408 |
7,746 | /*++
Copyright (c) 2006 Microsoft Corporation
Module Name:
smt_relevancy.cpp
Abstract:
<abstract>
Author:
<NAME> (leonardo) 2008-06-04.
Revision History:
--*/
#include "smt/smt_context.h"
#include "smt/smt_relevancy.h"
#include "ast/ast_pp.h"
#include "ast/ast_ll_pp.h"
#include "ast/ast_smt2_pp.h"
namespace smt {
void relevancy_eh::mark_as_relevant(relevancy_propagator & rp, expr * n) {
rp.mark_as_relevant(n);
}
void relevancy_eh::mark_args_as_relevant(relevancy_propagator & rp, app * n) {
unsigned j = n->get_num_args();
while (j > 0) {
--j;
rp.mark_as_relevant(n->get_arg(j));
}
}
void simple_relevancy_eh::operator()(relevancy_propagator & rp) {
rp.mark_as_relevant(m_target);
}
void pair_relevancy_eh::operator()(relevancy_propagator & rp) {
if (!rp.is_relevant(m_source1))
return;
if (!rp.is_relevant(m_source2))
return;
rp.mark_as_relevant(m_target);
}
class and_relevancy_eh : public relevancy_eh {
app * m_parent;
public:
and_relevancy_eh(app * p):m_parent(p) {}
~and_relevancy_eh() override {}
void operator()(relevancy_propagator & rp) override;
};
class or_relevancy_eh : public relevancy_eh {
app * m_parent;
public:
or_relevancy_eh(app * p):m_parent(p) {}
~or_relevancy_eh() override {}
void operator()(relevancy_propagator & rp) override;
};
class ite_relevancy_eh : public relevancy_eh {
app * m_parent;
public:
ite_relevancy_eh(app * p):m_parent(p) {}
~ite_relevancy_eh() override {}
void operator()(relevancy_propagator & rp) override;
};
class ite_term_relevancy_eh : public relevancy_eh {
app * m_parent;
app * m_then_eq;
app * m_else_eq;
public:
ite_term_relevancy_eh(app * p, app * then_eq, app * else_eq):m_parent(p), m_then_eq(then_eq), m_else_eq(else_eq) {}
~ite_term_relevancy_eh() override {}
void operator()(relevancy_propagator & rp) override;
};
relevancy_propagator::relevancy_propagator(context & ctx):
m_context(ctx) {
}
bool relevancy_propagator::enabled() const {
return m_context.relevancy();
}
region & relevancy_propagator::get_region() const {
return m_context.get_region();
}
ast_manager & relevancy_propagator::get_manager() const {
return m_context.get_manager();
}
void relevancy_propagator::add_dependency(expr * src, expr * target) {
if (!enabled())
return;
if (is_relevant(src))
mark_as_relevant(target);
else
add_handler(src, mk_relevancy_eh(simple_relevancy_eh(target)));
}
relevancy_eh * relevancy_propagator::mk_or_relevancy_eh(app * n) {
SASSERT(get_manager().is_or(n));
return mk_relevancy_eh(or_relevancy_eh(n));
}
relevancy_eh * relevancy_propagator::mk_and_relevancy_eh(app * n) {
SASSERT(get_manager().is_and(n));
return mk_relevancy_eh(and_relevancy_eh(n));
}
relevancy_eh * relevancy_propagator::mk_ite_relevancy_eh(app * n) {
SASSERT(get_manager().is_ite(n));
return mk_relevancy_eh(ite_relevancy_eh(n));
}
relevancy_eh * relevancy_propagator::mk_term_ite_relevancy_eh(app * c, app * t, app * e) {
return mk_relevancy_eh(ite_term_relevancy_eh(c, t, e));
}
struct relevancy_propagator_imp : public relevancy_propagator {
unsigned m_qhead;
expr_ref_vector m_relevant_exprs;
uint_set m_is_relevant;
typedef list<relevancy_eh *> relevancy_ehs;
obj_map<expr, relevancy_ehs *> m_relevant_ehs;
obj_map<expr, relevancy_ehs *> m_watches[2];
struct eh_trail {
enum kind { POS_WATCH, NEG_WATCH, HANDLER };
kind m_kind;
expr * m_node;
eh_trail(expr * n):m_kind(HANDLER), m_node(n) {}
eh_trail(expr * n, bool val):m_kind(val ? POS_WATCH : NEG_WATCH), m_node(n) {}
kind get_kind() const { return m_kind; }
expr * get_node() const { return m_node; }
};
svector<eh_trail> m_trail;
struct scope {
unsigned m_relevant_exprs_lim;
unsigned m_trail_lim;
};
svector<scope> m_scopes;
bool m_propagating;
relevancy_propagator_imp(context & ctx):
relevancy_propagator(ctx), m_qhead(0), m_relevant_exprs(ctx.get_manager()),
m_propagating(false) {}
~relevancy_propagator_imp() override {
undo_trail(0);
}
relevancy_ehs * get_handlers(expr * n) {
relevancy_ehs * r = nullptr;
m_relevant_ehs.find(n, r);
SASSERT(m_relevant_ehs.contains(n) || r == 0);
return r;
}
void set_handlers(expr * n, relevancy_ehs * ehs) {
if (ehs == nullptr)
m_relevant_ehs.erase(n);
else
m_relevant_ehs.insert(n, ehs);
}
relevancy_ehs * get_watches(expr * n, bool val) {
relevancy_ehs * r = nullptr;
m_watches[val ? 1 : 0].find(n, r);
SASSERT(m_watches[val ? 1 : 0].contains(n) || r == 0);
return r;
}
void set_watches(expr * n, bool val, relevancy_ehs * ehs) {
if (ehs == nullptr)
m_watches[val ? 1 : 0].erase(n);
else
m_watches[val ? 1 : 0].insert(n, ehs);
}
void push_trail(eh_trail const & t) {
get_manager().inc_ref(t.get_node());
m_trail.push_back(t);
}
void add_handler(expr * source, relevancy_eh * eh) override {
if (!enabled())
return;
if (is_relevant_core(source)) {
eh->operator()(*this, source);
}
else {
SASSERT(eh);
push_trail(eh_trail(source));
set_handlers(source, new (get_region()) relevancy_ehs(eh, get_handlers(source)));
}
}
void add_watch(expr * n, bool val, relevancy_eh * eh) override {
if (!enabled())
return;
lbool lval = m_context.find_assignment(n);
if (!val)
lval = ~lval;
switch (lval) {
case l_false:
return;
case l_undef:
SASSERT(eh);
set_watches(n, val, new (get_region()) relevancy_ehs(eh, get_watches(n, val)));
push_trail(eh_trail(n, val));
break;
case l_true:
eh->operator()(*this, n, val);
break;
}
}
void add_watch(expr * n, bool val, expr * target) override {
if (!enabled())
return;
lbool lval = m_context.find_assignment(n);
if (!val)
lval = ~lval;
switch (lval) {
case l_false:
return;
case l_undef:
add_watch(n, val, mk_relevancy_eh(simple_relevancy_eh(target)));
break;
case l_true:
mark_as_relevant(target); propagate();
break;
}
}
bool is_relevant_core(expr * n) const { return m_is_relevant.contains(n->get_id()); }
bool is_relevant(expr * n) const override {
return !enabled() || is_relevant_core(n);
}
void push() override {
m_scopes.push_back(scope());
scope & s = m_scopes.back();
s.m_relevant_exprs_lim = m_relevant_exprs.size();
s.m_trail_lim = m_trail.size();
}
void pop(unsigned num_scopes) override {
SASSERT(m_context.get_scope_level() == m_scopes.size());
unsigned lvl = m_scopes.size();
SASSERT(num_scopes <= lvl);
unsigned new_lvl = lvl - num_scopes;
scope & s = m_scopes[new_lvl];
unmark_relevant_exprs(s.m_relevant_exprs_lim);
undo_trail(s.m_trail_lim);
m_scopes.shrink(new_lvl);
}
/**
\brief Unmark expressions marked as relevant.
*/
void unmark_relevant_exprs(unsigned old_lim) {
SASSERT(old_lim <= m_relevant_exprs.size());
unsigned i = m_relevant_exprs.size();
while (i != old_lim) {
--i;
expr * n = m_relevant_exprs.get(i);
m_is_relevant.remove(n->get_id());
TRACE("propagate_relevancy", tout << "unmarking:\n" << mk_ismt2_pp(n, get_manager()) << "\n";);
}
m_relevant_exprs.shrink(old_lim);
m_qhead = m_relevant_exprs.size();
}
void undo_trail(unsigned old_lim) {
SASSERT(old_lim <= m_trail.size());
ast_manager & m = get_manager();
unsigned i = m_trail.size();
while (i != old_lim) {
--i;
eh_trail & t = m_trail[i];
expr * n = t.get_node();
relevancy_ehs * ehs;
switch (t.get_kind()) {
case eh_trail::POS_WATCH: ehs = get_watches(n, true); SASSERT(ehs); set_watches(n, true, ehs->tail()); break;
case eh_trail::NEG_WATCH: ehs = get_watches(n, false); SASSERT(ehs); set_watches(n, false, ehs->tail()); break;
case eh_trail::HANDLER: ehs = get_handlers(n); SASSERT(ehs); set_handlers(n, ehs->tail()); break;
default: UNREACHABLE(); break;
}
m.dec_ref(n);
}
m_trail.shrink(old_lim);
}
void set_relevant(expr * n) {
m_is_relevant.insert(n->get_id());
m_relevant_exprs.push_back(n);
m_context.relevant_eh(n);
}
/**
\brief Mark an expression as relevant and propagate
the relevancy to its descendants.
*/
void mark_and_propagate(expr * n) {
if (!enabled())
return;
if (!is_relevant_core(n)) {
mark_as_relevant(n);
propagate();
}
}
/**
\brief Mark the given expression as relevant if it is not
already marked.
*/
void mark_as_relevant(expr * n) override {
if (!enabled())
return;
if (!is_relevant_core(n)) {
enode * e = m_context.find_enode(n);
if (e != nullptr) {
enode * curr = e;
do {
if (!is_relevant_core(curr->get_expr()))
set_relevant(curr->get_expr());
curr = curr->get_next();
}
while (curr != e);
}
else {
set_relevant(n);
}
}
}
/**
\brief Marks the children of n as relevant.
\pre n is marked as relevant.
*/
void propagate_relevant_app(app * n) {
SASSERT(is_relevant_core(n));
unsigned j = n->get_num_args();
while (j > 0) {
--j;
mark_as_relevant(n->get_arg(j));
}
}
/**
\brief Propagate relevancy for an or-application.
*/
void propagate_relevant_or(app * n) {
SASSERT(get_manager().is_or(n));
lbool val = m_context.find_assignment(n);
// If val is l_undef, then the expression
// is a root, and no boolean variable was created for it.
if (val == l_undef)
val = l_true;
switch (val) {
case l_false:
propagate_relevant_app(n);
break;
case l_undef:
break;
case l_true: {
expr * true_arg = nullptr;
unsigned num_args = n->get_num_args();
for (unsigned i = 0; i < num_args; i++) {
expr * arg = n->get_arg(i);
if (m_context.find_assignment(arg) == l_true) {
if (is_relevant_core(arg))
return;
else if (!true_arg)
true_arg = arg;
}
}
if (true_arg)
mark_as_relevant(true_arg);
break;
} }
}
/**
\brief Propagate relevancy for an and-application.
*/
void propagate_relevant_and(app * n) {
lbool val = m_context.find_assignment(n);
switch (val) {
case l_false: {
expr * false_arg = nullptr;
unsigned num_args = n->get_num_args();
for (unsigned i = 0; i < num_args; i++) {
expr * arg = n->get_arg(i);
if (m_context.find_assignment(arg) == l_false) {
if (is_relevant_core(arg))
return;
else if (!false_arg)
false_arg = arg;
}
}
if (false_arg)
mark_as_relevant(false_arg);
break;
}
case l_undef:
break;
case l_true:
propagate_relevant_app(n);
break;
}
}
/**
\brief Propagate relevancy for an ite-expression.
*/
void propagate_relevant_ite(app * n) {
TRACE("propagate_relevant_ite", tout << "propagating relevancy for #" << n->get_id() << "\n" << mk_pp(n, get_manager()) << "\n";);
mark_as_relevant(n->get_arg(0));
switch (m_context.find_assignment(n->get_arg(0))) {
case l_false:
TRACE("propagate_relevant_ite", tout << "marking as relevant: " << mk_pp(n->get_arg(2), get_manager()) << "\n";);
mark_as_relevant(n->get_arg(2));
break;
case l_undef:
TRACE("propagate_relevant_ite", tout << "ite c is unassigned\n";);
break;
case l_true:
TRACE("propagate_relevant_ite", tout << "marking as relevant: " << mk_pp(n->get_arg(1), get_manager()) << "\n";);
mark_as_relevant(n->get_arg(1));
break;
}
}
/**
\brief Propagate relevancy to the arguments of recently marked
expressions. That is, expressions that are located at positions
[m_qhead, m_relevant_exprs.size()) in the stack of
relevant expressions.
*/
void propagate() override {
if (m_propagating) {
return;
}
flet<bool> l_prop(m_propagating, true);
ast_manager & m = get_manager();
while (m_qhead < m_relevant_exprs.size()) {
expr * n = m_relevant_exprs.get(m_qhead);
TRACE("propagate_relevancy_to_args", tout << "propagating relevancy to args of #" << n->get_id() << "\n";);
TRACE("propagate_relevancy", tout << "marking as relevant:\n" << mk_bounded_pp(n, m) << "\n";);
SASSERT(is_relevant_core(n));
m_qhead++;
if (is_app(n)) {
family_id fid = to_app(n)->get_family_id();
if (fid == m.get_basic_family_id()) {
switch (to_app(n)->get_decl_kind()) {
case OP_OR:
propagate_relevant_or(to_app(n));
break;
case OP_AND:
propagate_relevant_and(to_app(n));
break;
case OP_ITE:
propagate_relevant_ite(to_app(n));
break;
default:
propagate_relevant_app(to_app(n));
break;
}
}
else {
propagate_relevant_app(to_app(n));
}
}
relevancy_ehs * ehs = get_handlers(n);
while (ehs != nullptr) {
ehs->head()->operator()(*this, n);
ehs = ehs->tail();
}
}
}
bool can_propagate() const override {
return m_qhead < m_relevant_exprs.size();
}
void assign_eh(expr * n, bool val) override {
if (!enabled())
return;
ast_manager & m = get_manager();
SASSERT(enabled());
if (is_relevant_core(n)) {
if (m.is_or(n))
propagate_relevant_or(to_app(n));
else if (m.is_and(n))
propagate_relevant_and(to_app(n));
}
relevancy_ehs * ehs = get_watches(n, val);
while (ehs != nullptr) {
ehs->head()->operator()(*this, n, val);
ehs = ehs->tail();
}
}
void display(std::ostream & out) const override {
if (enabled() && !m_relevant_exprs.empty()) {
out << "relevant exprs:\n";
for (unsigned i = 0; i < m_relevant_exprs.size(); i++) {
out << "#" << m_relevant_exprs.get(i)->get_id() << " ";
}
out << "\n";
}
}
#ifdef Z3DEBUG
bool check_relevancy_app(app * n) const {
SASSERT(is_relevant(n));
unsigned num_args = n->get_num_args();
for (unsigned i = 0; i < num_args; i++) {
CTRACE("relevancy_bug", !is_relevant(n->get_arg(i)), tout << "n: " << mk_ismt2_pp(n, get_manager()) << "\ni: " << i << "\n";);
SASSERT(is_relevant(n->get_arg(i)));
}
return true;
}
bool check_relevancy_or(app * n, bool root) const override {
lbool val = root ? l_true : m_context.find_assignment(n);
if (val == l_false)
return check_relevancy_app(n);
if (val == l_true) {
unsigned num_args = n->get_num_args();
for (unsigned i = 0; i < num_args; i++) {
expr * arg = n->get_arg(i);
if (m_context.find_assignment(arg) == l_true && is_relevant(arg))
return true;
}
TRACE("check_relevancy", tout << "failed:\n" << mk_ll_pp(n, get_manager()); display(tout););
UNREACHABLE();
}
return true;
}
bool check_relevancy_and(app * n) const {
lbool val = m_context.find_assignment(n);
if (val == l_true)
return check_relevancy_app(n);
if (val == l_false) {
unsigned num_args = n->get_num_args();
for (unsigned i = 0; i < num_args; i++) {
expr * arg = n->get_arg(i);
if (m_context.find_assignment(arg) == l_false && is_relevant(arg))
return true;
}
UNREACHABLE();
}
return true;
}
bool check_relevancy_ite(app * n) const {
SASSERT(is_relevant(n->get_arg(0)));
switch (m_context.find_assignment(n->get_arg(0))) {
case l_false:
if (get_manager().is_bool(n)) {
TRACE("ite_bug", tout << mk_bounded_pp(n, get_manager()) << "\n";);
SASSERT(is_relevant(n->get_arg(2)));
}
else {
app_ref eq(get_manager());
eq = m_context.mk_eq_atom(n, n->get_arg(2));
SASSERT(is_relevant(eq.get()));
}
break;
case l_undef:
break;
case l_true:
if (get_manager().is_bool(n)) {
SASSERT(is_relevant(n->get_arg(1)));
}
else {
app_ref eq(get_manager());
eq = m_context.mk_eq_atom(n, n->get_arg(1));
SASSERT(is_relevant(eq.get()));
}
break;
}
return true;
}
bool check_relevancy(expr_ref_vector const & v) const override {
SASSERT(!can_propagate());
ast_manager & m = get_manager();
unsigned sz = v.size();
for (unsigned i = 0; i < sz; i++) {
expr * n = v.get(i);
if (is_relevant(n)) {
TRACE("check_relevancy", tout << "checking:\n" << mk_ll_pp(n, get_manager()) << "internalized: " << m_context.find_enode(n) << "\n";);
if (m.is_or(n)) {
SASSERT(check_relevancy_or(to_app(n), false));
}
else if (m.is_and(n)) {
SASSERT(check_relevancy_and(to_app(n)));
}
else if (m.is_ite(n)) {
SASSERT(check_relevancy_ite(to_app(n)));
}
else if (is_app(n)) {
SASSERT(check_relevancy_app(to_app(n)));
}
else {
                enode * n0 = m_context.find_enode(n);
                if (n0 != nullptr) {
                    enode * curr = n0->get_next();
                    while (curr != n0) {
                        SASSERT(is_relevant(curr->get_expr()));
                        curr = curr->get_next();
                    }
                }
}
}
}
return true;
}
#endif
};
void and_relevancy_eh::operator()(relevancy_propagator & rp) {
if (rp.is_relevant(m_parent))
static_cast<relevancy_propagator_imp&>(rp).propagate_relevant_and(m_parent);
}
void or_relevancy_eh::operator()(relevancy_propagator & rp) {
if (rp.is_relevant(m_parent))
static_cast<relevancy_propagator_imp&>(rp).propagate_relevant_or(m_parent);
}
void ite_relevancy_eh::operator()(relevancy_propagator & rp) {
if (rp.is_relevant(m_parent)) {
static_cast<relevancy_propagator_imp&>(rp).propagate_relevant_ite(m_parent);
}
}
void ite_term_relevancy_eh::operator()(relevancy_propagator & rp) {
if (!rp.is_relevant(m_parent))
return;
rp.mark_as_relevant(m_parent->get_arg(0));
switch (rp.get_context().get_assignment(m_parent->get_arg(0))) {
case l_false:
TRACE("ite_term_relevancy", tout << "marking else: #" << m_else_eq->get_id() << "\n";);
rp.mark_as_relevant(m_else_eq);
break;
case l_undef:
break;
case l_true:
TRACE("ite_term_relevancy", tout << "marking then: #" << m_then_eq->get_id() << "\n";);
rp.mark_as_relevant(m_then_eq);
break;
}
}
relevancy_propagator * mk_relevancy_propagator(context & ctx) { return alloc(relevancy_propagator_imp, ctx); }
};
| 14,007 |
1,467 | /*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.awssdk.services.json;
import java.util.function.Consumer;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder;
/**
* This includes configuration specific to Json Service that is supported by both {@link JsonClientBuilder} and
* {@link JsonAsyncClientBuilder}.
*/
@Generated("software.amazon.awssdk:codegen")
public interface JsonBaseClientBuilder<B extends JsonBaseClientBuilder<B, C>, C> extends AwsClientBuilder<B, C> {
B serviceConfiguration(ServiceConfiguration serviceConfiguration);
default B serviceConfiguration(Consumer<ServiceConfiguration.Builder> serviceConfiguration) {
return serviceConfiguration(ServiceConfiguration.builder().applyMutation(serviceConfiguration).build());
}
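    // Illustrative call site (a sketch, not generated SDK code; `someOption`
    // is a placeholder, not a verified ServiceConfiguration setter):
    //
    //   JsonClient client = JsonClient.builder()
    //           .serviceConfiguration(c -> c.someOption(true))
    //           .build();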
}
| 371 |
9,959 | @lombok.Builder
public class BuilderWithBadNames {
String build;
String toString;
}
| 28 |
681 | from metaworld.envs.mujoco.sawyer_xyz.sawyer_pick_and_place import (
SawyerPickAndPlaceEnv,
SawyerPickAndPlaceEnvYZ,
)
# from metaworld.core.image_env import ImageEnv
# from metaworld.envs.mujoco.cameras import sawyer_pick_and_place_camera
import numpy as np
env = SawyerPickAndPlaceEnvYZ(
hand_low=(-0.1, 0.55, 0.05),
hand_high=(0.0, 0.65, 0.2),
action_scale=0.02,
hide_goal_markers=False,
num_goals_presampled=5,
p_obj_in_hand=1,
)
while True:
obs = env.reset()
"""
Sample a goal (object will be in hand as p_obj_in_hand=1) and try to set
the env state to the goal. I think there's a small chance this can fail
and the object falls out.
"""
env.set_to_goal(
{'state_desired_goal': env.generate_uncorrected_env_goals(1)['state_desired_goal'][0]}
)
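    # Hedged sketch (not in the original script): one way to detect the
    # "object falls out" failure mentioned above. The helper names
    # `get_obj_pos` / `get_endeff_pos` are assumptions and may differ in the
    # real env API, so this is left commented out.
    # if np.linalg.norm(env.get_obj_pos() - env.get_endeff_pos()) > 0.05:
    #     obs = env.reset()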
# Close gripper for 20 timesteps
action = np.array([0, 0, 1])
for _ in range(20):
obs, _, _, _ = env.step(action)
env.render() | 411 |
5,411 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_APK_ASSETS_H_
#define BASE_ANDROID_APK_ASSETS_H_
#include <string>
#include "base/android/jni_android.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
namespace base {
namespace android {
// Opens an asset (e.g. a .pak file) from the apk.
// Can be used from the renderer process.
// Fails if the asset is not stored uncompressed within the .apk.
// Returns: The File Descriptor of the asset, or -1 upon failure.
// Input arguments:
// - |file_path|: Path to file within .apk. e.g.: assets/foo.pak
// Output arguments:
// - |region|: size & offset (in bytes) within the .apk of the asset.
BASE_EXPORT int OpenApkAsset(
const std::string& file_path,
base::MemoryMappedFile::Region* region);
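//
// Illustrative usage (a sketch, not part of this header; assumes the
// standard base::MemoryMappedFile / base::File APIs):
//   base::MemoryMappedFile::Region region;
//   int fd = base::android::OpenApkAsset("assets/foo.pak", &region);
//   base::MemoryMappedFile mapped;
//   if (fd != -1)
//     mapped.Initialize(base::File(fd), region);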
// Registers an uncompressed asset from within the apk in the
// FileDescriptorStore.
// Returns: true in case of success, false otherwise.
BASE_EXPORT bool RegisterApkAssetWithFileDescriptorStore(
const std::string& key,
const base::FilePath& file_path);
} // namespace android
} // namespace base
#endif // BASE_ANDROID_APK_ASSETS_H_
| 424 |
530 | import asyncio
import random
import pytest
from tartiflette import Resolver, create_engine
_BOOKS = [{"id": i, "title": f"Book #{i}"} for i in range(25)]
_SDL = """
type Book {
id: Int!
title: String!
}
type Query {
books: [Book!]
}
"""
@pytest.mark.asyncio
async def test_issue_457_sequentially(random_schema_name):
@Resolver(
"Query.books", list_concurrently=False, schema_name=random_schema_name
)
async def test_query_books(parent, args, ctx, info):
return _BOOKS
books_parsing_order = []
@Resolver("Book.id", schema_name=random_schema_name)
async def test_book_id(parent, args, ctx, info):
await asyncio.sleep(random.randint(0, 10) / 100)
books_parsing_order.append(parent["id"])
return parent["id"]
engine = await create_engine(_SDL, schema_name=random_schema_name)
assert await engine.execute("{ books { id title } }") == {
"data": {"books": _BOOKS}
}
assert books_parsing_order == [book["id"] for book in _BOOKS]
@pytest.mark.asyncio
async def test_issue_457_concurrently(random_schema_name):
@Resolver(
"Query.books", list_concurrently=True, schema_name=random_schema_name
)
async def test_query_books(parent, args, ctx, info):
return _BOOKS
books_parsing_order = []
@Resolver("Book.id", schema_name=random_schema_name)
async def test_book_id(parent, args, ctx, info):
await asyncio.sleep(random.randint(0, 10) / 100)
books_parsing_order.append(parent["id"])
return parent["id"]
engine = await create_engine(_SDL, schema_name=random_schema_name)
assert await engine.execute("{ books { id title } }") == {
"data": {"books": _BOOKS}
}
assert books_parsing_order != [book["id"] for book in _BOOKS]
@pytest.mark.asyncio
async def test_issue_457_sequentially_schema_level(random_schema_name):
books_parsing_order = []
@Resolver("Book.id", schema_name=random_schema_name)
async def test_book_id(parent, args, ctx, info):
await asyncio.sleep(random.randint(0, 10) / 100)
books_parsing_order.append(parent["id"])
return parent["id"]
engine = await create_engine(
_SDL, coerce_list_concurrently=False, schema_name=random_schema_name
)
assert await engine.execute(
"{ books { id title } }", initial_value={"books": _BOOKS}
) == {"data": {"books": _BOOKS}}
assert books_parsing_order == [book["id"] for book in _BOOKS]
@pytest.mark.asyncio
async def test_issue_457_concurrently_schema_level(random_schema_name):
books_parsing_order = []
@Resolver("Book.id", schema_name=random_schema_name)
async def test_book_id(parent, args, ctx, info):
await asyncio.sleep(random.randint(0, 10) / 100)
books_parsing_order.append(parent["id"])
return parent["id"]
engine = await create_engine(
_SDL, coerce_list_concurrently=True, schema_name=random_schema_name
)
assert await engine.execute(
"{ books { id title } }", initial_value={"books": _BOOKS}
) == {"data": {"books": _BOOKS}}
assert books_parsing_order != [book["id"] for book in _BOOKS]
| 1,296 |
4,238 | #!/usr/bin/env python
# Lint as: python3
"""A compatibility layer for the IPython shell."""
# pylint: disable=g-import-not-at-top
def IPShell(argv=None, user_ns=None, banner=None):
if argv is None:
argv = []
try:
from IPython.terminal.embed import InteractiveShellEmbed
shell = InteractiveShellEmbed(user_ns=user_ns, banner2=str(banner))
shell(local_ns=user_ns)
except ImportError:
from IPython import Shell
# IPython < 0.11
Shell.IPShell(argv=argv, user_ns=user_ns).mainloop(banner=banner)
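
# Illustrative use (sketch): IPShell(user_ns=locals(), banner="debug shell")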
| 203 |
542 | //----------------------------------------------------------------------------
//
// TSDuck - The MPEG Transport Stream Toolkit
// Copyright (c) 2005-2021, <NAME>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
//----------------------------------------------------------------------------
#include "tsSIParameterDescriptor.h"
#include "tsDescriptor.h"
#include "tsNames.h"
#include "tsTablesDisplay.h"
#include "tsPSIRepository.h"
#include "tsPSIBuffer.h"
#include "tsDuckContext.h"
#include "tsxmlElement.h"
#include "tsMJD.h"
#define MY_XML_NAME u"SI_parameter_descriptor"
#define MY_CLASS ts::SIParameterDescriptor
#define MY_DID ts::DID_ISDB_SI_PARAMETER
#define MY_PDS ts::PDS_ISDB
#define MY_STD ts::Standards::ISDB
TS_REGISTER_DESCRIPTOR(MY_CLASS, ts::EDID::Private(MY_DID, MY_PDS), MY_XML_NAME, MY_CLASS::DisplayDescriptor);
//----------------------------------------------------------------------------
// Constructors
//----------------------------------------------------------------------------
ts::SIParameterDescriptor::SIParameterDescriptor() :
AbstractDescriptor(MY_DID, MY_XML_NAME, MY_STD, 0),
parameter_version(0),
update_time(),
entries()
{
}
void ts::SIParameterDescriptor::clearContent()
{
parameter_version = 0;
update_time.clear();
entries.clear();
}
ts::SIParameterDescriptor::SIParameterDescriptor(DuckContext& duck, const Descriptor& desc) :
SIParameterDescriptor()
{
deserialize(duck, desc);
}
ts::SIParameterDescriptor::Entry::Entry() :
table_id(TID_NULL),
table_description()
{
}
//----------------------------------------------------------------------------
// Serialization
//----------------------------------------------------------------------------
void ts::SIParameterDescriptor::serializePayload(PSIBuffer& buf) const
{
buf.putUInt8(parameter_version);
buf.putMJD(update_time, 2); // 2 bytes: date only
for (auto it = entries.begin(); it != entries.end(); ++it) {
buf.putUInt8(it->table_id);
buf.putUInt8(uint8_t(it->table_description.size()));
buf.putBytes(it->table_description);
}
}
//----------------------------------------------------------------------------
// Deserialization
//----------------------------------------------------------------------------
void ts::SIParameterDescriptor::deserializePayload(PSIBuffer& buf)
{
parameter_version = buf.getUInt8();
update_time = buf.getMJD(2); // 2 bytes: date only
while (buf.canRead()) {
Entry e;
e.table_id = buf.getUInt8();
const size_t len = buf.getUInt8();
buf.getBytes(e.table_description, len);
entries.push_back(e);
}
}
//----------------------------------------------------------------------------
// Static method to display a descriptor.
//----------------------------------------------------------------------------
void ts::SIParameterDescriptor::DisplayDescriptor(TablesDisplay& disp, PSIBuffer& buf, const UString& margin, DID did, TID tid, PDS pds)
{
if (buf.canReadBytes(3)) {
disp << margin << UString::Format(u"Parameter version: 0x%X (%<d)", {buf.getUInt8()}) << std::endl;
disp << margin << "Update time: " << buf.getMJD(2).format(Time::DATE) << std::endl;
while (buf.canReadBytes(2)) {
disp << margin << "- Table id: " << names::TID(disp.duck(), buf.getUInt8(), CASID_NULL, NamesFlags::HEXA_FIRST) << std::endl;
disp.displayPrivateData(u"Table description", buf, buf.getUInt8(), margin + u" ");
}
}
}
//----------------------------------------------------------------------------
// XML serialization
//----------------------------------------------------------------------------
void ts::SIParameterDescriptor::buildXML(DuckContext& duck, xml::Element* root) const
{
root->setIntAttribute(u"parameter_version", parameter_version, true);
root->setDateAttribute(u"update_time", update_time);
for (auto it = entries.begin(); it != entries.end(); ++it) {
xml::Element* e = root->addElement(u"table");
e->setIntAttribute(u"id", it->table_id, true);
if (!it->table_description.empty()) {
e->addHexaText(it->table_description);
}
}
}
//----------------------------------------------------------------------------
// XML deserialization
//----------------------------------------------------------------------------
bool ts::SIParameterDescriptor::analyzeXML(DuckContext& duck, const xml::Element* element)
{
xml::ElementVector xtables;
bool ok =
element->getIntAttribute(parameter_version, u"parameter_version", true) &&
element->getDateAttribute(update_time, u"update_time", true) &&
element->getChildren(xtables, u"table");
for (auto it = xtables.begin(); ok && it != xtables.end(); ++it) {
Entry entry;
ok = (*it)->getIntAttribute(entry.table_id, u"id", true) &&
(*it)->getHexaText(entry.table_description, 0, 255);
entries.push_back(entry);
}
return ok;
}
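
//----------------------------------------------------------------------------
// Illustrative XML form (added note): the element and attribute names below
// come from analyzeXML() above; the concrete values are made up.
//
//   <SI_parameter_descriptor parameter_version="0x01" update_time="2021-05-24">
//     <table id="0x42">01 23 45</table>
//   </SI_parameter_descriptor>
//----------------------------------------------------------------------------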
| 2,053 |
435 | package org.uma.jmetal.example.operator;
import org.uma.jmetal.lab.visualization.plot.PlotFront;
import org.uma.jmetal.lab.visualization.plot.impl.PlotSmile;
import org.uma.jmetal.operator.crossover.CrossoverOperator;
import org.uma.jmetal.operator.crossover.impl.SBXCrossover;
import org.uma.jmetal.problem.doubleproblem.DoubleProblem;
import org.uma.jmetal.problem.multiobjective.Kursawe;
import org.uma.jmetal.solution.doublesolution.DoubleSolution;
import org.uma.jmetal.util.JMetalLogger;
import org.uma.jmetal.util.bounds.Bounds;
import org.uma.jmetal.util.comparator.DoubleVariableComparator;
import org.uma.jmetal.util.fileoutput.SolutionListOutput;
import org.uma.jmetal.util.fileoutput.impl.DefaultFileOutputContext;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* @author <NAME>
* @version 1.0
*
 * This class is intended to verify the behavior of the SBX crossover operator. A figure
* depicting the values obtained when generating 100000 solutions, a granularity of 200, and a number
* of different distribution index values (5, 10, 20) can be found here:
* <a href="https://github.com/jMetal/jMetal/blob/master/figures/sbxCrossover.png">
SBX crossover</a>
*/
public class SBXCrossoverExample {
/**
* Program to generate data representing the distribution of points generated by a SBX
   * crossover operator. The parameters to be provided on the command line are:
* - numberOfSolutions: number of solutions to generate
* - granularity: number of subdivisions to be considered.
* - distributionIndex: distribution index of the polynomial mutation operator
* - outputFile: file containing the results
*
* @param args Command line arguments
*/
public static void main(String[] args) {
int numberOfPoints ;
int granularity ;
double distributionIndex ;
    if (args.length != 3) {
JMetalLogger.logger.info("Usage: numberOfSolutions granularity distributionIndex") ;
JMetalLogger.logger.info("Using default parameters") ;
numberOfPoints = 10000 ;
granularity = 100 ;
distributionIndex = 10 ;
} else {
numberOfPoints = Integer.parseInt(args[0]);
granularity = Integer.parseInt(args[1]);
distributionIndex = Double.parseDouble(args[2]);
}
DoubleProblem problem ;
problem = new Kursawe(1) ;
CrossoverOperator<DoubleSolution> crossover = new SBXCrossover(1.0, distributionIndex) ;
DoubleSolution solution1 = problem.createSolution() ;
DoubleSolution solution2 = problem.createSolution() ;
solution1.variables().set(0, -3.0);
solution2.variables().set(0, 3.0);
List<DoubleSolution> parents = Arrays.asList(solution1, solution2) ;
List<DoubleSolution> population = new ArrayList<>(numberOfPoints) ;
for (int i = 0; i < numberOfPoints ; i++) {
List<DoubleSolution> solutions = (List<DoubleSolution>) crossover.execute(parents);
population.add(solutions.get(0)) ;
population.add(solutions.get(1)) ;
}
population.sort(new DoubleVariableComparator());
new SolutionListOutput(population)
.setVarFileOutputContext(new DefaultFileOutputContext("solutionsSBX"))
.print();
double[][] classifier = classify(population, problem, granularity);
PlotFront plot = new PlotSmile(classifier, "") ;
plot.plot();
}
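    // Example invocation of main(...) above (illustrative): the figure
    // referenced in the class comment was produced with parameters along
    // the lines of
    //   java ... SBXCrossoverExample 100000 200 20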
private static double[][] classify(List<DoubleSolution> solutions, DoubleProblem problem, int granularity) {
Bounds<Double> bounds = problem.getBoundsForVariables().get(0);
double grain = (bounds.getUpperBound() - bounds.getLowerBound()) / granularity ;
double[][] classifier = new double[granularity][] ;
for (int i = 0 ; i < granularity; i++) {
classifier[i] = new double[2] ;
classifier[i][0] = bounds.getLowerBound() + i * grain ;
classifier[i][1] = 0 ;
}
for (DoubleSolution solution : solutions) {
boolean found = false ;
int index = 0 ;
while (!found) {
if (solution.variables().get(0) <= classifier[index][0]) {
classifier[index][1] ++ ;
found = true ;
} else {
if (index == (granularity - 1)) {
classifier[index][1] ++ ;
found = true ;
} else {
index++;
}
}
}
}
return classifier ;
}
}
| 1,510 |
1,069 | # coding: utf-8
from django.db import models
from django_th.models.services import Services
from django_th.models import TriggerService
class Slack(Services):
"""
Model for Slack Service
"""
webhook_url = models.URLField(max_length=2000, blank=True, null=True)
slack_token = models.CharField(max_length=2000, blank=True, null=True)
team_id = models.CharField(max_length=100, blank=True, null=True)
channel = models.CharField(max_length=100, blank=True, null=True)
trigger = models.ForeignKey(TriggerService, on_delete=models.CASCADE)
class Meta:
app_label = 'th_slack'
db_table = 'django_th_slack'
def show(self):
"""
:return: string representing object
"""
return "Services Slack %s %s" % (self.trigger, self.webhook_url)
def __str__(self):
return "%s" % self.webhook_url
| 346 |
482 | package io.cattle.platform.datasource;
import javax.sql.DataSource;
public interface DataSourceFactory {
DataSource createDataSource(String name);
}
| 49 |
758 | def main(args=None):
try:
import pyct.cmd
except ImportError:
import sys
from . import _missing_cmd
print(_missing_cmd())
sys.exit(1)
return pyct.cmd.substitute_main('datashader',args=args)
if __name__ == "__main__":
main()
| 130 |
1,358 | <gh_stars>1000+
import logging
from distributed.http.prometheus import PrometheusCollector
from distributed.http.utils import RequestHandler
class WorkerMetricCollector(PrometheusCollector):
def __init__(self, server):
super().__init__(server)
self.logger = logging.getLogger("distributed.dask_worker")
self.subsystem = "worker"
self.crick_available = True
try:
import crick # noqa: F401
except ImportError:
self.crick_available = False
self.logger.info(
"Not all prometheus metrics available are exported. Digest-based metrics require crick to be installed"
)
def collect(self):
from prometheus_client.core import GaugeMetricFamily
tasks = GaugeMetricFamily(
self.build_name("tasks"),
"Number of tasks at worker.",
labels=["state"],
)
tasks.add_metric(["stored"], len(self.server.data))
tasks.add_metric(["executing"], self.server.executing_count)
tasks.add_metric(["ready"], len(self.server.ready))
tasks.add_metric(["waiting"], self.server.waiting_for_data_count)
yield tasks
yield GaugeMetricFamily(
self.build_name("concurrent_fetch_requests"),
"Number of open fetch requests to other workers.",
value=len(self.server.in_flight_workers),
)
yield GaugeMetricFamily(
self.build_name("threads"),
"Number of worker threads.",
value=self.server.nthreads,
)
yield GaugeMetricFamily(
self.build_name("latency_seconds"),
"Latency of worker connection.",
value=self.server.latency,
)
# all metrics using digests require crick to be installed
# the following metrics will export NaN, if the corresponding digests are None
if self.crick_available:
yield GaugeMetricFamily(
self.build_name("tick_duration_median_seconds"),
"Median tick duration at worker.",
value=self.server.digests["tick-duration"].components[1].quantile(50),
)
yield GaugeMetricFamily(
self.build_name("task_duration_median_seconds"),
"Median task runtime at worker.",
value=self.server.digests["task-duration"].components[1].quantile(50),
)
yield GaugeMetricFamily(
self.build_name("transfer_bandwidth_median_bytes"),
"Bandwidth for transfer at worker in Bytes.",
value=self.server.digests["transfer-bandwidth"]
.components[1]
.quantile(50),
)
class PrometheusHandler(RequestHandler):
_initialized = False
def __init__(self, *args, **kwargs):
import prometheus_client
super().__init__(*args, **kwargs)
if PrometheusHandler._initialized:
return
prometheus_client.REGISTRY.register(WorkerMetricCollector(self.server))
PrometheusHandler._initialized = True
def get(self):
import prometheus_client
self.write(prometheus_client.generate_latest())
self.set_header("Content-Type", "text/plain; version=0.0.4")
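
# Illustrative scrape output (a sketch; the exact metric prefix depends on
# PrometheusCollector.build_name and is assumed here, not verified):
#   dask_worker_tasks{state="stored"} 0.0
#   dask_worker_threads 4.0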
| 1,468 |
502 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alipay.common.tracer.core.appender.manager;
import com.alipay.common.tracer.core.appender.builder.XStringBuilder;
import com.alipay.common.tracer.core.appender.encoder.SpanEncoder;
import com.alipay.common.tracer.core.appender.self.Timestamp;
import com.alipay.common.tracer.core.context.span.SofaTracerSpanContext;
import com.alipay.common.tracer.core.span.SofaTracerSpan;
import com.alipay.common.tracer.core.utils.StringUtils;
import java.io.IOException;
import java.util.Map;
/**
* ClientSpanEncoder
*
* @author yangguanchao
* @since 2017/07/01
*/
public class ClientSpanEncoder implements SpanEncoder<SofaTracerSpan> {
@Override
public String encode(SofaTracerSpan span) throws IOException {
XStringBuilder xsb = new XStringBuilder();
SofaTracerSpanContext spanContext = span.getSofaTracerSpanContext();
xsb.reset();
xsb.append(Timestamp.format(span.getEndTime()));
//traceId
xsb.append(spanContext.getTraceId());
//spanId
xsb.append(spanContext.getSpanId());
//tags string
xsb.append(StringUtils.mapToString(span.getTagsWithStr()));
//tags bool
Map<String, Boolean> tagsBool = span.getTagsWithBool();
StringBuilder tagsBoolBuild = new StringBuilder();
for (Map.Entry<String, Boolean> entry : tagsBool.entrySet()) {
tagsBoolBuild.append(entry.getKey()).append(StringUtils.EQUAL)
.append(entry.getValue().toString()).append(StringUtils.AND);
}
xsb.append(tagsBoolBuild.toString());
//tags number
Map<String, Number> tagsNum = span.getTagsWithNumber();
StringBuilder tagsNumBuild = new StringBuilder();
for (Map.Entry<String, Number> entry : tagsNum.entrySet()) {
tagsNumBuild.append(entry.getKey()).append(StringUtils.EQUAL)
.append(entry.getValue().toString()).append(StringUtils.AND);
}
xsb.append(tagsNumBuild.toString());
//baggage
Map<String, String> baggage = spanContext.getBizBaggage();
xsb.appendEnd(StringUtils.mapToString(baggage));
return xsb.toString();
}
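
    // Example encoded line (illustrative; assumes XStringBuilder joins fields
    // with commas, which is an assumption, not verified here):
    //   2017-07-01 12:00:00.000,<traceId>,0.1,k=v&,flag=true&,n=1&,baggage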
}
| 1,100 |
995 | /*
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <fizz/protocol/BrotliCertificateDecompressor.h>
#include <brotli/decode.h>
using namespace folly;
namespace fizz {
namespace {
size_t brotliDecompressImpl(
folly::ByteRange input,
uint8_t* output,
size_t outputSize) {
auto status =
BrotliDecoderDecompress(input.size(), input.data(), &outputSize, output);
if (status != BrotliDecoderResult::BROTLI_DECODER_RESULT_SUCCESS) {
throw std::runtime_error("Decompressing certificate failed");
}
return outputSize;
}
} // namespace
CertificateCompressionAlgorithm BrotliCertificateDecompressor::getAlgorithm()
const {
return CertificateCompressionAlgorithm::brotli;
}
CertificateMsg BrotliCertificateDecompressor::decompress(
const CompressedCertificate& cc) {
if (cc.algorithm != getAlgorithm()) {
throw std::runtime_error(
"Compressed certificate uses non-brotli algorithm: " +
toString(cc.algorithm));
}
if (cc.uncompressed_length > kMaxHandshakeSize) {
throw std::runtime_error(
"Compressed certificate exceeds maximum certificate message size");
}
auto rawCertMessage = IOBuf::create(cc.uncompressed_length);
auto compRange = cc.compressed_certificate_message->coalesce();
auto decompressedSize = brotliDecompressImpl(
compRange, rawCertMessage->writableData(), cc.uncompressed_length);
if (decompressedSize != cc.uncompressed_length) {
throw std::runtime_error("Uncompressed length incorrect");
}
rawCertMessage->append(decompressedSize);
return decode<CertificateMsg>(std::move(rawCertMessage));
}
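
// Illustrative use (a sketch, not part of the original file; assumes a
// CompressedCertificate `cc` obtained elsewhere):
//   BrotliCertificateDecompressor decompressor;
//   CertificateMsg msg = decompressor.decompress(cc);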
} // namespace fizz
| 602 |
450 | <filename>lib/Support/Debug.cpp
//===-- Debug.cpp - An easy way to add debug output to your code ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a handy way of adding debugging information to your
// code, without it being enabled all of the time, and without having to add
// command line options to enable it.
//
// In particular, just wrap your code with the DEBUG() macro, and it will be
// enabled automatically if you specify '-debug' on the command-line.
// Alternatively, you can also use the SET_DEBUG_TYPE("foo") macro to specify
// that your debug code belongs to class "foo". Then, on the command line, you
// can specify '-debug-only=foo' to enable JUST the debug information for the
// foo class.
//
// When compiling without assertions, the -debug-* options and all code in
// DEBUG() statements disappears, so it does not affect the runtime of the code.
//
//===----------------------------------------------------------------------===//
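//
// Illustrative usage (a sketch following the comment above; the exact macro
// spellings are taken from that comment, not verified against the header):
//
//   SET_DEBUG_TYPE("foo");
//   DEBUG(dbgs() << "value = " << x << "\n");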
#include "onnc/Support/Debug.h"
#include "onnc/Option/CommandLine.h"
#include "onnc/Support/ManagedStatic.h"
#undef isCurrentDebugType
#undef setCurrentDebugType
#undef setCurrentDebugTypes
using namespace onnc;
// Even though LLVM might be built with NDEBUG, define symbols that the code
// built without NDEBUG can depend on via the onnc/Support/Debug.h header.
namespace onnc {
/// Exported boolean set by the -debug option.
/// Return true if the specified string is the debug type
/// specified on the command line, or if none was specified on the command line
/// with the -debug-only=X option.
bool isCurrentDebugType(const char *DebugType)
{
if (DebugMsg::getCurrentDebugType()->empty())
return true;
// See if DebugType is in list. Note: do not use find() as that forces us to
// unnecessarily create an std::string instance.
for (auto &d : *DebugMsg::getCurrentDebugType()) {
if (d == DebugType)
return true;
}
return false;
}
/// Set the current debug type, as if the -debug-only=X
/// option were specified. Note that DebugOpt also needs to be set to true for
/// debug output to be produced.
///
void setCurrentDebugTypes(const char **Types, unsigned Count);
void setCurrentDebugType(const char *Type) { setCurrentDebugTypes(&Type, 1); }
void setCurrentDebugTypes(const char **Types, unsigned Count)
{
DebugMsg::getCurrentDebugType()->clear();
for (size_t T = 0; T < Count; ++T)
DebugMsg::getCurrentDebugType()->push_back(Types[T]);
}
} // namespace onnc
#ifndef NDEBUG
namespace {
struct DebugOption {
void operator=(const bool &Val) const
{
if (Val == true)
DebugMsg::getDebugFlag() = true;
}
};
struct DebugOnlyOpt {
void operator=(const std::string &Val) const
{
DebugMsg::setDebugOnlyOpt(Val);
}
};
} // namespace
namespace onnc {
DebugMsg::DebugMsg()
{
// -debug - Command line option to enable the DEBUG statements in the passes.
// This flag may only be enabled in debug builds.
static cl::opt<DebugOption, cl::OptParser<bool> > debug_option(
"debug", cl::kShort, cl::kOptional, cl::kValueDisallowed, cl::init(false),
cl::desc("Enable debug output"));
static cl::opt<DebugOnlyOpt, cl::OptParser<std::string> > debug_only(
"debug-only", cl::kShort, cl::kOptional, cl::kValueRequired,
cl::desc("Enable a specific type of debug output (comma "
"separated list of types)"));
}
std::vector<std::string> *DebugMsg::getCurrentDebugType()
{
static std::vector<std::string> current_debug_type;
    return &current_debug_type;
}
bool &DebugMsg::getDebugFlag()
{
static bool debug_flag = false;
return debug_flag;
}
void DebugMsg::setDebugOnlyOpt(const std::string &pVal)
{
if (pVal.empty())
return;
DebugMsg::getDebugFlag() = true;
std::vector<StringRef> dbgTypes;
StringRef(pVal).split(dbgTypes, ',', -1, false);
for (auto dbgType : dbgTypes)
DebugMsg::getCurrentDebugType()->push_back(dbgType);
}
} // namespace onnc
/// dbgs - Return a debug stream.
onnc::OStream &onnc::dbgs() { return onnc::outs(); }
#else
// Avoid "has no symbols" warning.
namespace onnc {
/// dbgs - Return errs().
onnc::OStream &dbgs() { return errs(); }
} // namespace onnc
#endif
| 1,367 |
2,504 | <gh_stars>1000+
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tencent is pleased to support the open source community by making behaviac available.
//
// Copyright (C) 2015-2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at http://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed under the License is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef _BEHAVIAC_COMMON_STRINGCRC_H_
#define _BEHAVIAC_COMMON_STRINGCRC_H_
#include "behaviac/common/config.h"
#include "behaviac/common/assert.h"
#include "behaviac/common/basictypes.h"
#if !BEHAVIAC_RELEASE
#define BEHAVIAC_STRINGID_DEBUG 1
#endif
namespace behaviac {
class BEHAVIAC_API CStringCRC {
public:
typedef uint32_t IDType;
public:
CStringCRC();
virtual ~CStringCRC() {
}
CStringCRC(const CStringCRC& other) : m_value(other.m_value) {
}
explicit CStringCRC(IDType crc) : m_value(crc) {
}
explicit CStringCRC(const char* str);
void ParseString(const char* content);
const IDType& GetUniqueID() const {
return this->m_value;
}
void SetUniqueID(IDType crc) {
this->m_value = crc;
}
bool IsValid() const;
static void Cleanup();
private:
IDType m_value;
public:
const char* LogStr() const;
};
#if !BEHAVIAC_CCDEFINE_MSVC
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif//
inline bool operator==(const CStringCRC& r, const CStringCRC& l) {
const CStringCRC* pR = (const CStringCRC*)&r;
const CStringCRC* pL = (const CStringCRC*)&l;
return pR->GetUniqueID() == pL->GetUniqueID();
}
#if !BEHAVIAC_CCDEFINE_MSVC
//#pragma GCC diagnostic pop
#endif
inline bool operator==(const CStringCRC& r, const CStringCRC::IDType l) {
return r.GetUniqueID() == l;
}
inline bool operator==(const CStringCRC::IDType r, const CStringCRC& l) {
return r == l.GetUniqueID();
}
inline bool operator!=(const CStringCRC& r, const CStringCRC& l) {
return r.GetUniqueID() != l.GetUniqueID();
}
inline bool operator!=(const CStringCRC& r, const CStringCRC::IDType l) {
return r.GetUniqueID() != l;
}
inline bool operator!=(const CStringCRC::IDType r, const CStringCRC& l) {
return r != l.GetUniqueID();
}
inline bool operator<(const CStringCRC& r, const CStringCRC& l) {
return r.GetUniqueID() < l.GetUniqueID();
}
}//namespace behaviac
#endif //#ifndef _BEHAVIAC_COMMON_STRINGCRC_H_
| 1,210 |
2,498 | /*
* chia.h
*
* Created on: May 24, 2021
* Author: mad
*/
#ifndef INCLUDE_CHIA_CHIA_H_
#define INCLUDE_CHIA_CHIA_H_
#include <chrono>
#include <cstdint>
#include <string>
#include <chia/settings.h>
// Unique plot id which will be used as a ChaCha8 key, and determines the PoSpace.
const uint32_t kIdLen = 32;
// Extra bits of output from the f functions. Instead of being a function from k -> k bits,
// it's a function from k -> k + kExtraBits bits. This allows less collisions in matches.
// Refer to the paper for mathematical motivations.
static constexpr uint8_t kExtraBits = 6;
// Convenience variable
static constexpr uint8_t kExtraBitsPow = 1 << kExtraBits;
// Distance between matching entries is stored in the offset
static constexpr uint32_t kOffsetSize = 10;
// ChaCha8 block size
const uint16_t kF1BlockSizeBits = 512;
// B and C groups which constitute a bucket, or BC group. These groups determine how
// elements match with each other. Two elements must be in adjacent buckets to match.
static constexpr uint16_t kB = 119;
static constexpr uint16_t kC = 127;
static constexpr uint16_t kBC = kB * kC;
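// Worked example (added for clarity): kBC = 119 * 127 = 15113, so an entry
// with f-value y falls into bucket y / kBC, and two entries can only match
// when their bucket indices are adjacent.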
// This (times k) is the length of the metadata that must be kept for each entry. For example,
// for a table 4 entry, we must keep 4k additional bits for each entry, which is used to
// compute f5.
static const uint8_t kVectorLens[] = {0, 0, 1, 2, 4, 4, 3, 2};
// The number of bits in the stub is k minus this value
static constexpr uint8_t kStubMinusBits = 3;
// EPP for the final file, the higher this is, the less variability, and lower delta
// Note: if this is increased, ParkVector size must increase
static constexpr uint32_t kEntriesPerPark = 2048;
// To store deltas for EPP entries, the average delta must be less than this number of bits
static constexpr double kMaxAverageDeltaTable1 = 5.6;
static constexpr double kMaxAverageDelta = 3.5;
// How many f7s per C1 entry, and how many C1 entries per C2 entry
static constexpr uint32_t kCheckpoint1Interval = 10000;
static constexpr uint32_t kCheckpoint2Interval = 10000;
// C3 entries contain deltas for f7 values, the max average size is the following
static constexpr double kC3BitsPerEntry = 2.4;
// The ANS encoding R values for the 7 final plot tables (only six initializers
// are given; the remaining element is zero-initialized by aggregate init)
// Tweaking the R values might allow lowering of the max average deltas, and reducing final
// plot size
static const double kRValues[7] = {4.7, 2.75, 2.75, 2.7, 2.6, 2.45};
// The ANS encoding R value for the C3 checkpoint table
static constexpr double kC3R = 1.0;
// Plot format (no compatibility guarantees with other formats). If any of the
// above contants are changed, or file format is changed, the version should
// be incremented.
static const std::string kFormatDescription = "v1.0";
struct table_t {
std::string file_name;
size_t num_entries = 0;
};
#endif /* INCLUDE_CHIA_CHIA_H_ */
| 898 |
2,269 | <reponame>Gizallina898/nar
/*
* Copyright 2019. Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.android.apps.santatracker.doodles.presenttoss;
public interface PresentTossSprites {
int[] present_throw_tutorials = {
R.drawable.present_throw_tutorials_01,
R.drawable.present_throw_tutorials_02,
R.drawable.present_throw_tutorials_03,
R.drawable.present_throw_tutorials_04,
R.drawable.present_throw_tutorials_05,
R.drawable.present_throw_tutorials_06,
R.drawable.present_throw_tutorials_07,
R.drawable.present_throw_tutorials_08,
R.drawable.present_throw_tutorials_09,
R.drawable.present_throw_tutorials_10,
R.drawable.present_throw_tutorials_11,
R.drawable.present_throw_tutorials_12,
R.drawable.present_throw_tutorials_13,
R.drawable.present_throw_tutorials_14,
};
int[] present_throw_def_green_left = {
R.drawable.present_throw_blocker_green_01,
R.drawable.present_throw_blocker_green_02,
R.drawable.present_throw_blocker_green_03,
R.drawable.present_throw_blocker_green_04,
R.drawable.present_throw_blocker_green_05,
R.drawable.present_throw_blocker_green_06,
R.drawable.present_throw_blocker_green_07,
R.drawable.present_throw_blocker_green_08,
};
int[] present_throw_def_green_right = {
R.drawable.present_throw_blocker_green_09,
R.drawable.present_throw_blocker_green_10,
R.drawable.present_throw_blocker_green_11,
R.drawable.present_throw_blocker_green_12,
R.drawable.present_throw_blocker_green_13,
R.drawable.present_throw_blocker_green_14,
R.drawable.present_throw_blocker_green_15,
R.drawable.present_throw_blocker_green_16,
};
int[] present_throw_def_green_emerge = {
R.drawable.present_throw_newblocker_green_01,
R.drawable.present_throw_newblocker_green_02,
R.drawable.present_throw_newblocker_green_03,
R.drawable.present_throw_newblocker_green_04,
R.drawable.present_throw_newblocker_green_05,
R.drawable.present_throw_newblocker_green_06,
R.drawable.present_throw_newblocker_green_07,
R.drawable.present_throw_newblocker_green_08,
R.drawable.present_throw_newblocker_green_09,
R.drawable.present_throw_newblocker_green_10,
R.drawable.present_throw_newblocker_green_11,
R.drawable.present_throw_newblocker_green_12,
};
int[] present_throw_def_green_blocking = {
R.drawable.present_throw_blocking_green_01,
R.drawable.present_throw_blocking_green_02,
R.drawable.present_throw_blocking_green_03,
R.drawable.present_throw_blocking_green_04,
R.drawable.present_throw_blocking_green_05,
R.drawable.present_throw_blocking_green_06,
R.drawable.present_throw_blocking_green_07,
R.drawable.present_throw_blocking_green_08,
R.drawable.present_throw_blocking_green_09,
R.drawable.present_throw_blocking_green_10,
R.drawable.present_throw_blocking_green_11,
R.drawable.present_throw_blocking_green_12,
};
int[] present_throw_def_red_left = {
R.drawable.present_throw_blocker_red_01,
R.drawable.present_throw_blocker_red_02,
R.drawable.present_throw_blocker_red_03,
R.drawable.present_throw_blocker_red_04,
R.drawable.present_throw_blocker_red_05,
R.drawable.present_throw_blocker_red_06,
R.drawable.present_throw_blocker_red_07,
R.drawable.present_throw_blocker_red_08,
};
int[] present_throw_def_red_right = {
R.drawable.present_throw_blocker_red_09,
R.drawable.present_throw_blocker_red_10,
R.drawable.present_throw_blocker_red_11,
R.drawable.present_throw_blocker_red_12,
R.drawable.present_throw_blocker_red_13,
R.drawable.present_throw_blocker_red_14,
R.drawable.present_throw_blocker_red_15,
R.drawable.present_throw_blocker_red_16,
};
int[] present_throw_def_red_emerge = {
R.drawable.present_throw_newblocker_red_01,
R.drawable.present_throw_newblocker_red_02,
R.drawable.present_throw_newblocker_red_03,
R.drawable.present_throw_newblocker_red_04,
R.drawable.present_throw_newblocker_red_05,
R.drawable.present_throw_newblocker_red_06,
R.drawable.present_throw_newblocker_red_07,
R.drawable.present_throw_newblocker_red_08,
R.drawable.present_throw_newblocker_red_09,
R.drawable.present_throw_newblocker_red_10,
R.drawable.present_throw_newblocker_red_11,
R.drawable.present_throw_newblocker_red_12,
};
int[] present_throw_def_red_blocking = {
R.drawable.present_throw_blocking_red_01,
R.drawable.present_throw_blocking_red_02,
R.drawable.present_throw_blocking_red_03,
R.drawable.present_throw_blocking_red_04,
R.drawable.present_throw_blocking_red_05,
R.drawable.present_throw_blocking_red_06,
R.drawable.present_throw_blocking_red_07,
R.drawable.present_throw_blocking_red_08,
R.drawable.present_throw_blocking_red_09,
R.drawable.present_throw_blocking_red_10,
R.drawable.present_throw_blocking_red_11,
R.drawable.present_throw_blocking_red_12,
};
int[] present_throw_def_orange_left = {
R.drawable.present_throw_blocker_orange_01,
R.drawable.present_throw_blocker_orange_02,
R.drawable.present_throw_blocker_orange_03,
R.drawable.present_throw_blocker_orange_04,
R.drawable.present_throw_blocker_orange_05,
R.drawable.present_throw_blocker_orange_06,
R.drawable.present_throw_blocker_orange_07,
R.drawable.present_throw_blocker_orange_08,
};
int[] present_throw_def_orange_right = {
R.drawable.present_throw_blocker_orange_09,
R.drawable.present_throw_blocker_orange_10,
R.drawable.present_throw_blocker_orange_11,
R.drawable.present_throw_blocker_orange_12,
R.drawable.present_throw_blocker_orange_13,
R.drawable.present_throw_blocker_orange_14,
R.drawable.present_throw_blocker_orange_15,
R.drawable.present_throw_blocker_orange_16,
};
int[] present_throw_def_orange_emerge = {
R.drawable.present_throw_newblocker_orange_01,
R.drawable.present_throw_newblocker_orange_02,
R.drawable.present_throw_newblocker_orange_03,
R.drawable.present_throw_newblocker_orange_04,
R.drawable.present_throw_newblocker_orange_05,
R.drawable.present_throw_newblocker_orange_06,
R.drawable.present_throw_newblocker_orange_07,
R.drawable.present_throw_newblocker_orange_08,
R.drawable.present_throw_newblocker_orange_09,
R.drawable.present_throw_newblocker_orange_10,
R.drawable.present_throw_newblocker_orange_11,
R.drawable.present_throw_newblocker_orange_12,
};
int[] present_throw_def_orange_blocking = {
R.drawable.present_throw_blocking_orange_01,
R.drawable.present_throw_blocking_orange_02,
R.drawable.present_throw_blocking_orange_03,
R.drawable.present_throw_blocking_orange_04,
R.drawable.present_throw_blocking_orange_05,
R.drawable.present_throw_blocking_orange_06,
R.drawable.present_throw_blocking_orange_07,
R.drawable.present_throw_blocking_orange_08,
R.drawable.present_throw_blocking_orange_09,
R.drawable.present_throw_blocking_orange_10,
R.drawable.present_throw_blocking_orange_11,
R.drawable.present_throw_blocking_orange_12,
};
int[] present_throw_reloading = {
R.drawable.present_throw_reloading_01,
R.drawable.present_throw_reloading_02,
R.drawable.present_throw_reloading_03,
};
int[] present_throw_celebrate = {
R.drawable.present_throw_celebrating_01, R.drawable.present_throw_celebrating_02,
};
int[] present_throw_idle = {
R.drawable.present_throw_throwing_01,
};
int[] present_throw_santabag = {
R.drawable.present_throw_santabag,
};
int[] present_throw_thrownpresent = {
R.drawable.present_throw_thrownpresent_orange,
};
int[] present_throw_floor = {
R.drawable.present_throw_floor,
};
int[] present_throw_elfbag = {
R.drawable.present_throw_elfbag,
};
int[] orange_present_falling = {
R.drawable.orange_present1,
R.drawable.orange_present2,
R.drawable.orange_present3,
R.drawable.orange_present4,
};
int[] present_toss_target = {
R.drawable.waterpolo_target,
};
int[] present_throw_throwing = {
R.drawable.present_throw_throwing_01,
R.drawable.present_throw_throwing_02,
R.drawable.present_throw_throwing_03,
R.drawable.present_throw_throwing_04,
R.drawable.present_throw_throwing_05,
R.drawable.present_throw_throwing_06,
};
}
| 4,318 |
2,100 | <reponame>isabella232/gapid
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __GAPIL_RUNTIME_REPLAY_H__
#define __GAPIL_RUNTIME_REPLAY_H__
#include "gapil/runtime/cc/runtime.h"
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
typedef struct gapil_replay_resource_info {
uint8_t id[20];
uint32_t size;
} gapil_replay_resource_info_t;
typedef struct gapil_replay_data_t {
// instructions currently being built, or opcodes post build.
buffer stream;
// buffer of gapil_replay_resource_info representing all the resources used by
// the replay.
buffer resources;
// buffer of constant data used by the replay.
buffer constants;
// function used to emit the call of the current command
void (*call)(context*);
// additional data referenced by replay.cpp.
void* data_ex;
// Alignment of a pointer for the replay device.
// TODO: Remove. This is only here to match old replay implementation.
uint32_t pointer_alignment;
} gapil_replay_data;
////////////////////////////////////////////////////////////////////////////////
// Runtime API implemented in replay.cpp //
////////////////////////////////////////////////////////////////////////////////
// TODO
void gapil_replay_build(context* ctx, gapil_replay_data* data);
// gapil_replay_remap_func is a function that can be used to return a remapping
// key for the given remapped value at ptr.
typedef uint64_t gapil_replay_remap_func(context* ctx, void* ptr);
// gapil_replay_register_remap_func registers the given remapping function for
// the API type.
void gapil_replay_register_remap_func(const char* api, const char* type,
gapil_replay_remap_func* func);
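
// Illustrative registration (a sketch, not from this header; the API/type
// names and the key derivation are made-up placeholders):
//   uint64_t remap_my_handle(context* ctx, void* ptr) {
//     return *(uint64_t*)ptr;
//   }
//   gapil_replay_register_remap_func("my_api", "my_handle", &remap_my_handle);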
#ifndef DECL_GAPIL_REPLAY_FUNC
#define DECL_GAPIL_REPLAY_FUNC(RETURN, NAME, ...) RETURN NAME(__VA_ARGS__)
#endif
// gapil_replay_init_data initializes the gapil_replay_data structure.
// Note there are fields that the compiler will initialize itself.
DECL_GAPIL_REPLAY_FUNC(void, gapil_replay_init_data, context* ctx,
gapil_replay_data* data);
// gapil_replay_term_data frees fields of the gapil_replay_data structure
// that were initialized by gapil_replay_init_data.
DECL_GAPIL_REPLAY_FUNC(void, gapil_replay_term_data, context* ctx,
gapil_replay_data* data);
// gapil_replay_allocate_memory allocates size bytes from the volatile memory
// address space with the guaranteed minimum alignment.
// This memory remains allocated for the entire duration of the replay.
DECL_GAPIL_REPLAY_FUNC(uint64_t, gapil_replay_allocate_memory, context* ctx,
gapil_replay_data* data, uint64_t size,
uint64_t alignment);
// gapil_replay_reserve_memory reserves the given capture memory range for
// replay. start is the address of the first byte in the memory range to
// reserve.
// size is the number of bytes to reserve.
// ns is the address namespace.
// min_alignment is the minimum expected alignment in bytes for this block when
// recreated for replay.
DECL_GAPIL_REPLAY_FUNC(void, gapil_replay_reserve_memory, context* ctx,
gapil_replay_data* data, slice* sli, uint32_t ns,
uint32_t min_alignment);
// gapil_replay_add_resource is called whenever a memory range needs to be
// encoded as a resource. The resource identifier is returned.
DECL_GAPIL_REPLAY_FUNC(uint32_t, gapil_replay_add_resource, context* ctx,
gapil_replay_data* data, slice* slice);
// gapil_replay_add_constant adds data to the constants buffer, returning the
// address of the constant in the constant address space.
// Constants are deduplicated.
DECL_GAPIL_REPLAY_FUNC(uint32_t, gapil_replay_add_constant, context* ctx,
gapil_replay_data* data, void* buf, uint32_t size,
uint32_t alignment);
// gapil_replay_get_remap_func is called to lookup the remapping function for a
// given API type.
DECL_GAPIL_REPLAY_FUNC(gapil_replay_remap_func*, gapil_replay_get_remap_func,
char* api, char* type);
// gapil_replay_add_remapping is called to register a remapped value address by
// key.
DECL_GAPIL_REPLAY_FUNC(void, gapil_replay_add_remapping, context* ctx,
gapil_replay_data* data, uint64_t addr, uint64_t key);
// gapil_replay_lookup_remapping is called to lookup a remapped value address
// that was previously registered with gapil_replay_add_remapping. Returns the
// volatile address if the key is found, otherwise ~0.
DECL_GAPIL_REPLAY_FUNC(uint64_t, gapil_replay_lookup_remapping, context* ctx,
gapil_replay_data* data, uint64_t key);
#undef DECL_GAPIL_REPLAY_FUNC
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif // __GAPIL_RUNTIME_REPLAY_H__ | 1,978 |
1,073 | <reponame>kpdev/llvm-tnt<gh_stars>1000+
//===-- X86WinCOFFObjectWriter.cpp - X86 Win COFF Writer ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCWinCOFFObjectWriter.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
namespace llvm {
class MCObjectWriter;
}
namespace {
class X86WinCOFFObjectWriter : public MCWinCOFFObjectTargetWriter {
public:
X86WinCOFFObjectWriter(bool Is64Bit);
~X86WinCOFFObjectWriter() override;
unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
bool IsCrossSection,
const MCAsmBackend &MAB) const override;
};
}
X86WinCOFFObjectWriter::X86WinCOFFObjectWriter(bool Is64Bit)
: MCWinCOFFObjectTargetWriter(Is64Bit ? COFF::IMAGE_FILE_MACHINE_AMD64
: COFF::IMAGE_FILE_MACHINE_I386) {}
X86WinCOFFObjectWriter::~X86WinCOFFObjectWriter() {}
unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
const MCFixup &Fixup,
bool IsCrossSection,
const MCAsmBackend &MAB) const {
unsigned FixupKind = IsCrossSection ? FK_PCRel_4 : Fixup.getKind();
MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
if (getMachine() == COFF::IMAGE_FILE_MACHINE_AMD64) {
switch (FixupKind) {
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_riprel_4byte_relax:
case X86::reloc_riprel_4byte_relax_rex:
return COFF::IMAGE_REL_AMD64_REL32;
case FK_Data_4:
case X86::reloc_signed_4byte:
case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
return COFF::IMAGE_REL_AMD64_ADDR32NB;
if (Modifier == MCSymbolRefExpr::VK_SECREL)
return COFF::IMAGE_REL_AMD64_SECREL;
return COFF::IMAGE_REL_AMD64_ADDR32;
case FK_Data_8:
return COFF::IMAGE_REL_AMD64_ADDR64;
case FK_SecRel_2:
return COFF::IMAGE_REL_AMD64_SECTION;
case FK_SecRel_4:
return COFF::IMAGE_REL_AMD64_SECREL;
default:
llvm_unreachable("unsupported relocation type");
}
} else if (getMachine() == COFF::IMAGE_FILE_MACHINE_I386) {
switch (FixupKind) {
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
return COFF::IMAGE_REL_I386_REL32;
case FK_Data_4:
case X86::reloc_signed_4byte:
case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
return COFF::IMAGE_REL_I386_DIR32NB;
if (Modifier == MCSymbolRefExpr::VK_SECREL)
        return COFF::IMAGE_REL_I386_SECREL;
return COFF::IMAGE_REL_I386_DIR32;
case FK_SecRel_2:
return COFF::IMAGE_REL_I386_SECTION;
case FK_SecRel_4:
return COFF::IMAGE_REL_I386_SECREL;
default:
llvm_unreachable("unsupported relocation type");
}
} else
llvm_unreachable("Unsupported COFF machine type.");
}
MCObjectWriter *llvm::createX86WinCOFFObjectWriter(raw_pwrite_stream &OS,
bool Is64Bit) {
MCWinCOFFObjectTargetWriter *MOTW = new X86WinCOFFObjectWriter(Is64Bit);
return createWinCOFFObjectWriter(MOTW, OS);
}
| 1,783 |
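The Modifier checks above are the heart of getRelocType: the same FK_Data_4 fixup lowers to an image-relative, section-relative, or absolute relocation depending on the symbol variant. A standalone sketch of the AMD64 selection, using freestanding stand-ins for the LLVM/COFF enums (numeric values per the PE/COFF spec):

#include <cstdint>

enum class Variant { None, CoffImgRel32, SecRel };
enum class Amd64Reloc : uint16_t { Addr32 = 0x2, Addr32NB = 0x3, SecRel = 0xB };

// Mirrors the FK_Data_4 arm of X86WinCOFFObjectWriter::getRelocType.
Amd64Reloc pickData4Reloc(Variant modifier) {
  if (modifier == Variant::CoffImgRel32)
    return Amd64Reloc::Addr32NB;  // @imgrel: image-relative 32-bit RVA
  if (modifier == Variant::SecRel)
    return Amd64Reloc::SecRel;    // @secrel32: offset within the section
  return Amd64Reloc::Addr32;      // plain 32-bit virtual address
}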
495 | package com.github.database.rider.spring.model;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.support.TransactionCallback;
import org.springframework.transaction.support.TransactionTemplate;
import javax.sql.DataSource;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import static org.assertj.core.api.Assertions.assertThat;
/**
* @author <NAME>
*/
@Component
public class EntityUtils {
private static final String INSERT_QUERY = "INSERT INTO Entity (value) VALUES (?)";
private static final String SELECT_QUERY = "SELECT value FROM Entity";
private final JdbcTemplate jdbcTemplate;
private final TransactionTemplate transactionTemplate;
@Autowired
public EntityUtils(DataSource dataSource, PlatformTransactionManager transactionManager) {
this.jdbcTemplate = new JdbcTemplate(dataSource);
this.transactionTemplate = new TransactionTemplate(transactionManager);
}
public void addValues(String... values) {
for (String val : values) {
jdbcTemplate.update(INSERT_QUERY, val);
}
}
public void executeInTransaction(TransactionCallback<?> callback) {
transactionTemplate.execute(callback);
}
public void assertValues(String... values) {
Set<String> expected = new HashSet<>(Arrays.asList(values));
Set<String> actual = new HashSet<>(jdbcTemplate.queryForList(SELECT_QUERY, String.class));
assertThat(actual).containsExactlyElementsOf(expected);
}
}
| 556 |
348 | {"nom":"Paslières","circ":"5ème circonscription","dpt":"Puy-de-Dôme","inscrits":1170,"abs":542,"votants":628,"blancs":15,"nuls":6,"exp":607,"res":[{"nuance":"COM","nom":"M. <NAME>","voix":439},{"nuance":"REM","nom":"M. <NAME>","voix":168}]} | 102 |
852 | <filename>RecoTracker/FinalTrackSelectors/plugins/LwtnnESProducer.cc<gh_stars>100-1000
#include "FWCore/Framework/interface/ModuleFactory.h"
#include "FWCore/Framework/interface/ESProducer.h"
#include "FWCore/ParameterSet/interface/FileInPath.h"
#include "TrackingTools/Records/interface/TrackingComponentsRecord.h"
//from lwtnn
#include "lwtnn/LightweightNeuralNetwork.hh"
#include "lwtnn/parse_json.hh"
#include <fstream>
class LwtnnESProducer : public edm::ESProducer {
public:
LwtnnESProducer(const edm::ParameterSet& iConfig);
~LwtnnESProducer() override = default;
static void fillDescriptions(edm::ConfigurationDescriptions& descriptions);
// TODO: Use of TrackingComponentsRecord is as inadequate as the
// placement of this ESProducer in RecoTracker/FinalTrackSelectors
// (but it works, I tried to create a new record but for some reason
// did not get it to work). Especially if this producer gets used
// wider we should figure out a better record and package.
std::unique_ptr<lwt::LightweightNeuralNetwork> produce(const TrackingComponentsRecord& iRecord);
private:
edm::FileInPath fileName_;
};
LwtnnESProducer::LwtnnESProducer(const edm::ParameterSet& iConfig)
: fileName_(iConfig.getParameter<edm::FileInPath>("fileName")) {
auto componentName = iConfig.getParameter<std::string>("ComponentName");
setWhatProduced(this, componentName);
}
void LwtnnESProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) {
edm::ParameterSetDescription desc;
desc.add<std::string>("ComponentName", "lwtnnESProducer");
desc.add<edm::FileInPath>("fileName", edm::FileInPath());
descriptions.add("lwtnnESProducer", desc);
}
std::unique_ptr<lwt::LightweightNeuralNetwork> LwtnnESProducer::produce(const TrackingComponentsRecord& iRecord) {
std::ifstream jsonfile(fileName_.fullPath().c_str());
auto config = lwt::parse_json(jsonfile);
return std::make_unique<lwt::LightweightNeuralNetwork>(config.inputs, config.layers, config.outputs);
}
#include "FWCore/PluginManager/interface/ModuleDef.h"
#include "FWCore/Framework/interface/MakerMacros.h"
DEFINE_FWK_EVENTSETUP_MODULE(LwtnnESProducer);
| 702 |
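Once produce() has built the network, callers feed it a name-to-value map and read named outputs back. A minimal usage sketch against the lwtnn API (the JSON path and input variable names are illustrative; real input names come from the trained network's configuration):

#include "lwtnn/LightweightNeuralNetwork.hh"
#include "lwtnn/parse_json.hh"
#include <fstream>
#include <iostream>

int main() {
  // Parse the network configuration exactly as the ESProducer does.
  std::ifstream jsonfile("trackSelection.json");  // illustrative path
  auto config = lwt::parse_json(jsonfile);
  lwt::LightweightNeuralNetwork nn(config.inputs, config.layers,
                                   config.outputs);

  // lwt::ValueMap is a std::map<std::string, double>.
  lwt::ValueMap inputs{{"pt", 12.5}, {"eta", -0.7}};
  lwt::ValueMap outputs = nn.compute(inputs);

  for (const auto& kv : outputs)
    std::cout << kv.first << " = " << kv.second << '\n';
  return 0;
}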
525 | /*
* Copyright 2015 Odnoklassniki Ltd, Mail.Ru Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package one.nio.mem;
public interface OffheapMapMXBean {
int getCapacity();
int getCount();
long getExpirations();
long getTimeToLive();
void setTimeToLive(long timeToLive);
long getMinTimeToLive();
void setMinTimeToLive(long minTimeToLive);
long getLockWaitTime();
void setLockWaitTime(long lockWaitTime);
long getCleanupInterval();
void setCleanupInterval(long cleanupInterval);
double getCleanupThreshold();
void setCleanupThreshold(double cleanupThreshold);
int getMaxSamples();
void setMaxSamples(int maxSamples);
}
| 370 |
2,939 | #include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include "mlir/Support/LLVM.h"
#include "mlir/TableGen/Attribute.h"
#include "mlir/TableGen/GenInfo.h"
namespace lumen {
namespace eir {
namespace tablegen {
using ::llvm::formatv;
using ::llvm::Record;
using ::mlir::tblgen::EnumAttrCase;
bool emitEncodingDefs(const llvm::RecordKeeper &recordKeeper,
llvm::raw_ostream &os) {
llvm::emitSourceFileHeader("EIR Term Encoding Definitions", os);
auto flags = recordKeeper.getAllDerivedDefinitions("eir_EC");
auto numFlags = flags.size();
os << "#ifndef EIR_ENCODING_FLAG\n";
os << "#define EIR_ENCODING_FLAG(FLAG, VAL)\n";
os << "#define FIRST_EIR_ENCODING_FLAG(FLAG, VAL) EIR_ENCODING_FLAG(FLAG, "
"VAL)\n";
os << "#endif\n\n";
unsigned flg = 0;
for (const auto *def : flags) {
EnumAttrCase ec(def);
if (flg == 0) {
os << formatv("FIRST_EIR_ENCODING_FLAG({0}, {1})\n", ec.getSymbol(),
llvm::format_hex(ec.getValue(), 4, true));
} else {
os << formatv("EIR_ENCODING_FLAG({0}, {1})\n", ec.getSymbol(),
llvm::format_hex(ec.getValue(), 4, true));
}
flg++;
}
os << "\n\n";
os << "#undef EIR_ENCODING_FLAG\n";
os << "#undef FIRST_EIR_ENCODING_FLAG\n\n";
auto kinds = recordKeeper.getAllDerivedDefinitions("eir_TermKind");
auto numKinds = kinds.size();
os << "#ifndef EIR_TERM_KIND\n";
os << "#define EIR_TERM_KIND(KIND, VAL)\n";
os << "#define FIRST_EIR_TERM_KIND(KIND, VAL) EIR_TERM_KIND(KIND, VAL)\n";
os << "#endif\n\n";
unsigned k = 0;
for (const auto *def : kinds) {
EnumAttrCase ec(def);
if (k == 0) {
os << formatv("FIRST_EIR_TERM_KIND({0}, {1})", ec.getSymbol(),
ec.getValue());
} else {
os << formatv("EIR_TERM_KIND({0}, {1})", ec.getSymbol(),
ec.getValue());
}
k++;
if (k < numKinds) {
os << " \\\n";
}
}
os << "\n\n";
os << "#undef EIR_TERM_KIND\n";
os << "#undef FIRST_EIR_TERM_KIND\n\n";
return false;
}
bool emitRustEncodingDefs(const llvm::RecordKeeper &recordKeeper,
llvm::raw_ostream &os) {
auto kinds = recordKeeper.getAllDerivedDefinitions("eir_TermKind");
auto numKinds = kinds.size();
// TermKind enum, used for exchanging type kind between frontend/backend
os << "#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Hash)]\n";
os << "#[repr(C)]\n";
os << "pub enum TermKind {\n";
for (const auto *def : kinds) {
EnumAttrCase ec(def);
os << formatv(" {0} = {1},\n", ec.getSymbol(), ec.getValue());
}
os << "}\n\n";
os << "impl core::convert::TryFrom<u32> for TermKind {\n";
os << " type Error = ();\n";
os << " fn try_from(value: u32) -> core::result::Result<Self, "
"Self::Error> {\n";
os << " match value {\n";
for (const auto *def : kinds) {
EnumAttrCase ec(def);
os << formatv(" {0} => Ok(Self::{1}),\n", ec.getValue(),
ec.getSymbol());
}
os << " _ => Err(()),\n";
os << " }\n";
os << " }\n";
os << "}\n\n";
// Type enum, used for communicating type information from EIR to MLIR
os << "#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Hash)]\n";
os << "#[repr(u32)]\n";
os << "pub enum Type {\n";
for (const auto *def : kinds) {
EnumAttrCase ec(def);
if (ec.getSymbol() == "Tuple") {
os << formatv(" {0}(u32) = {1},\n", ec.getSymbol(),
ec.getValue());
} else {
os << formatv(" {0} = {1},\n", ec.getSymbol(), ec.getValue());
}
}
os << "}\n\n";
return false;
}
} // namespace tablegen
} // namespace eir
} // namespace lumen
static mlir::GenRegistration genEncodingDefs(
"gen-eir-encoding-defs", "Generates EIR term encoding definitions (.cpp)",
[](const llvm::RecordKeeper &records, llvm::raw_ostream &os) {
return lumen::eir::tablegen::emitEncodingDefs(records, os);
});
static mlir::GenRegistration genRustEncodingDefs(
"gen-rust-eir-encoding-defs",
"Generates EIR term encoding definitions (.rs)",
[](const llvm::RecordKeeper &records, llvm::raw_ostream &os) {
return lumen::eir::tablegen::emitRustEncodingDefs(records, os);
});
| 2,246 |
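The emitted .def-style list is meant to be consumed with the usual X-macro pattern: the includer defines the two macros, includes the generated file, and lets the preprocessor expand one entry per kind; the generated file #undefs the macros itself, as emitted above. A hypothetical consumer (the generated file name is illustrative):

// Build an enum from the generated kind list via the X-macro pattern.
enum class TermKind : unsigned {
#define EIR_TERM_KIND(KIND, VAL) KIND = VAL,
#define FIRST_EIR_TERM_KIND(KIND, VAL) EIR_TERM_KIND(KIND, VAL)
#include "EIREncoding.def"  // illustrative name for the generated output
};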
2,338 | <reponame>acidburn0zzz/llvm-project
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(["libc++"])
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.",
lldb.SBFileSpec("main.cpp"))
# Test printing the vector before enabling any C++ module setting.
self.expect_expr("a", result_type="std::vector<int, std::allocator<int> >")
        # Set the import-std-module setting to 'fallback', which loads the
        # module and retries when an expression fails to parse.
self.runCmd("settings set target.import-std-module fallback")
# Printing the vector still works. This should return the same type
# as before as this shouldn't use a C++ module type (the C++ module type
# is hiding the second template parameter as it's equal to the default
# argument which the C++ module has type info for).
self.expect_expr("a", result_type="std::vector<int, std::allocator<int> >")
# This expression can only parse with a C++ module. LLDB should
# automatically fall back to import the C++ module to get this working.
self.expect_expr("std::max<std::size_t>(0U, a.size())", result_value="3")
        # The 'a' and 'local' parts can be parsed without loading a C++ module and will
# load type/runtime information. The 'std::max...' part will fail to
# parse without a C++ module. Make sure we reset all the relevant parts of
# the C++ parser so that we don't end up with for example a second
# definition of 'local' when retrying.
self.expect_expr("a; local; std::max<std::size_t>(0U, a.size())", result_value="3")
# Try to declare top-level declarations that require a C++ module to parse.
# Top-level expressions don't support importing the C++ module (yet), so
# this should still fail as before.
self.expect("expr --top-level -- int i = std::max(1, 2);", error=True,
substrs=["no member named 'max' in namespace 'std'"])
# Check that diagnostics from the first parse attempt don't show up
# in the C++ module parse attempt. In the expression below, we first
# fail to parse 'std::max'. Then we retry with a loaded C++ module
# and succeed to parse the 'std::max' part. However, the
# trailing 'unknown_identifier' will fail to parse even with the
# loaded module. The 'std::max' diagnostic from the first attempt
# however should not be shown to the user.
self.expect("expr std::max(1, 2); unknown_identifier", error=True,
matching=False,
substrs=["no member named 'max'"])
# The proper diagnostic however should be shown on the retry.
self.expect("expr std::max(1, 2); unknown_identifier", error=True,
substrs=["use of undeclared identifier 'unknown_identifier'"])
# Turn on the 'import-std-module' setting and make sure we import the
# C++ module.
self.runCmd("settings set target.import-std-module true")
# This is still expected to work.
self.expect_expr("std::max<std::size_t>(0U, a.size())", result_value="3")
        # Turn off the 'import-std-module' setting and make sure we don't load
# the module (which should prevent parsing the expression involving
# 'std::max').
self.runCmd("settings set target.import-std-module false")
self.expect("expr std::max(1, 2);", error=True,
substrs=["no member named 'max' in namespace 'std'"])
| 1,522 |
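The expectations above imply a tiny fixture: a three-element std::vector<int> named 'a' and a second variable 'local', both alive at the breakpoint. A plausible reconstruction of that main.cpp, assuming only the shape the test checks for (the actual fixture ships alongside the test):

#include <vector>

int main() {
  std::vector<int> a = {1, 2, 3};  // a.size() == 3, as the test expects
  int local = 0;
  return local;  // Set break point at this line.
}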
348 | <gh_stars>100-1000
{"nom":"Niederentzen","circ":"4ème circonscription","dpt":"Haut-Rhin","inscrits":491,"abs":278,"votants":213,"blancs":15,"nuls":4,"exp":194,"res":[{"nuance":"REM","nom":"<NAME>","voix":102},{"nuance":"LR","nom":"<NAME>","voix":92}]} | 101 |
634 | <filename>zentral/contrib/zendesk/__init__.py
# django
default_app_config = "zentral.contrib.zendesk.apps.ZentralZendeskAppConfig"
| 57 |
778 | /*
* Copyright (C) 2020-2021 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
*/
#pragma once
#include "shared/source/helpers/non_copyable_or_moveable.h"
#include "shared/source/os_interface/linux/drm_neo.h"
#include "shared/source/os_interface/os_interface.h"
#include "level_zero/core/source/device/device.h"
#include "level_zero/tools/source/sysman/linux/firmware_util/firmware_util.h"
#include "level_zero/tools/source/sysman/linux/fs_access.h"
#include "level_zero/tools/source/sysman/linux/pmt/pmt.h"
#include "level_zero/tools/source/sysman/linux/pmu/pmu_imp.h"
#include "level_zero/tools/source/sysman/sysman_imp.h"
#include <map>
namespace L0 {
class PmuInterface;
class LinuxSysmanImp : public OsSysman, NEO::NonCopyableOrMovableClass {
public:
LinuxSysmanImp(SysmanDeviceImp *pParentSysmanDeviceImp);
~LinuxSysmanImp() override;
ze_result_t init() override;
PmuInterface *getPmuInterface();
FirmwareUtil *getFwUtilInterface();
FsAccess &getFsAccess();
ProcfsAccess &getProcfsAccess();
SysfsAccess &getSysfsAccess();
NEO::Drm &getDrm();
PlatformMonitoringTech *getPlatformMonitoringTechAccess(uint32_t subDeviceId);
Device *getDeviceHandle();
SysmanDeviceImp *getSysmanDeviceImp();
std::string getPciRootPortDirectoryPath(std::string realPciPath);
void releasePmtObject();
protected:
FsAccess *pFsAccess = nullptr;
ProcfsAccess *pProcfsAccess = nullptr;
SysfsAccess *pSysfsAccess = nullptr;
NEO::Drm *pDrm = nullptr;
Device *pDevice = nullptr;
PmuInterface *pPmuInterface = nullptr;
FirmwareUtil *pFwUtilInterface = nullptr;
std::map<uint32_t, L0::PlatformMonitoringTech *> mapOfSubDeviceIdToPmtObject;
private:
LinuxSysmanImp() = delete;
SysmanDeviceImp *pParentSysmanDeviceImp = nullptr;
};
} // namespace L0
| 698 |
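getPlatformMonitoringTechAccess presumably resolves a subdevice id through mapOfSubDeviceIdToPmtObject; the usual shape of such an accessor is a guarded map lookup that returns nullptr on a miss. A sketch of that pattern (not the actual Level Zero implementation):

#include <cstdint>
#include <map>

struct PlatformMonitoringTech;  // opaque for this sketch

// Subdevice-id -> PMT accessor over a std::map; nullptr means no PMT
// object was created for that subdevice.
PlatformMonitoringTech* getPmt(
    const std::map<uint32_t, PlatformMonitoringTech*>& pmtMap,
    uint32_t subDeviceId) {
  auto it = pmtMap.find(subDeviceId);
  return it != pmtMap.end() ? it->second : nullptr;
}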
939 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for neural_structured_learning.research.carls.dynamic_embedding_neighbor_cache."""
from research.carls import context
from research.carls import dynamic_embedding_neighbor_cache as de_nb_cache
from research.carls.testing import test_util
import tensorflow as tf
class DynamicEmbeddingNeighborCacheTest(tf.test.TestCase):
def setUp(self):
super(DynamicEmbeddingNeighborCacheTest, self).setUp()
self._config = test_util.default_de_config(2)
self._service_server = test_util.start_kbs_server()
self._kbs_address = 'localhost:%d' % self._service_server.port()
context.clear_all_collection()
def tearDown(self):
self._service_server.Terminate()
super(DynamicEmbeddingNeighborCacheTest, self).tearDown()
def testLookup(self):
init = self._config.knowledge_bank_config.initializer
init.default_embedding.value.append(1)
init.default_embedding.value.append(2)
cache = de_nb_cache.DynamicEmbeddingNeighborCache(
'nb_cache', self._config, service_address=self._kbs_address)
embedding = cache.lookup(['first', 'second', ''])
self.assertAllClose(embedding.numpy(), [[1, 2], [1, 2], [0, 0]])
def testUpdate(self):
cache = de_nb_cache.DynamicEmbeddingNeighborCache(
'nb_cache', self._config, service_address=self._kbs_address)
update_res = cache.update(['first', 'second', ''],
tf.constant([[2.0, 4.0], [4.0, 8.0], [8.0,
16.0]]))
self.assertAllClose(update_res.numpy(), [[2, 4], [4, 8], [0, 0]])
embedding = cache.lookup(['first', 'second', ''])
self.assertAllClose(embedding.numpy(), [[2, 4], [4, 8], [0, 0]])
if __name__ == '__main__':
tf.test.main()
| 875 |
19,824 | {
"main": "dist/keystone-ui-icons-icons-MoreHorizontalIcon.cjs.js",
"module": "dist/keystone-ui-icons-icons-MoreHorizontalIcon.esm.js"
}
| 57 |