max_stars_count (int64: 301 to 224k) | text (string lengths: 6 to 1.05M) | token_count (int64: 3 to 727k) |
---|---|---|
388 |
class Solution {
public:
bool visited[300][300];
int r[4] = {-1,0,0,1};
int c[4] = {0,-1,1,0};
// Checks that (row, col) is inside the grid, has not been visited yet, and contains '1'.
bool val(int row,int col,vector<vector<char>>& grid,int M,int N)
{
return (row<M && col<N && row>=0 && col>=0 && !visited[row][col] && grid[row][col]=='1');
}
//Dfs function for exploring the surrounding cells
void dfs(int i,int j,vector<vector<char>>& grid, int M, int N)
{
visited[i][j] = true;
for(int a=0;a<4;a++)
{
int row = i + r[a];
int col = j + c[a];
if(val(row,col,grid,M,N))
{
dfs(row,col,grid,M,N);
}
}
}
int numIslands(vector<vector<char>>& grid) {
int m = grid.size();
int n = grid[0].size();
memset(visited,0,sizeof(visited));
int island_count = 0;
for(int i=0;i<m;i++)
{
for(int j=0;j<n;j++)
{
if(!visited[i][j] && grid[i][j]=='1')
{
dfs(i,j,grid,m,n);
island_count++; // Island count is incremented when there is a cell that has not been visited and contains a '1'.
//Dfs function marks the other connected '1's in the visited matrix.
}
}
}
return island_count;
}
};
/* Time complexity : O(m x n), since every cell is visited at most once.
Space complexity : O(m x n) for the visited matrix, plus up to O(m x n) recursion depth for the DFS in the worst case. */
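// --- Hypothetical usage sketch (assumption: not part of the original solution) ---
// Counts the islands in a small sample grid; assumes <cstdio>, <vector> and
// <cstring> are included at the top of the file and the Solution class above is in scope.
int main() {
    std::vector<std::vector<char>> grid = {
        {'1','1','0','0'},
        {'1','0','0','1'},
        {'0','0','1','1'}
    };
    Solution s;
    std::printf("%d\n", s.numIslands(grid)); // two separate islands -> prints 2
    return 0;
}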
| 871 |
2,338 |
// RUN: %clang_cc1 -fsyntax-only -verify -Wall %s
namespace test1 {
static int abc = 42; // expected-warning {{variable 'abc' is not needed and will not be emitted}}
namespace {
template <typename T> int abc_template = 0;
template <> int abc_template<int> = 0; // expected-warning {{variable 'abc_template<int>' is not needed and will not be emitted}}
} // namespace
template <typename T>
int foo(void) {
return abc + abc_template<int> + abc_template<long>;
}
}
namespace test2 {
struct bah {
};
namespace {
struct foo : bah {
static char bar;
virtual void zed();
};
void foo::zed() {
bar++;
}
char foo::bar=0;
}
bah *getfoo() {
return new foo();
}
}
| 320 |
1,745 |
//********************************* bs::framework - Copyright 2018-2019 <NAME> ************************************//
//*********** Licensed under the MIT license. See LICENSE.md for full terms. This notice is not to be removed. ***********//
#include "BsScriptParticleEmitterShape.generated.h"
#include "BsMonoMethod.h"
#include "BsMonoClass.h"
#include "BsMonoUtil.h"
namespace bs
{
ScriptParticleEmitterShapeBase::ScriptParticleEmitterShapeBase(MonoObject* managedInstance)
:ScriptReflectableBase(managedInstance)
{ }
SPtr<ParticleEmitterShape> ScriptParticleEmitterShapeBase::getInternal() const
{
return std::static_pointer_cast<ParticleEmitterShape>(mInternal);
}
ScriptParticleEmitterShape::ScriptParticleEmitterShape(MonoObject* managedInstance, const SPtr<ParticleEmitterShape>& value)
:TScriptReflectable(managedInstance, value)
{
mInternal = value;
}
void ScriptParticleEmitterShape::initRuntimeData()
{
}
MonoObject* ScriptParticleEmitterShape::create(const SPtr<ParticleEmitterShape>& value)
{
if(value == nullptr) return nullptr;
bool dummy = false;
void* ctorParams[1] = { &dummy };
MonoObject* managedInstance = metaData.scriptClass->createInstance("bool", ctorParams);
new (bs_alloc<ScriptParticleEmitterShape>()) ScriptParticleEmitterShape(managedInstance, value);
return managedInstance;
}
}
| 424 |
858 |
//===-- asmstmt.cpp -------------------------------------------------------===//
//
// LDC – the LLVM D compiler
//
// This file originates from work by <NAME> for GDC released under
// the GPL 2 and Artistic licenses. See the LICENSE file for details.
//
//===----------------------------------------------------------------------===//
#include "dmd/declaration.h"
#include "dmd/dsymbol.h"
#include "dmd/errors.h"
#include "dmd/ldcbindings.h"
#include "dmd/scope.h"
#include "dmd/statement.h"
#include "gen/dvalue.h"
#include "gen/functions.h"
#include "gen/irstate.h"
#include "gen/llvm.h"
#include "gen/llvmhelpers.h"
#include "gen/logger.h"
#include "gen/tollvm.h"
#include "ir/irfunction.h"
#include "llvm/IR/InlineAsm.h"
#include <cassert>
#include <cstring>
#include <deque>
#include <string>
#include <sstream>
typedef enum {
Arg_Integer,
Arg_Pointer,
Arg_Memory,
Arg_FrameRelative,
Arg_LocalSize,
Arg_Dollar
} AsmArgType;
typedef enum { Mode_Input, Mode_Output, Mode_Update } AsmArgMode;
struct AsmArg {
Expression *expr;
AsmArgType type;
AsmArgMode mode;
AsmArg(AsmArgType type, Expression *expr, AsmArgMode mode) {
this->type = type;
this->expr = expr;
this->mode = mode;
}
};
struct AsmCode {
std::string insnTemplate;
std::vector<AsmArg> args;
std::vector<bool> regs;
unsigned dollarLabel;
int clobbersMemory;
explicit AsmCode(int n_regs) {
regs.resize(n_regs, false);
dollarLabel = 0;
clobbersMemory = 0;
}
};
struct AsmParserCommon {
virtual ~AsmParserCommon() = default;
virtual void run(Scope *sc, InlineAsmStatement *asmst) = 0;
virtual std::string getRegName(int i) = 0;
};
AsmParserCommon *asmparser = nullptr;
#include "asm-x86.h" // x86 assembly parser
#define ASM_X86_64
#include "asm-x86.h" // x86_64 assembly parser
#undef ASM_X86_64
/**
* Replaces <<func>> with the name of the currently codegen'd function.
*
* This kludge is required to handle labels correctly, as the instruction
* strings for jumps, … are generated during semantic3, but attribute inference
* might change the function type (and hence the mangled name) right at the end
* of semantic3.
*/
static void replace_func_name(IRState *p, std::string &insnt) {
static const std::string needle("<<func>>");
const char *mangle = mangleExact(p->func()->decl);
size_t pos;
while (std::string::npos != (pos = insnt.find(needle))) {
// This will only happen for few instructions, and only once for those.
insnt.replace(pos, needle.size(), mangle);
}
}
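// --- Hypothetical illustration (assumption: not part of the original file) ---
// What the pass does, shown on a plain string with a made-up mangled name:
//   std::string tmpl = "call <<func>>@PLT";
//   // after replace_func_name(irs, tmpl), with mangleExact(...) == "_D3foo3barFZv":
//   // tmpl == "call _D3foo3barFZv@PLT"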
Statement *gccAsmSemantic(GccAsmStatement *s, Scope *sc);
Statement *asmSemantic(AsmStatement *s, Scope *sc) {
if (!s->tokens) {
return nullptr;
}
sc->func->hasReturnExp |= 8;
// GCC-style asm starts with a string literal or a `(`
if (s->tokens->value == TOKstring || s->tokens->value == TOKlparen) {
auto gas = createGccAsmStatement(s->loc, s->tokens);
return gccAsmSemantic(gas, sc);
}
// this is DMD-style asm
sc->func->hasReturnExp |= 32;
auto ias = createInlineAsmStatement(s->loc, s->tokens);
s = ias;
bool err = false;
llvm::Triple const &t = *global.params.targetTriple;
if (!(t.getArch() == llvm::Triple::x86 ||
t.getArch() == llvm::Triple::x86_64)) {
s->error(
"DMD-style `asm { op; }` statements are not supported for the \"%s\" "
"architecture.",
t.getArchName().str().c_str());
errorSupplemental(s->loc, "Use GDC-style `asm { \"op\" : …; }` syntax or "
"`ldc.llvmasm.__asm` instead.");
err = true;
}
if (!global.params.useInlineAsm) {
s->error(
"the `asm` statement is not allowed when the -noasm switch is used");
err = true;
}
if (err) {
if (!global.gag) {
fatal();
}
return s;
}
// puts(toChars());
if (!asmparser) {
if (t.getArch() == llvm::Triple::x86) {
asmparser = new AsmParserx8632::AsmParser;
} else if (t.getArch() == llvm::Triple::x86_64) {
asmparser = new AsmParserx8664::AsmParser;
}
}
asmparser->run(sc, ias);
return s;
}
void AsmStatement_toIR(InlineAsmStatement *stmt, IRState *irs) {
IF_LOG Logger::println("InlineAsmStatement::toIR(): %s", stmt->loc.toChars());
LOG_SCOPE;
// sanity check
assert((irs->func()->decl->hasReturnExp & 40) == 40);
// get asm block
IRAsmBlock *asmblock = irs->asmBlock;
assert(asmblock);
// debug info
gIR->DBuilder.EmitStopPoint(stmt->loc);
if (!stmt->asmcode) {
return;
}
static std::string i_cns = "i";
static std::string p_cns = "i";
static std::string m_cns = "*m";
static std::string mw_cns = "=*m";
static std::string mrw_cns = "+*m";
static std::string memory_name = "memory";
AsmCode *code = static_cast<AsmCode *>(stmt->asmcode);
std::vector<LLValue *> input_values;
std::vector<std::string> input_constraints;
std::vector<LLValue *> output_values;
std::vector<std::string> output_constraints;
std::vector<std::string> clobbers;
// FIXME
//#define HOST_WIDE_INT long
// HOST_WIDE_INT var_frame_offset; // "frame_offset" is a macro
bool clobbers_mem = code->clobbersMemory;
int input_idx = 0;
int n_outputs = 0;
int arg_map[10];
assert(code->args.size() <= 10);
auto arg = code->args.begin();
for (unsigned i = 0; i < code->args.size(); i++, ++arg) {
bool is_input = true;
LLValue *arg_val = nullptr;
std::string cns;
switch (arg->type) {
case Arg_Integer:
arg_val = DtoRVal(arg->expr);
do_integer:
cns = i_cns;
break;
case Arg_Pointer:
assert(arg->expr->op == TOKvar);
arg_val = DtoRVal(arg->expr);
cns = p_cns;
break;
case Arg_Memory:
arg_val = DtoRVal(arg->expr);
switch (arg->mode) {
case Mode_Input:
cns = m_cns;
break;
case Mode_Output:
cns = mw_cns;
is_input = false;
break;
case Mode_Update:
cns = mrw_cns;
is_input = false;
break;
}
break;
case Arg_FrameRelative:
// FIXME
llvm_unreachable("Arg_FrameRelative not supported.");
/* if (auto ve = arg->expr->isVarExp())
arg_val = ve->var->toSymbol()->Stree;
else
assert(0);
if ( getFrameRelativeValue(arg_val, & var_frame_offset) ) {
// arg_val = irs->integerConstant(var_frame_offset);
cns = i_cns;
} else {
this->error("%s", "argument not frame relative");
return;
}
if (arg->mode != Mode_Input)
clobbers_mem = true;
break;*/
case Arg_LocalSize:
// FIXME
llvm_unreachable("Arg_LocalSize not supported.");
/* var_frame_offset = cfun->x_frame_offset;
if (var_frame_offset < 0)
var_frame_offset = - var_frame_offset;
arg_val = irs->integerConstant( var_frame_offset );*/
goto do_integer;
default:
llvm_unreachable("Unknown inline asm reference type.");
}
if (is_input) {
arg_map[i] = --input_idx;
input_values.push_back(arg_val);
input_constraints.push_back(cns);
} else {
arg_map[i] = n_outputs++;
output_values.push_back(arg_val);
output_constraints.push_back(cns);
}
}
// Telling GCC that callee-saved registers are clobbered makes it preserve
// those registers. This changes the stack from what a naked function
// expects.
// FIXME
// if (! irs->func->naked) {
assert(asmparser);
for (size_t i = 0; i < code->regs.size(); i++) {
if (code->regs[i]) {
clobbers.push_back(asmparser->getRegName(i));
}
}
if (clobbers_mem) {
clobbers.push_back(memory_name);
}
// }
// Remap argument numbers
for (unsigned i = 0; i < code->args.size(); i++) {
if (arg_map[i] < 0) {
arg_map[i] = -arg_map[i] - 1 + n_outputs;
}
}
bool pct = false;
auto p = code->insnTemplate.begin();
auto q = code->insnTemplate.end();
// printf("start: %.*s\n", code->insnTemplateLen, code->insnTemplate);
while (p < q) {
if (pct) {
if (*p >= '0' && *p <= '9') {
// %% doesn't check against nargs
*p = '0' + arg_map[*p - '0'];
pct = false;
} else if (*p == '$') {
pct = false;
}
// assert(*p == '%');// could be 'a', etc. so forget it..
} else if (*p == '$') {
pct = true;
}
++p;
}
IF_LOG {
Logger::cout() << "final asm: " << code->insnTemplate << '\n';
std::ostringstream ss;
ss << "GCC-style output constraints: {";
for (const auto &oc : output_constraints) {
ss << " " << oc;
}
ss << " }";
Logger::println("%s", ss.str().c_str());
ss.str("");
ss << "GCC-style input constraints: {";
for (const auto &ic : input_constraints) {
ss << " " << ic;
}
ss << " }";
Logger::println("%s", ss.str().c_str());
ss.str("");
ss << "GCC-style clobbers: {";
for (const auto &c : clobbers) {
ss << " " << c;
}
ss << " }";
Logger::println("%s", ss.str().c_str());
}
// rewrite GCC-style constraints to LLVM-style constraints
std::string llvmOutConstraints;
std::string llvmInConstraints;
int n = 0;
for (auto &oc : output_constraints) {
// rewrite update constraint to in and out constraints
if (oc[0] == '+') {
assert(oc == mrw_cns && "What else are we updating except memory?");
/* LLVM doesn't support updating operands, so split into an input
* and an output operand.
*/
// Change update operand to pure output operand.
oc = mw_cns;
// Add input operand with same value, with original as "matching
// output".
std::ostringstream ss;
ss << '*' << (n + asmblock->outputcount);
// Must be at the back; unused operands before used ones screw up
// numbering.
input_constraints.push_back(ss.str());
input_values.push_back(output_values[n]);
}
llvmOutConstraints += oc;
llvmOutConstraints += ",";
n++;
}
asmblock->outputcount += n;
for (const auto &ic : input_constraints) {
llvmInConstraints += ic;
llvmInConstraints += ",";
}
std::string clobstr;
for (const auto &c : clobbers) {
clobstr = "~{" + c + "},";
asmblock->clobs.insert(clobstr);
}
IF_LOG {
{
Logger::println("Output values:");
LOG_SCOPE
size_t i = 0;
for (auto ov : output_values) {
Logger::cout() << "Out " << i++ << " = " << *ov << '\n';
}
}
{
Logger::println("Input values:");
LOG_SCOPE
size_t i = 0;
for (auto iv : input_values) {
Logger::cout() << "In " << i++ << " = " << *iv << '\n';
}
}
}
// excessive commas are removed later...
replace_func_name(irs, code->insnTemplate);
// push asm statement
auto asmStmt = new IRAsmStmt;
asmStmt->code = code->insnTemplate;
asmStmt->out_c = llvmOutConstraints;
asmStmt->in_c = llvmInConstraints;
asmStmt->out.insert(asmStmt->out.begin(), output_values.begin(),
output_values.end());
asmStmt->in.insert(asmStmt->in.begin(), input_values.begin(),
input_values.end());
asmStmt->isBranchToLabel = stmt->isBranchToLabel;
asmblock->s.push_back(asmStmt);
}
//////////////////////////////////////////////////////////////////////////////
// rewrite argument indices to the block scope indices
static void remap_outargs(std::string &insnt, size_t nargs, size_t idx) {
static const std::string digits[10] = {"0", "1", "2", "3", "4",
"5", "6", "7", "8", "9"};
assert(nargs <= 10);
static const std::string prefix("<<out");
static const std::string suffix(">>");
std::string argnum;
std::string needle;
char buf[10];
for (unsigned i = 0; i < nargs; i++) {
needle = prefix + digits[i] + suffix;
size_t pos = insnt.find(needle);
if (std::string::npos != pos) {
sprintf(buf, "%llu", static_cast<unsigned long long>(idx++));
}
while (std::string::npos != (pos = insnt.find(needle))) {
insnt.replace(pos, needle.size(), buf);
}
}
}
// rewrite argument indices to the block scope indices
static void remap_inargs(std::string &insnt, size_t nargs, size_t idx) {
static const std::string digits[10] = {"0", "1", "2", "3", "4",
"5", "6", "7", "8", "9"};
assert(nargs <= 10);
static const std::string prefix("<<in");
static const std::string suffix(">>");
std::string argnum;
std::string needle;
char buf[10];
for (unsigned i = 0; i < nargs; i++) {
needle = prefix + digits[i] + suffix;
size_t pos = insnt.find(needle);
if (std::string::npos != pos) {
sprintf(buf, "%llu", static_cast<unsigned long long>(idx++));
}
while (std::string::npos != (pos = insnt.find(needle))) {
insnt.replace(pos, needle.size(), buf);
}
}
}
void CompoundAsmStatement_toIR(CompoundAsmStatement *stmt, IRState *p) {
IF_LOG Logger::println("CompoundAsmStatement::toIR(): %s",
stmt->loc.toChars());
LOG_SCOPE;
const bool isCompoundGccAsmStatement =
(stmt->statements && stmt->statements->length &&
stmt->statements->front()->isGccAsmStatement());
if (isCompoundGccAsmStatement) {
for (Statement *s : *stmt->statements) {
if (auto gas = s->isGccAsmStatement()) {
Statement_toIR(gas, p);
} else {
s->error("DMD-style assembly statement unsupported within GCC-style "
"`asm` block");
fatal();
}
}
return;
}
// disable inlining by default
if (!p->func()->decl->allowInlining) {
p->func()->setNeverInline();
}
// create asm block structure
assert(!p->asmBlock);
auto asmblock = new IRAsmBlock(stmt);
assert(asmblock);
p->asmBlock = asmblock;
// do asm statements
for (Statement *s : *stmt->statements) {
if (s) {
if (s->isGccAsmStatement()) {
s->error("GCC-style assembly statement unsupported within DMD-style "
"`asm` block");
fatal();
}
Statement_toIR(s, p);
}
}
// build forwarder for in-asm branches to external labels
// this additional asm code sets the __llvm_jump_target variable
// to a unique value that will identify the jump target in
// a post-asm switch
// maps each goto destination to its special value
std::map<LabelDsymbol *, int> gotoToVal;
// location of the special value determining the goto label
// will be set if post-asm dispatcher block is needed
LLValue *jump_target = nullptr;
{
FuncDeclaration *fd = gIR->func()->decl;
OutBuffer mangleBuf;
mangleToBuffer(fd, &mangleBuf);
const char *fdmangle = mangleBuf.peekChars();
// we use a simple static counter to make sure the new end labels are
// unique
static size_t uniqueLabelsId = 0;
std::ostringstream asmGotoEndLabel;
printLabelName(asmGotoEndLabel, fdmangle, "_llvm_asm_end");
asmGotoEndLabel << uniqueLabelsId++;
// initialize the setter statement we're going to build
auto outSetterStmt = new IRAsmStmt;
std::string asmGotoEnd = "\n\tjmp " + asmGotoEndLabel.str() + "\n";
std::ostringstream code;
code << asmGotoEnd;
int n_goto = 1;
for (IRAsmStmt *a : asmblock->s) {
// skip non-branch statements
LabelDsymbol *const targetLabel = a->isBranchToLabel;
if (!targetLabel) {
continue;
}
Identifier *const ident = targetLabel->ident;
// if internal, no special handling is necessary, skip
if (llvm::any_of(asmblock->internalLabels,
[ident](Identifier *i) { return i->equals(ident); })) {
continue;
}
// if we already set things up for this branch target, skip
if (gotoToVal.find(targetLabel) != gotoToVal.end()) {
continue;
}
// record that the jump needs to be handled in the post-asm dispatcher
gotoToVal[targetLabel] = n_goto;
// provide an in-asm target for the branch and set value
IF_LOG Logger::println(
"statement '%s' references outer label '%s': creating forwarder",
a->code.c_str(), ident->toChars());
printLabelName(code, fdmangle, ident->toChars());
code << ":\n\t";
code << "movl $<<in" << n_goto << ">>, $<<out0>>\n";
// FIXME: Store the value -> label mapping somewhere, so it can be
// referenced later
outSetterStmt->in.push_back(DtoConstUint(n_goto));
outSetterStmt->in_c += "i,";
code << asmGotoEnd;
++n_goto;
}
if (code.str() != asmGotoEnd) {
// finalize code
outSetterStmt->code = code.str();
outSetterStmt->code += asmGotoEndLabel.str() + ":\n";
// create storage for and initialize the temporary
jump_target = DtoAllocaDump(DtoConstUint(0), 0, "__llvm_jump_target");
// setup variable for output from asm
outSetterStmt->out_c = "=*m,";
outSetterStmt->out.push_back(jump_target);
asmblock->s.push_back(outSetterStmt);
} else {
delete outSetterStmt;
}
}
// build a fall-off-end-properly asm statement
FuncDeclaration *thisfunc = p->func()->decl;
bool useabiret = false;
p->asmBlock->asmBlock->abiret = nullptr;
if (thisfunc->fbody->endsWithAsm() == stmt &&
thisfunc->type->nextOf()->ty != TY::Tvoid) {
// there can't be goto forwarders in this case
assert(gotoToVal.empty());
emitABIReturnAsmStmt(asmblock, stmt->loc, thisfunc);
useabiret = true;
}
// build asm block
std::vector<LLValue *> outargs;
std::vector<LLValue *> inargs;
std::vector<LLType *> outtypes;
std::vector<LLType *> intypes;
std::string out_c;
std::string in_c;
std::string clobbers;
std::string code;
size_t asmIdx = asmblock->retn;
Logger::println("do outputs");
size_t n = asmblock->s.size();
for (size_t i = 0; i < n; ++i) {
IRAsmStmt *a = asmblock->s[i];
assert(a);
size_t onn = a->out.size();
for (size_t j = 0; j < onn; ++j) {
outargs.push_back(a->out[j]);
outtypes.push_back(a->out[j]->getType());
}
if (!a->out_c.empty()) {
out_c += a->out_c;
}
remap_outargs(a->code, onn + a->in.size(), asmIdx);
asmIdx += onn;
}
Logger::println("do inputs");
for (size_t i = 0; i < n; ++i) {
IRAsmStmt *a = asmblock->s[i];
assert(a);
size_t inn = a->in.size();
for (size_t j = 0; j < inn; ++j) {
inargs.push_back(a->in[j]);
intypes.push_back(a->in[j]->getType());
}
if (!a->in_c.empty()) {
in_c += a->in_c;
}
remap_inargs(a->code, inn + a->out.size(), asmIdx);
asmIdx += inn;
if (!code.empty()) {
code += "\n\t";
}
code += a->code;
}
asmblock->s.clear();
// append inputs
out_c += in_c;
// append clobbers
for (const auto &c : asmblock->clobs) {
out_c += c;
}
// remove excessive comma
if (!out_c.empty()) {
out_c.resize(out_c.size() - 1);
}
IF_LOG {
Logger::println("code = \"%s\"", code.c_str());
Logger::println("constraints = \"%s\"", out_c.c_str());
}
// build return types
LLType *retty;
if (asmblock->retn) {
retty = asmblock->retty;
} else {
retty = llvm::Type::getVoidTy(gIR->context());
}
// build argument types
std::vector<LLType *> types;
types.insert(types.end(), outtypes.begin(), outtypes.end());
types.insert(types.end(), intypes.begin(), intypes.end());
llvm::FunctionType *fty = llvm::FunctionType::get(retty, types, false);
IF_LOG Logger::cout() << "function type = " << *fty << '\n';
std::vector<LLValue *> args;
args.insert(args.end(), outargs.begin(), outargs.end());
args.insert(args.end(), inargs.begin(), inargs.end());
IF_LOG {
Logger::cout() << "Arguments:" << '\n';
Logger::indent();
size_t i = 0;
for (auto arg : args) {
Stream cout = Logger::cout();
cout << '$' << i << " ==> " << *arg;
if (!llvm::isa<llvm::Instruction>(arg) &&
!llvm::isa<LLGlobalValue>(arg)) {
cout << '\n';
}
++i;
}
Logger::undent();
}
llvm::InlineAsm *ia = llvm::InlineAsm::get(fty, code, out_c, true);
llvm::CallInst *call = p->ir->CreateCall(
ia, args, retty == LLType::getVoidTy(gIR->context()) ? "" : "asm");
p->addInlineAsmSrcLoc(stmt->loc, call);
IF_LOG Logger::cout() << "Complete asm statement: " << *call << '\n';
// capture abi return value
if (useabiret) {
IRAsmBlock *block = p->asmBlock;
if (block->retfixup) {
block->asmBlock->abiret = (*block->retfixup)(p->ir, call);
} else if (p->asmBlock->retemu) {
block->asmBlock->abiret = DtoLoad(block->asmBlock->abiret);
} else {
block->asmBlock->abiret = call;
}
}
p->asmBlock = nullptr;
// if asm contained external branches, emit goto forwarder code
if (!gotoToVal.empty()) {
assert(jump_target);
// make new blocks
llvm::BasicBlock *bb = p->insertBB("afterasmgotoforwarder");
llvm::LoadInst *val =
p->ir->CreateLoad(jump_target, "__llvm_jump_target_value");
llvm::SwitchInst *sw = p->ir->CreateSwitch(val, bb, gotoToVal.size());
// add all cases
for (const auto &pair : gotoToVal) {
llvm::BasicBlock *casebb = p->insertBBBefore(bb, "case");
sw->addCase(LLConstantInt::get(llvm::IntegerType::get(gIR->context(), 32),
pair.second),
casebb);
p->ir->SetInsertPoint(casebb);
DtoGoto(stmt->loc, pair.first);
}
p->ir->SetInsertPoint(bb);
}
}
//////////////////////////////////////////////////////////////////////////////
void AsmStatement_toNakedIR(InlineAsmStatement *stmt, IRState *irs) {
IF_LOG Logger::println("InlineAsmStatement::toNakedIR(): %s",
stmt->loc.toChars());
LOG_SCOPE;
// is there code?
if (!stmt->asmcode) {
return;
}
AsmCode *code = static_cast<AsmCode *>(stmt->asmcode);
// build asm stmt
replace_func_name(irs, code->insnTemplate);
irs->nakedAsm << "\t" << code->insnTemplate << std::endl;
}
| 9,583 |
999 |
import repokid.hooks as hooks
from repokid.types import RepokidHookInput
from repokid.types import RepokidHookOutput
@hooks.implements_hook("TEST_HOOK", 2)
def function_2(input_dict: RepokidHookInput) -> RepokidHookOutput:
return input_dict
@hooks.implements_hook("TEST_HOOK", 1)
def function_1(input_dict: RepokidHookInput) -> RepokidHookOutput:
return input_dict
| 144 |
32,544 |
package com.baeldung.dagger.intro;
import javax.inject.Singleton;
import dagger.Module;
import dagger.Provides;
/**
* Dagger module for providing vehicles components.
*
* @author <NAME>
*
*/
@Module
public class VehiclesModule {
/**
* Creates an {@link Engine}.
*
* @return an {@link Engine}
*/
@Provides
public Engine provideEngine() {
return new Engine();
}
/**
* Creates a {@link Brand}.
*
* @return a {@link Brand}
*/
@Provides
@Singleton
public Brand provideBrand() {
return new Brand("Baeldung");
}
}
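// --- Hypothetical usage sketch (assumption: not part of the original file) ---
// A Dagger component wired to this module; the interface name and accessor
// methods are illustrative only and require `import dagger.Component;`.
@Singleton
@Component(modules = VehiclesModule.class)
interface VehiclesComponent {
    Engine engine();
    Brand brand();
}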
| 198 |
427 |
<filename>include/swift/SIL/SILDebuggerClient.h
//===--- SILDebuggerClient.h - Interfaces from SILGen to LLDB ---*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines the abstract SILDebuggerClient class.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_SILDEBUGGERCLIENT_H
#define SWIFT_SILDEBUGGERCLIENT_H
#include "swift/AST/DebuggerClient.h"
#include "swift/SIL/SILLocation.h"
#include "swift/SIL/SILValue.h"
namespace swift {
class SILBuilder;
class SILDebuggerClient : public DebuggerClient {
public:
typedef SmallVectorImpl<UnqualifiedLookupResult> ResultVector;
SILDebuggerClient(ASTContext &C) : DebuggerClient(C) { }
virtual ~SILDebuggerClient() = default;
/// DebuggerClient is asked to emit SIL references to locals,
/// permitting SILGen to access them like any other variables.
/// This avoids generation of properties.
virtual SILValue emitLValueForVariable(VarDecl *var,
SILBuilder &builder) = 0;
inline SILDebuggerClient *getAsSILDebuggerClient() {
return this;
}
private:
virtual void anchor();
};
} // namespace swift
#endif
| 484 |
1,108 |
<filename>excalibur/utils/database.py
from ..settings import engine
def initialize_database():
    from ..models import Base
    Base.metadata.create_all(engine)
def reset_database():
    from ..models import Base
    Base.metadata.drop_all(engine)
    initialize_database()
| 87 |
2,346 |
/*
* Copyright 2012 - 2018 <NAME> (<EMAIL>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.traccar.protocol;
import org.traccar.BaseFrameDecoder;
import org.traccar.helper.BufferUtil;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
public class XexunFrameDecoder extends BaseFrameDecoder {
@Override
protected Object decode(
ChannelHandlerContext ctx, Channel channel, ByteBuf buf) throws Exception {
if (buf.readableBytes() < 80) {
return null;
}
int beginIndex = BufferUtil.indexOf("GPRMC", buf);
if (beginIndex == -1) {
beginIndex = BufferUtil.indexOf("GNRMC", buf);
if (beginIndex == -1) {
return null;
}
}
int identifierIndex = BufferUtil.indexOf("imei:", buf, beginIndex, buf.writerIndex());
if (identifierIndex == -1) {
return null;
}
int endIndex = buf.indexOf(identifierIndex, buf.writerIndex(), (byte) ',');
if (endIndex == -1) {
return null;
}
buf.skipBytes(beginIndex - buf.readerIndex());
return buf.readRetainedSlice(endIndex - beginIndex + 1);
}
}
| 672 |
30,023 |
{
"productName": "Elgato Key Light",
"hardwareBoardType": 53,
"firmwareBuildNumber": 192,
"firmwareVersion": "1.0.3",
"serialNumber": "CN11A1A00001",
"displayName": "Frenck",
"features": ["lights"]
}
| 87 |
368 |
<reponame>vfreex/teamtalk
package com.polites.android;
public class FlingAnimation implements Animation {
private float velocityX;
private float velocityY;
private float factor = 0.95f;
private float threshold = 10;
private FlingAnimationListener listener;
@Override
public boolean update(GestureImageView view, long time) {
float seconds = (float) time / 1000.0f;
float dx = velocityX * seconds;
float dy = velocityY * seconds;
velocityX *= factor;
velocityY *= factor;
boolean active = (Math.abs(velocityX) > threshold && Math
.abs(velocityY) > threshold);
if (listener != null) {
listener.onMove(dx, dy);
if (!active) {
listener.onComplete();
}
}
return active;
}
public void setVelocityX(float velocityX) {
this.velocityX = velocityX;
}
public void setVelocityY(float velocityY) {
this.velocityY = velocityY;
}
public void setFactor(float factor) {
this.factor = factor;
}
public void setListener(FlingAnimationListener listener) {
this.listener = listener;
}
}
| 360 |
3,765 |
<filename>pmd-java/src/main/java/net/sourceforge/pmd/lang/java/ast/ASTRUNSIGNEDSHIFT.java
/**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.ast;
import net.sourceforge.pmd.annotation.InternalApi;
/**
* @deprecated Will be removed in 7.0.0. Use {@link ASTShiftExpression#getOperator()}
*/
@Deprecated
public class ASTRUNSIGNEDSHIFT extends AbstractJavaNode {
@InternalApi
@Deprecated
public ASTRUNSIGNEDSHIFT(int id) {
super(id);
}
@InternalApi
@Deprecated
public ASTRUNSIGNEDSHIFT(JavaParser p, int id) {
super(p, id);
}
@Override
public Object jjtAccept(JavaParserVisitor visitor, Object data) {
return visitor.visit(this, data);
}
}
| 311 |
1,252 |
package com.github.glomadrian.roadrunner;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.util.AttributeSet;
import android.util.Log;
import com.github.glomadrian.roadrunner.builder.RoadRunnerBuilder;
import com.github.glomadrian.roadrunner.painter.configuration.PathPainterConfiguration;
import com.github.glomadrian.roadrunner.painter.configuration.factory.PathPainterConfigurationFactory;
import com.github.glomadrian.roadrunner.painter.determinate.DeterminatePainter;
import com.github.glomadrian.roadrunner.painter.determinate.DeterminatePathPainter;
import com.github.glomadrian.roadrunner.painter.determinate.ProgressDeterminatePainter;
import com.github.glomadrian.roadrunner.painter.determinate.factory.DeterminatePainterFactory;
import com.github.glomadrian.roadrunner.path.PathContainer;
import com.github.glomadrian.roadrunner.utils.AssertUtils;
import com.github.glomadrian.roadrunner.utils.RangeUtils;
import java.text.ParseException;
/**
* Created by <NAME> on 08/04/16.
*/
public class ProgressRoadRunner extends RoadRunner {
private static final String TAG = "ProgressRoadRunner";
// Property to create an animation
public static final String PROGRESS = "progress";
private int originalWidth;
private int originalHeight;
private String pathData;
private PathContainer pathContainer;
private ProgressDeterminatePainter progressDeterminatePainter;
private int min = 0;
private int max = 100;
private int progress = 0;
private PathPainterConfiguration pathPainterConfiguration;
private boolean firstDraw = true;
private ProgressRoadRunner(Builder builder) {
super(builder.context);
pathData = builder.pathData;
originalWidth = builder.originalWidth;
originalHeight = builder.originalHeight;
pathPainterConfiguration = PathPainterConfigurationFactory.makeConfiguration(builder, DeterminatePainter.PROGRESS);
}
public ProgressRoadRunner(Context context, AttributeSet attrs) {
super(context, attrs);
initPath(attrs);
initConfiguration(attrs);
}
public ProgressRoadRunner(Context context, AttributeSet attrs, int defStyleAttr) {
super(context, attrs, defStyleAttr);
initPath(attrs);
initConfiguration(attrs);
}
public Paint getPaint() {
return progressDeterminatePainter.getPaint();
}
public void setProgress(int value) {
if (value >= min && value <= max) { // only accept values inside [min, max]
this.progress = value;
float progress = RangeUtils.getFloatValueInRange(min, max, 0f, 1f, value);
if (progressDeterminatePainter != null) {
progressDeterminatePainter.setProgress(progress);
}
}
}
public int getProgress() {
return progress;
}
public int getMin() {
return min;
}
public void setMin(int min) {
this.min = min;
}
public int getMax() {
return max;
}
public void setMax(int max) {
this.max = max;
}
@Override
public void setColor(int color) {
progressDeterminatePainter.setColor(color);
}
@Override
protected void onSizeChanged(int w, int h, int oldw, int oldh) {
super.onSizeChanged(w, h, oldw, oldh);
try {
pathContainer = buildPathData(w, h, pathData, originalWidth, originalHeight);
initPathPainter();
} catch (ParseException e) {
Log.e(TAG, "Path parse exception:", e);
} catch (NullPointerException e) {
Log.e(TAG, "Path data or original sizes are not initialized yet.");
}
}
@Override
protected void onDraw(Canvas canvas) {
if (progressDeterminatePainter == null)
return;
if (!firstDraw) {
progressDeterminatePainter.paintPath(canvas);
} else {
firstDraw = false;
}
}
private void initPath(AttributeSet attrs) {
TypedArray attributes = getContext().obtainStyledAttributes(attrs, R.styleable.RoadRunner);
pathData = attributes.getString(R.styleable.RoadRunner_path_data);
originalWidth = attributes.getInteger(R.styleable.RoadRunner_path_original_width, 0);
originalHeight = attributes.getInteger(R.styleable.RoadRunner_path_original_height, 0);
AssertUtils.assertThis(pathData != null, "Path data must be defined", this.getClass());
AssertUtils.assertThis(!pathData.isEmpty(), "Path data must be defined", this.getClass());
AssertUtils.assertThis(!pathData.equals(""), "Path data must be defined", this.getClass());
AssertUtils.assertThis(originalWidth > 0, "Original width of the path must be defined",
this.getClass());
AssertUtils.assertThis(originalHeight > 0, "Original height of the path must be defined",
this.getClass());
}
private void initConfiguration(AttributeSet attrs) {
TypedArray attributes = getContext().obtainStyledAttributes(attrs, R.styleable.RoadRunner);
min = attributes.getInteger(R.styleable.RoadRunner_min, min);
max = attributes.getInteger(R.styleable.RoadRunner_max, max);
pathPainterConfiguration =
PathPainterConfigurationFactory.makeConfiguration(attributes, DeterminatePainter.PROGRESS);
attributes.recycle();
}
private void initPathPainter() {
progressDeterminatePainter = (ProgressDeterminatePainter) DeterminatePainterFactory.makeIndeterminatePathPainter(
DeterminatePainter.PROGRESS, pathContainer, this, pathPainterConfiguration);
}
public static class Builder extends RoadRunnerBuilder {
public Builder(Context context) {
super(context);
}
@Override
public ProgressRoadRunner build() {
AssertUtils.assertThis(pathData != null, "Path data must be defined", this.getClass());
AssertUtils.assertThis(!pathData.isEmpty(), "Path data must be defined", this.getClass());
AssertUtils.assertThis(!pathData.equals(""), "Path data must be defined", this.getClass());
AssertUtils.assertThis(originalWidth > 0, "Original width of the path must be defined",
this.getClass());
AssertUtils.assertThis(originalHeight > 0, "Original height of the path must be defined",
this.getClass());
return new ProgressRoadRunner(this);
}
}
}
| 2,477 |
1,338 |
<gh_stars>1000+
/*
* Copyright 2010, <NAME>, <EMAIL>
* Distributed under the terms of the MIT license.
*/
#include <unistd.h>
#include <memory.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
const unsigned short TEST_PORT = 40000;
int
main(int argc, char *argv[])
{
int socketFamily = AF_INET;
int i = 1;
if (argc > i) {
if (!strcmp(argv[i], "-4")) {
i++;
socketFamily = AF_INET;
} else if (!strcmp(argv[i], "-6")) {
i++;
socketFamily = AF_INET6;
}
}
in_addr multicastSource;
multicastSource.s_addr = 0;
if (argc > i) {
if (!strcmp(argv[i], "-s")) {
++i;
if (inet_aton(argv[i], &multicastSource) == 0) {
fprintf(stderr, "IPv4 source address expected");
return -1;
}
++i;
}
}
int fd = socket(socketFamily, SOCK_DGRAM, 0);
if (fd < 0) {
perror("socket");
return -1;
}
sockaddr_storage saddr;
memset(&saddr, 0, sizeof(saddr));
if (socketFamily == AF_INET) {
sockaddr_in *sa = (sockaddr_in *) &saddr;
sa->sin_family = AF_INET;
sa->sin_port = htons(TEST_PORT);
sa->sin_addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
} else {
sockaddr_in6 *sa = (sockaddr_in6 *) &saddr;
sa->sin6_family = AF_INET6;
sa->sin6_port = htons(TEST_PORT);
// ff01::1 (all-nodes multicast, interface-local scope)
sa->sin6_addr.s6_addr[0] = 0xff;
sa->sin6_addr.s6_addr[1] = 0x01;
sa->sin6_addr.s6_addr[15] = 0x01;
}
if (multicastSource.s_addr) {
if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF,
&multicastSource, sizeof(multicastSource)) < 0) {
perror("setsockopt IP_MULTICAST_IF");
return -1;
}
}
const char *buffer = "hello world";
unsigned length = strlen(buffer);
int status = sendto(fd, buffer, length, 0, (sockaddr *) &saddr,
socketFamily == AF_INET ? sizeof(sockaddr_in) : sizeof(sockaddr_in6));
if (status < length) {
if (status < 0)
perror("sendto");
else if (status == 0)
printf("no data sent!\n");
else
printf("not all data sent!\n");
} else
printf("send(): success\n");
close(fd);
return 0;
}
| 960 |
314 |
<reponame>kolinkrewinkel/Multiplex
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "CDStructures.h"
#import <IDEFoundation/IDEActivityLogSectionRecorder.h>
@class IDEActivityLogSection, NSMapTable, NSMutableArray;
@interface IDECommandLineBuildLogRecorder : IDEActivityLogSectionRecorder
{
NSMutableArray *_activeSections;
NSMapTable *_emittedSubsectionsBySection;
}
+ (void)initialize;
@property(retain) NSMapTable *emittedSubsectionsBySection; // @synthesize emittedSubsectionsBySection=_emittedSubsectionsBySection;
@property(retain) NSMutableArray *activeSections; // @synthesize activeSections=_activeSections;
- (void)noteDescendantLogSectionDidClose:(id)arg1 inSupersection:(id)arg2;
- (BOOL)_findNewEffectiveSectionFromSubsectionsOfSection:(id)arg1;
- (BOOL)_findNewEffectiveSectionFromSubsectionsOfSection:(id)arg1 startingAtIndex:(unsigned long long)arg2;
- (void)noteDescendantLogSection:(id)arg1 didAppendText:(id)arg2;
- (void)noteDescendantLogSection:(id)arg1 didAddSubsection:(id)arg2;
- (void)_noteLogSection:(id)arg1 didAddSubsection:(id)arg2;
- (void)_emitSection:(id)arg1 inSupersection:(id)arg2;
- (void)_cleanupClosedSection:(id)arg1 inSupersection:(id)arg2;
- (void)_emitString:(id)arg1 withNewlineIfNeeded:(BOOL)arg2;
@property(readonly) IDEActivityLogSection *effectiveSection;
- (id)section;
- (id)initWithLogSection:(id)arg1;
@end
| 521 |
2,206 |
/*
*
* Copyright (c) 2006-2020, Speedment, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); You may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.speedment.tool.core.internal.util;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
/**
* Throttles requests so that they are only invoked at a maximum frequency. If
* more calls are made, others will be dropped.
* <p>
* This class is concurrent.
*
* @author <NAME>
* @since 3.0.16
*/
public final class Throttler {
/**
* Returns a {@link Throttler} that never executes the same request more
* than once every {@code millis}.
*
* @param millis minimum frequency in milliseconds
* @return the created throttler
*/
public static Throttler limitToOnceEvery(long millis) {
return new Throttler(millis);
}
private final ConcurrentHashMap<String, AtomicLong> timers;
private final long millis;
private Throttler(long millis) {
this.millis = millis;
this.timers = new ConcurrentHashMap<>();
}
/**
* Attempts to invoke the specified {@code runnable} if no other action with
* the same specifier has been called recently. Otherwise, the method will
* return with no effect.
*
* @param action the action specifier
* @param runnable the callable action to invoke
*/
public void call(String action, Runnable runnable) {
final long now = now();
final AtomicLong timer = timers.computeIfAbsent(action, a -> new AtomicLong(0));
if (now == timer.updateAndGet(
lastCall -> lastCall + millis < now
? now : lastCall
)) runnable.run();
}
private static long now() {
return System.currentTimeMillis();
}
}
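// --- Hypothetical usage sketch (assumption: not part of the original class) ---
// Only calls that fall outside the 1000 ms window for the "refresh" action run;
// the one in between is silently dropped, as described above.
class ThrottlerDemo {
    public static void main(String[] args) throws InterruptedException {
        Throttler throttler = Throttler.limitToOnceEvery(1_000);
        throttler.call("refresh", () -> System.out.println("run 1")); // executes
        Thread.sleep(200);
        throttler.call("refresh", () -> System.out.println("run 2")); // dropped: window not elapsed
        Thread.sleep(1_100);
        throttler.call("refresh", () -> System.out.println("run 3")); // executes again
    }
}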
| 782 |
766 |
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.tsunami.plugins.portscan.nmap.client.data;
import static com.google.common.base.Preconditions.checkArgument;
import com.google.auto.value.AutoValue;
import com.google.tsunami.proto.TransportProtocol;
/** A range of ports for nmap. */
@AutoValue
public abstract class PortRange implements IPortTarget {
abstract int startPort();
abstract int endPort();
abstract TransportProtocol protocol();
public static PortRange create(int startPort, int endPort, TransportProtocol protocol) {
// Ports must lie in the range 0-65535. The following checks verify that 0 <= start < end <= 65535.
checkArgument(startPort < endPort, "Expected %s < %s", startPort, endPort);
checkArgument(0 <= startPort, "Expected 0 <= %s", startPort);
checkArgument(endPort <= MAX_PORT_NUMBER, "Expected %s <= %s", endPort, MAX_PORT_NUMBER);
return new AutoValue_PortRange(startPort, endPort, protocol);
}
@Override
public String getCommandLineRepresentation() {
return String.format(
"%s%d-%d", IPortTarget.protocolCliString(protocol()), startPort(), endPort());
}
@Override
public boolean isProtocolSpecified() {
return protocol() != TransportProtocol.TRANSPORT_PROTOCOL_UNSPECIFIED;
}
}
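// --- Hypothetical usage sketch (assumption: not part of the original class) ---
// Builds a TCP range covering the privileged ports; assumes the proto enum
// exposes a TCP value. The resulting string is what ends up in nmap's port
// list (its exact prefix depends on IPortTarget.protocolCliString()).
//
//   PortRange range = PortRange.create(1, 1023, TransportProtocol.TCP);
//   String cliArg = range.getCommandLineRepresentation();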
| 544 |
5,964 |
<gh_stars>1000+
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "config.h"
#include "platform/credentialmanager/PlatformCredential.h"
namespace blink {
PlatformCredential* PlatformCredential::create(const String& id, const String& name, const KURL& iconURL)
{
return new PlatformCredential(id, name, iconURL);
}
PlatformCredential::PlatformCredential(const String& id, const String& name, const KURL& iconURL)
: m_id(id)
, m_name(name)
, m_iconURL(iconURL)
, m_type("credential")
{
}
PlatformCredential::~PlatformCredential()
{
}
} // namespace blink
| 239 |
308 |
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
from super_gradients.training.utils.detection_utils import calculate_bbox_iou_matrix
from super_gradients.training.utils.ssd_utils import DefaultBoxes
class SSDLoss(_Loss):
"""
Implements the loss as the sum of the followings:
1. Confidence Loss: All labels, with hard negative mining
2. Localization Loss: Only on positive labels
"""
def __init__(self, dboxes: DefaultBoxes, alpha: float = 1.0):
super(SSDLoss, self).__init__()
self.scale_xy = 1.0 / dboxes.scale_xy
self.scale_wh = 1.0 / dboxes.scale_wh
self.alpha = alpha
self.sl1_loss = nn.SmoothL1Loss(reduce=False)
self.dboxes = nn.Parameter(dboxes(order="xywh").transpose(0, 1).unsqueeze(dim=0), requires_grad=False)
self.con_loss = nn.CrossEntropyLoss(reduce=False)
def _norm_relative_bbox(self, loc):
"""
convert bbox locations into relative locations (relative to the dboxes) and normalized by w,h
:param loc a tensor of shape [batch, 4, num_boxes]
"""
gxy = self.scale_xy * (loc[:, :2, :] - self.dboxes[:, :2, :]) / self.dboxes[:, 2:, ]
gwh = self.scale_wh * (loc[:, 2:, :] / self.dboxes[:, 2:, :]).log()
return torch.cat((gxy, gwh), dim=1).contiguous()
def match_dboxes(self, targets):
"""
convert ground truth boxes into a tensor with the same size as dboxes. each gt bbox is matched to every
destination box which overlaps it over 0.5 (IoU). so some gt bboxes can be duplicated to a few destination boxes
:param targets: a tensor containing the boxes for a single image. shape [num_boxes, 5] (x,y,w,h,label)
:return: two tensors
boxes - shape of dboxes [4, num_dboxes] (x,y,w,h)
labels - sahpe [num_dboxes]
"""
target_locations = self.dboxes.data.clone().squeeze()
target_labels = torch.zeros((self.dboxes.data.shape[2])).to(self.dboxes.device)
if len(targets) > 0:
boxes = targets[:, 2:]
ious = calculate_bbox_iou_matrix(boxes, self.dboxes.data.squeeze().T, x1y1x2y2=False)
values, indices = torch.max(ious, dim=0)
mask = values > 0.5
target_locations[:, mask] = targets[indices[mask], 2:].T
target_labels[mask] = targets[indices[mask], 1]
return target_locations, target_labels
def forward(self, predictions, targets):
"""
Compute the loss
:param predictions - predictions tensor coming from the network. shape [N, num_classes+4, num_dboxes]
were the first four items are (x,y,w,h) and the rest are class confidence
:param targets - targets for the batch. [num targets, 6] (index in batch, label, x,y,w,h)
"""
batch_target_locations = []
batch_target_labels = []
(ploc, plabel) = predictions
targets = targets.to(self.dboxes.device)
for i in range(ploc.shape[0]):
target_locations, target_labels = self.match_dboxes(targets[targets[:, 0] == i])
batch_target_locations.append(target_locations)
batch_target_labels.append(target_labels)
batch_target_locations = torch.stack(batch_target_locations)
batch_target_labels = torch.stack(batch_target_labels).type(torch.long)
mask = batch_target_labels > 0
pos_num = mask.sum(dim=1)
vec_gd = self._norm_relative_bbox(batch_target_locations)
# SUM ON FOUR COORDINATES, AND MASK
sl1 = self.sl1_loss(ploc, vec_gd).sum(dim=1)
sl1 = (mask.float() * sl1).sum(dim=1)
# HARD NEGATIVE MINING
con = self.con_loss(plabel, batch_target_labels)
# POSITIVE MASK WILL NEVER SELECTED
con_neg = con.clone()
con_neg[mask] = 0
_, con_idx = con_neg.sort(dim=1, descending=True)
_, con_rank = con_idx.sort(dim=1)
# NUMBER OF NEGATIVE THREE TIMES POSITIVE
neg_num = torch.clamp(3 * pos_num, max=mask.size(1)).unsqueeze(-1)
neg_mask = con_rank < neg_num
closs = (con * (mask.float() + neg_mask.float())).sum(dim=1)
# AVOID NO OBJECT DETECTED
total_loss = (2 - self.alpha) * sl1 + self.alpha * closs
num_mask = (pos_num > 0).float()
pos_num = pos_num.float().clamp(min=1e-6)
ret = (total_loss * num_mask / pos_num).mean(dim=0)
return ret, torch.cat((sl1.mean().unsqueeze(0), closs.mean().unsqueeze(0), ret.unsqueeze(0))).detach()
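# --- Standalone sketch (assumption: not part of the original file) ---
# Illustrates the double-argsort trick used above for hard negative mining:
# con_rank[i] is the rank of element i when the losses are sorted in descending
# order, so `con_rank < k` keeps exactly the k largest (hardest) negatives.
if __name__ == "__main__":
    con_neg = torch.tensor([[0.2, 0.9, 0.1, 0.7, 0.4]])
    _, con_idx = con_neg.sort(dim=1, descending=True)  # indices, largest loss first
    _, con_rank = con_idx.sort(dim=1)                  # rank of each position
    neg_mask = con_rank < 3                            # keep the 3 hardest negatives
    print(neg_mask)  # selects positions 1, 3 and 4 (the three largest losses)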
| 2,036 |
309 |
{
"name": "firstaidgit",
"version": "0.0.3",
"description": "Search git problems and their solutions via the CLI.",
"keywords": [
"firstaidgit",
"first aid git",
"github issues",
"git help",
"git cli"
],
"author": {
"name": "<NAME>",
"email": "<EMAIL>",
"url": "http://www.ricardofilipe.com"
},
"licenses": [{
"type": "MIT",
"url": "https://github.com/mycozycloud/request-json/blob/master/LICENSE"
}],
"engines": {
"node": ">=0.10"
},
"repository": {
"type": "git",
"url": "<EMAIL>:magalhini/firstaidgit.git"
},
"dependencies": {
"chalk": "^1.0.0",
"commander": "^2.8.1",
"fuzzy": "^0.1.0",
"request-json": "^0.5.3"
},
"bin": {
"firstaidgit": "firstaidgit.js"
}
}
| 389 |
678 |
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#import "NSObject.h"
@class NSString, UIGestureRecognizer, UIImage, UIView;
@protocol MultiImageScrollViewDelegate <NSObject>
- (UIImage *)imageAtPage:(unsigned int)arg1;
- (UIView *)viewAtPage:(unsigned int)arg1 frame:(struct CGRect)arg2;
@optional
- (NSString *)imagePathAtPage:(unsigned int)arg1;
- (void)onChangePage;
- (void)multiImageScrollViewWillBeginDragging;
- (void)OnLongPressBegin:(id)arg1;
- (void)OnLongPress:(id)arg1;
- (void)onDoubleTap:(UIGestureRecognizer *)arg1;
- (void)onSingleTap:(UIGestureRecognizer *)arg1;
@end
| 259 |
428 |
<reponame>cping/LGame
/**
* Copyright 2008 - 2012
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* @project loon
* @author cping
* @email:<EMAIL>
* @version 0.3.3
*/
package loon.action.sprite;
import java.util.HashMap;
import loon.LSystem;
import loon.core.LRelease;
import loon.core.geom.RectBox;
import loon.core.geom.Shape;
import loon.core.geom.Triangle;
import loon.core.geom.Vector2f;
import loon.core.graphics.device.LColor;
import loon.core.graphics.device.LFont;
import loon.core.graphics.opengl.GL;
import loon.core.graphics.opengl.GL10;
import loon.core.graphics.opengl.GLAttributes;
import loon.core.graphics.opengl.GLBatch;
import loon.core.graphics.opengl.GLEx;
import loon.core.graphics.opengl.GLMesh;
import loon.core.graphics.opengl.LSTRDictionary;
import loon.core.graphics.opengl.LTexture;
import loon.core.graphics.opengl.LTextureRegion;
import loon.core.graphics.opengl.TextureUtils;
import loon.core.graphics.opengl.GLAttributes.Usage;
import loon.core.graphics.opengl.GLMesh.VertexDataType;
import loon.utils.MathUtils;
public class SpriteBatch implements LRelease {
public void draw(SpriteFont font, CharSequence cs, float x, float y) {
font.drawString(this, cs, x, y);
}
public void draw(SpriteFont font, CharSequence cs, float x, float y,
LColor color) {
font.drawString(this, cs, x, y, color);
}
/**
* Sample: batch.draw(font, "Test", new Vector2f(150, 150), LColor.red, 0,
* Vector2f.Zero, new Vector2f(1f, 1f), SpriteEffects.None);
*
* @param font
* @param cs
* @param local
* @param color
* @param rotation
* @param origin
* @param scale
* @param spriteEffects
*/
public void draw(SpriteFont font, CharSequence cs, Vector2f local,
LColor color, float rotation, Vector2f origin, Vector2f scale,
SpriteEffects spriteEffects) {
font.drawString(this, cs, local, color, rotation, origin, scale,
spriteEffects);
}
public static enum SpriteEffects {
None, FlipHorizontally, FlipVertically;
}
static class TextureLine {
private Vector2f pstart = new Vector2f();
private Vector2f pend = new Vector2f();
private float pstrokeWidth;
private float pangle;
private Vector2f pdirection;
private Vector2f pcentre;
private float plength;
private boolean pchanged;
public TextureLine() {
pchanged = true;
if (whitePixel == null) {
whitePixel = TextureUtils.createTexture(1, 1, LColor.white);
}
}
public void setStart(float x, float y) {
pstart.set(x, y);
pchanged = true;
}
public void setEnd(float x, float y) {
pend.set(x, y);
pchanged = true;
}
public float getStrokeWidth() {
return pstrokeWidth;
}
public void setStrokeWidth(float value) {
pstrokeWidth = value;
pchanged = true;
}
public void update() {
pdirection = new Vector2f(pend.x - pstart.x, pend.y - pstart.y);
pdirection.nor();
pangle = MathUtils.toDegrees(MathUtils.atan2(pend.y - pstart.y,
pend.x - pstart.x));
plength = MathUtils.ceil(Vector2f.dst(pstart, pend));
pcentre = new Vector2f((pend.x + pstart.x) / 2,
(pend.y + pstart.y) / 2);
pchanged = false;
}
public void draw(SpriteBatch batch) {
if (pchanged) {
update();
}
if (pstrokeWidth > 0) {
batch.draw(whitePixel, pcentre.x, pcentre.y, plength / 2f,
pstrokeWidth / 2, plength, pstrokeWidth, 1f, 1f,
pangle, 0, 0, 1f, 1f, false, false, true);
}
}
}
private HashMap<Integer, SpriteBatch.TextureLine> lineLazy = new HashMap<Integer, SpriteBatch.TextureLine>(
1000);
private LColor tempColor = new LColor(1f, 1f, 1f, 1f);
public float color = LColor.white.toFloatBits();
private LTexture lastTexture = null;
private int idx = 0;
private final float[] vertices;
private boolean drawing = false;
public int maxSpritesInBatch = 0;
public static final int VERTEX_SIZE = 2 + 1 + 2;
public static final int SPRITE_SIZE = 4 * VERTEX_SIZE;
private static LTexture whitePixel;
private float alpha = 1f;
private float invTexWidth;
private float invTexHeight;
private final GLMesh mesh;
public SpriteBatch() {
this(1000);
}
public SpriteBatch(int size) {
this.vertices = new float[size * SPRITE_SIZE];
mesh = new GLMesh(VertexDataType.VertexArray, false, size * 4,
size * 6, new GLAttributes.VertexAttribute(Usage.Position, 2,
"POSITION"), new GLAttributes.VertexAttribute(
Usage.ColorPacked, 4, "COLOR"),
new GLAttributes.VertexAttribute(Usage.TextureCoordinates, 2,
"TEXCOORD"));
int len = size * 6;
short[] indices = new short[len];
short j = 0;
for (int i = 0; i < len; i += 6, j += 4) {
indices[i + 0] = (short) (j + 0);
indices[i + 1] = (short) (j + 1);
indices[i + 2] = (short) (j + 2);
indices[i + 3] = (short) (j + 2);
indices[i + 4] = (short) (j + 3);
indices[i + 5] = (short) (j + 0);
}
mesh.setIndices(indices);
}
public void halfAlpha() {
color = 1.7014117E38f;
alpha = 0.5f;
}
public void resetColor() {
color = -1.7014117E38f;
alpha = 1f;
}
public static enum BlendState {
Additive, AlphaBlend, NonPremultiplied, Opaque;
}
private BlendState lastBlendState = BlendState.NonPremultiplied;
public void begin() {
if (drawing) {
throw new IllegalStateException("Not implemented end !");
}
GLEx.self.glTex2DEnable();
idx = 0;
lastTexture = null;
drawing = true;
}
public void end() {
checkDrawing();
if (idx > 0) {
submit();
}
lastTexture = null;
drawing = false;
GLEx.self.glTex2DDisable();
}
public void setColor(LColor c) {
color = c.toFloatBits();
}
public void setColor(int r, int g, int b, int a) {
color = LColor.toFloatBits(r, g, b, alpha == 1f ? a
: (int) (alpha * 255));
}
public void setColor(float r, float g, float b, float a) {
color = LColor.toFloatBits(r, g, b, alpha == 1f ? a : alpha);
}
public void setColor(int r, int g, int b) {
color = LColor.toFloatBits(r, g, b, (int) (alpha * 255));
}
public void setColor(float r, float g, float b) {
color = LColor.toFloatBits(r, g, b, alpha);
}
public void setColor(int v) {
color = Float.intBitsToFloat(v & 0xfeffffff);
}
public void setColor(float color) {
this.color = color;
}
public void setAlpha(float alpha) {
this.alpha = alpha;
int intBits = Float.floatToRawIntBits(color);
int r = (intBits & 0xff);
int g = ((intBits >>> 8) & 0xff);
int b = ((intBits >>> 16) & 0xff);
int a = (int) (alpha * 255);
color = LColor.toFloatBits(r, g, b, a);
}
public float getAlpha() {
return alpha;
}
public LColor getColor() {
int intBits = Float.floatToRawIntBits(color);
LColor color = this.tempColor;
color.r = (intBits & 0xff) / 255f;
color.g = ((intBits >>> 8) & 0xff) / 255f;
color.b = ((intBits >>> 16) & 0xff) / 255f;
color.a = ((intBits >>> 24) & 0xff) / 255f;
return color;
}
public float getFloatColor() {
return color;
}
private GLBatch batch = new GLBatch(1000);
public void drawSpriteBounds(SpriteRegion sprite, LColor color) {
float[] vertices = sprite.getVertices();
float x1 = vertices[0];
float y1 = vertices[1];
float x2 = vertices[5];
float y2 = vertices[6];
float x3 = vertices[10];
float y3 = vertices[11];
float x4 = vertices[15];
float y4 = vertices[16];
setColor(color);
drawLine(x1, y1, x2, y2);
drawLine(x2, y2, x3, y3);
drawLine(x3, y3, x4, y4);
drawLine(x4, y4, x1, y1);
resetColor();
}
public final void draw(Shape shape) {
float[] points = shape.getPoints();
if (points.length == 0) {
return;
}
submit();
LColor color = getColor();
batch.begin(GL.GL_LINE_STRIP);
for (int i = 0; i < points.length; i += 2) {
batch.color(color);
batch.vertex(points[i], points[i + 1]);
}
if (shape.closed()) {
batch.color(color);
batch.vertex(points[0], points[1]);
}
batch.end();
}
public final void fill(Shape shape) {
if (shape == null) {
return;
}
Triangle tris = shape.getTriangles();
if (tris.getTriangleCount() == 0) {
return;
}
submit();
LColor color = getColor();
batch.begin(GL.GL_TRIANGLES);
for (int i = 0; i < tris.getTriangleCount(); i++) {
for (int p = 0; p < 3; p++) {
float[] pt = tris.getTrianglePoint(i, p);
batch.color(color);
batch.vertex(pt[0], pt[1]);
}
}
batch.end();
}
public void fillPolygon(float xPoints[], float yPoints[], int nPoints) {
submit();
LColor color = getColor();
batch.begin(GL.GL_POLYGON);
for (int i = 0; i < nPoints; i++) {
batch.color(color);
batch.vertex(xPoints[i], yPoints[i]);
}
batch.end();
}
public void drawPolygon(float[] xPoints, float[] yPoints, int nPoints) {
submit();
LColor color = getColor();
batch.begin(GL.GL_LINE_LOOP);
for (int i = 0; i < nPoints; i++) {
batch.color(color);
batch.vertex(xPoints[i], yPoints[i]);
}
batch.end();
}
public void drawOval(float x1, float y1, float width, float height) {
this.drawArc(x1, y1, width, height, 32, 0, 360);
}
public void fillOval(float x1, float y1, float width, float height) {
this.fillArc(x1, y1, width, height, 32, 0, 360);
}
public void drawArc(RectBox rect, int segments, float start, float end) {
drawArc(rect.x, rect.y, rect.width, rect.height, segments, start, end);
}
public void drawArc(float x1, float y1, float width, float height,
int segments, float start, float end) {
submit();
LColor color = getColor();
while (end < start) {
end += 360;
}
float cx = x1 + (width / 2.0f);
float cy = y1 + (height / 2.0f);
batch.begin(GL.GL_LINE_STRIP);
int step = 360 / segments;
for (float a = start; a < (end + step); a += step) {
float ang = a;
if (ang > end) {
ang = end;
}
float x = (cx + (MathUtils.cos(MathUtils.toRadians(ang)) * width / 2.0f));
float y = (cy + (MathUtils.sin(MathUtils.toRadians(ang)) * height / 2.0f));
batch.color(color);
batch.vertex(x, y);
}
batch.end();
}
public final void fillArc(float x1, float y1, float width, float height,
float start, float end) {
fillArc(x1, y1, width, height, 40, start, end);
}
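 // Filled variant: renders the arc as a triangle fan anchored at the ellipse center.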
public final void fillArc(float x1, float y1, float width, float height,
int segments, float start, float end) {
submit();
LColor color = getColor();
while (end < start) {
end += 360;
}
float cx = x1 + (width / 2.0f);
float cy = y1 + (height / 2.0f);
batch.begin(GL.GL_TRIANGLE_FAN);
int step = 360 / segments;
batch.vertex(cx, cy);
for (float a = start; a < (end + step); a += step) {
float ang = a;
if (ang > end) {
ang = end;
}
float x = (cx + (MathUtils.cos(MathUtils.toRadians(ang)) * width / 2.0f));
float y = (cy + (MathUtils.sin(MathUtils.toRadians(ang)) * height / 2.0f));
batch.color(color);
batch.vertex(x, y);
}
batch.end();
}
public final void drawRoundRect(float x, float y, float width,
float height, int radius) {
drawRoundRect(x, y, width, height, radius, 40);
}
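 // Outlines a rounded rectangle from four straight edges plus four quarter-circle corner arcs.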
public final void drawRoundRect(float x, float y, float width,
float height, int radius, int segs) {
if (radius < 0) {
   throw new IllegalArgumentException("radius must be >= 0");
}
if (radius == 0) {
drawRect(x, y, width, height);
return;
}
int mr = (int) MathUtils.min(width, height) / 2;
if (radius > mr) {
radius = mr;
}
drawLine(x + radius, y, x + width - radius, y);
drawLine(x, y + radius, x, y + height - radius);
drawLine(x + width, y + radius, x + width, y + height - radius);
drawLine(x + radius, y + height, x + width - radius, y + height);
float d = radius * 2;
drawArc(x + width - d, y + height - d, d, d, segs, 0, 90);
drawArc(x, y + height - d, d, d, segs, 90, 180);
drawArc(x + width - d, y, d, d, segs, 270, 360);
drawArc(x, y, d, d, segs, 180, 270);
}
public final void fillRoundRect(float x, float y, float width,
float height, int cornerRadius) {
fillRoundRect(x, y, width, height, cornerRadius, 40);
}
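 // Fills a rounded rectangle from five axis-aligned rectangles (edges plus core) and four filled corner arcs.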
public final void fillRoundRect(float x, float y, float width,
float height, int radius, int segs) {
if (radius < 0) {
   throw new IllegalArgumentException("radius must be >= 0");
}
if (radius == 0) {
fillRect(x, y, width, height);
return;
}
int mr = (int) MathUtils.min(width, height) / 2;
if (radius > mr) {
radius = mr;
}
float d = radius * 2;
fillRect(x + radius, y, width - d, radius);
fillRect(x, y + radius, radius, height - d);
fillRect(x + width - radius, y + radius, radius, height - d);
fillRect(x + radius, y + height - radius, width - d, radius);
fillRect(x + radius, y + radius, width - d, height - d);
fillArc(x + width - d, y + height - d, d, d, segs, 0, 90);
fillArc(x, y + height - d, d, d, segs, 90, 180);
fillArc(x + width - d, y, d, d, segs, 270, 360);
fillArc(x, y, d, d, segs, 180, 270);
}
public void fillRect(float x, float y, float width, float height) {
LColor color = getColor();
submit();
batch.begin(GL10.GL_TRIANGLE_FAN);
{
batch.color(color);
batch.vertex(x, y);
batch.color(color);
batch.vertex(x + width, y);
batch.color(color);
batch.vertex(x + width, y + height);
batch.color(color);
batch.vertex(x, y + height);
}
batch.end();
}
 // For efficiency, rectangle drawing here differs from the GLEx class: it is rendered via textured lines instead.
public void drawRect(float x, float y, float width, float height) {
drawLine(x, y, x + width, y);
drawLine(x + width, y, x + width, y + height);
drawLine(x + width, y + height, x, y + height);
drawLine(x, y + height, x, y);
}
public void drawPoint(int x, int y, LColor c) {
float old = color;
setColor(c);
drawLine(x, y, x + 1, y + 1);
setColor(old);
}
public void drawPoints(int[] x, int[] y, LColor c) {
int size = y.length;
for (int i = 0; i < size; i++) {
drawPoint(x[i], y[i], c);
}
}
public void drawPoints(int[] x, int[] y) {
int size = y.length;
for (int i = 0; i < size; i++) {
drawPoint(x[i], y[i]);
}
}
public void drawPoint(int x, int y) {
drawLine(x, y, x + 1, y + 1);
}
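 // Lines are drawn as cached textured quads: each (x1, y1, x2, y2) combination is hashed
 // and its TextureLine instance reused on later calls.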
public void drawLine(float x1, float y1, float x2, float y2) {
int hashCode = 1;
hashCode = LSystem.unite(hashCode, x1);
hashCode = LSystem.unite(hashCode, y1);
hashCode = LSystem.unite(hashCode, x2);
hashCode = LSystem.unite(hashCode, y2);
TextureLine line = lineLazy.get(hashCode);
if (line == null) {
line = new TextureLine();
line.setStart(x1, y1);
line.setEnd(x2, y2);
line.setStrokeWidth(1f);
lineLazy.put(hashCode, line);
}
line.draw(this);
}
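 // Ensures the texture is loaded and flushes pending vertices whenever the active texture
 // (or its parent atlas) changes or the vertex buffer is full; also caches the inverse
 // texture dimensions used for UV mapping.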
private void checkTexture(final LTexture texture) {
checkDrawing();
if (!texture.isLoaded()) {
texture.loadTexture();
}
LTexture tex2d = texture.getParent();
if (tex2d != null) {
if (tex2d != lastTexture) {
submit();
lastTexture = tex2d;
} else if (idx == vertices.length) {
submit();
}
invTexWidth = (1f / texture.getWidth()) * texture.widthRatio;
invTexHeight = (1f / texture.getHeight()) * texture.heightRatio;
} else if (texture != lastTexture) {
submit();
lastTexture = texture;
invTexWidth = (1f / texture.getWidth()) * texture.widthRatio;
invTexHeight = (1f / texture.getHeight()) * texture.heightRatio;
} else if (idx == vertices.length) {
submit();
}
}
public void draw(LTexture texture, float x, float y, float rotation) {
draw(texture, x, y, texture.getWidth() / 2, texture.getHeight() / 2,
texture.getWidth(), texture.getHeight(), 1f, 1f, rotation, 0,
0, texture.getWidth(), texture.getHeight(), false, false);
}
public void draw(LTexture texture, float x, float y, float width,
float height, float rotation) {
if (rotation == 0 && texture.getWidth() == width
&& texture.getHeight() == height) {
draw(texture, x, y, width, height);
} else {
draw(texture, x, y, width / 2, height / 2, width, height, 1f, 1f,
rotation, 0, 0, texture.getWidth(), texture.getHeight(),
false, false);
}
}
public void draw(LTexture texture, float x, float y, float rotation,
float srcX, float srcY, float srcWidth, float srcHeight) {
draw(texture, x, y, srcWidth / 2, srcHeight / 2, texture.getWidth(),
texture.getHeight(), 1f, 1f, rotation, srcX, srcY, srcWidth,
srcHeight, false, false);
}
public void draw(LTexture texture, Vector2f pos, Vector2f origin,
float width, float height, float scale, float rotation,
RectBox src, boolean flipX, boolean flipY) {
draw(texture, pos.x, pos.y, origin.x, origin.y, width, height, scale,
scale, rotation, src.x, src.y, src.width, src.height, flipX,
flipY, false);
}
public void draw(LTexture texture, Vector2f pos, Vector2f origin,
float scale, float rotation, RectBox src, boolean flipX,
boolean flipY) {
draw(texture, pos.x, pos.y, origin.x, origin.y, src.width, src.height,
scale, scale, rotation, src.x, src.y, src.width, src.height,
flipX, flipY, false);
}
public void draw(LTexture texture, Vector2f pos, Vector2f origin,
float scale, RectBox src, boolean flipX, boolean flipY) {
draw(texture, pos.x, pos.y, origin.x, origin.y, src.width, src.height,
scale, scale, 0, src.x, src.y, src.width, src.height, flipX,
flipY, false);
}
public void draw(LTexture texture, Vector2f pos, Vector2f origin,
RectBox src, boolean flipX, boolean flipY) {
draw(texture, pos.x, pos.y, origin.x, origin.y, src.width, src.height,
1f, 1f, 0, src.x, src.y, src.width, src.height, flipX, flipY,
false);
}
public void draw(LTexture texture, Vector2f pos, RectBox src,
boolean flipX, boolean flipY) {
draw(texture, pos.x, pos.y, src.width / 2, src.height / 2, src.width,
src.height, 1f, 1f, 0, src.x, src.y, src.width, src.height,
flipX, flipY, false);
}
public void draw(LTexture texture, float x, float y, float originX,
float originY, float width, float height, float scaleX,
float scaleY, float rotation, float srcX, float srcY,
float srcWidth, float srcHeight, boolean flipX, boolean flipY) {
draw(texture, x, y, originX, originY, width, height, scaleX, scaleY,
rotation, srcX, srcY, srcWidth, srcHeight, flipX, flipY, false);
}
public void draw(LTexture texture, float x, float y, float originX,
float originY, float scaleX, float scaleY, float rotation,
float srcX, float srcY, float srcWidth, float srcHeight,
boolean flipX, boolean flipY) {
draw(texture, x, y, originX, originY, srcWidth, srcHeight, scaleX,
scaleY, rotation, srcX, srcY, srcWidth, srcHeight, flipX,
flipY, false);
}
public void draw(LTexture texture, Vector2f position, RectBox src,
LColor c, float rotation, Vector2f origin, Vector2f scale,
SpriteEffects effects) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
if (src != null) {
draw(texture, position.x, position.y, origin.x, origin.y,
src.width, src.height, scale.x, scale.y, rotation, src.x,
src.y, src.width, src.height, flipX, flipY, true);
} else {
draw(texture, position.x, position.y, origin.x, origin.y,
texture.getWidth(), texture.getHeight(), scale.x, scale.y,
rotation, 0, 0, texture.getWidth(), texture.getHeight(),
flipX, flipY, true);
}
setColor(old);
}
public void draw(LTexture texture, Vector2f position, RectBox src,
LColor c, float rotation, float sx, float sy, float scale,
SpriteEffects effects) {
if (src == null && rotation == 0 && scale == 1f && sx == 0 && sy == 0) {
draw(texture, position, c);
return;
}
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
if (src != null) {
draw(texture, position.x, position.y, sx, sy, src.width,
src.height, scale, scale, rotation, src.x, src.y,
src.width, src.height, flipX, flipY, true);
} else {
draw(texture, position.x, position.y, sx, sy, texture.getWidth(),
texture.getHeight(), scale, scale, rotation, 0, 0,
texture.getWidth(), texture.getHeight(), flipX, flipY, true);
}
setColor(old);
}
public void draw(LTexture texture, Vector2f position, RectBox src,
LColor c, float rotation, Vector2f origin, float scale,
SpriteEffects effects) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
if (src != null) {
draw(texture, position.x, position.y, origin.x, origin.y,
src.width, src.height, scale, scale, rotation, src.x,
src.y, src.width, src.height, flipX, flipY, true);
} else {
draw(texture, position.x, position.y, origin.x, origin.y,
texture.getWidth(), texture.getHeight(), scale, scale,
rotation, 0, 0, texture.getWidth(), texture.getHeight(),
flipX, flipY, true);
}
setColor(old);
}
public void draw(LTexture texture, float px, float py, float srcX,
float srcY, float srcWidth, float srcHeight, LColor c,
float rotation, float originX, float originY, float scale,
SpriteEffects effects) {
if (effects == SpriteEffects.None && rotation == 0f && originX == 0f
&& originY == 0f && scale == 1f) {
draw(texture, px, py, srcX, srcY, srcWidth, srcHeight, c);
return;
}
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
draw(texture, px, py, originX, originY, srcWidth, srcHeight, scale,
scale, rotation, srcX, srcY, srcWidth, srcHeight, flipX, flipY,
true);
setColor(old);
}
public void draw(LTexture texture, float px, float py, RectBox src,
LColor c, float rotation, Vector2f origin, float scale,
SpriteEffects effects) {
draw(texture, px, py, src, c, rotation, origin.x, origin.y, scale,
effects);
}
public void draw(LTexture texture, float px, float py, RectBox src,
LColor c, float rotation, float ox, float oy, float scale,
SpriteEffects effects) {
draw(texture, px, py, src, c, rotation, ox, oy, scale, scale, effects);
}
public void draw(LTexture texture, float px, float py, RectBox src,
LColor c, float rotation, float ox, float oy, float scaleX,
float scaleY, SpriteEffects effects) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
if (src != null) {
draw(texture, px, py, ox, oy, src.width, src.height, scaleX,
scaleY, rotation, src.x, src.y, src.width, src.height,
flipX, flipY, true);
} else {
draw(texture, px, py, ox, oy, texture.getWidth(),
texture.getHeight(), scaleX, scaleY, rotation, 0, 0,
texture.getWidth(), texture.getHeight(), flipX, flipY, true);
}
setColor(old);
}
public void draw(LTexture texture, Vector2f position, LColor c,
float rotation, Vector2f origin, Vector2f scale,
SpriteEffects effects) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
draw(texture, position.x, position.y, origin.x, origin.y,
texture.getWidth(), texture.getHeight(), scale.x, scale.y,
rotation, 0, 0, texture.getWidth(), texture.getHeight(), flipX,
flipY, true);
setColor(old);
}
public void draw(LTexture texture, Vector2f position, LColor c,
float rotation, float originX, float originY, float scale,
SpriteEffects effects) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
draw(texture, position.x, position.y, originX, originY,
texture.getWidth(), texture.getHeight(), scale, scale,
rotation, 0, 0, texture.getWidth(), texture.getHeight(), flipX,
flipY, true);
setColor(old);
}
public void draw(LTexture texture, float posX, float posY, float srcX,
float srcY, float srcWidth, float srcHeight, LColor c,
float rotation, float originX, float originY, float scaleX,
float scaleY, SpriteEffects effects) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
draw(texture, posX, posY, originX, originY, srcWidth, srcHeight,
scaleX, scaleY, rotation, srcX, srcY, srcWidth, srcHeight,
flipX, flipY, true);
setColor(old);
}
public void draw(LTexture texture, Vector2f position, float srcX,
float srcY, float srcWidth, float srcHeight, LColor c,
float rotation, Vector2f origin, Vector2f scale,
SpriteEffects effects) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
draw(texture, position.x, position.y, origin.x, origin.y, srcWidth,
srcHeight, scale.x, scale.y, rotation, srcX, srcY, srcWidth,
srcHeight, flipX, flipY, true);
setColor(old);
}
public void draw(LTexture texture, RectBox dst, RectBox src, LColor c,
float rotation, Vector2f origin, SpriteEffects effects) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
if (src != null) {
draw(texture, dst.x, dst.y, origin.x, origin.y, dst.width,
dst.height, 1f, 1f, rotation, src.x, src.y, src.width,
src.height, flipX, flipY, true);
} else {
draw(texture, dst.x, dst.y, origin.x, origin.y, dst.width,
dst.height, 1f, 1f, rotation, 0, 0, texture.getWidth(),
texture.getHeight(), flipX, flipY, true);
}
setColor(old);
}
public void draw(LTexture texture, float dstX, float dstY, float dstWidth,
float dstHeight, float srcX, float srcY, float srcWidth,
float srcHeight, LColor c, float rotation, float originX,
float originY, SpriteEffects effects) {
if (effects == SpriteEffects.None && rotation == 0 && originX == 0
&& originY == 0) {
draw(texture, dstX, dstY, dstWidth, dstHeight, srcX, srcY,
srcWidth, srcHeight, c);
return;
}
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
boolean flipX = false;
boolean flipY = false;
switch (effects) {
case FlipHorizontally:
flipX = true;
break;
case FlipVertically:
flipY = true;
break;
default:
break;
}
draw(texture, dstX, dstY, originX, originY, dstWidth, dstHeight, 1f,
1f, rotation, srcX, srcY, srcWidth, srcHeight, flipX, flipY,
true);
setColor(old);
}
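 // Core quad-submission routine: offsets the quad by its origin, applies scale and rotation
 // around that origin, derives texture coordinates from the source rectangle (optionally
 // flipped), and appends four vertices (position, packed color, UV) to the batch buffer.
 // When 'off' is true, (x, y) is treated as the world position of the origin point itself.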
public void draw(LTexture texture, float x, float y, float originX,
float originY, float width, float height, float scaleX,
float scaleY, float rotation, float srcX, float srcY,
float srcWidth, float srcHeight, boolean flipX, boolean flipY,
boolean off) {
checkTexture(texture);
float worldOriginX = x + originX;
float worldOriginY = y + originY;
if (off) {
worldOriginX = x;
worldOriginY = y;
}
float fx = -originX;
float fy = -originY;
float fx2 = width - originX;
float fy2 = height - originY;
if (scaleX != 1 || scaleY != 1) {
fx *= scaleX;
fy *= scaleY;
fx2 *= scaleX;
fy2 *= scaleY;
}
final float p1x = fx;
final float p1y = fy;
final float p2x = fx;
final float p2y = fy2;
final float p3x = fx2;
final float p3y = fy2;
final float p4x = fx2;
final float p4y = fy;
float x1;
float y1;
float x2;
float y2;
float x3;
float y3;
float x4;
float y4;
if (rotation != 0) {
final float cos = MathUtils.cosDeg(rotation);
final float sin = MathUtils.sinDeg(rotation);
x1 = cos * p1x - sin * p1y;
y1 = sin * p1x + cos * p1y;
x2 = cos * p2x - sin * p2y;
y2 = sin * p2x + cos * p2y;
x3 = cos * p3x - sin * p3y;
y3 = sin * p3x + cos * p3y;
x4 = x1 + (x3 - x2);
y4 = y3 - (y2 - y1);
} else {
x1 = p1x;
y1 = p1y;
x2 = p2x;
y2 = p2y;
x3 = p3x;
y3 = p3y;
x4 = p4x;
y4 = p4y;
}
x1 += worldOriginX;
y1 += worldOriginY;
x2 += worldOriginX;
y2 += worldOriginY;
x3 += worldOriginX;
y3 += worldOriginY;
x4 += worldOriginX;
y4 += worldOriginY;
float u = srcX * invTexWidth + texture.xOff;
float v = srcY * invTexHeight + texture.yOff;
float u2 = (srcX + srcWidth) * invTexWidth;
float v2 = (srcY + srcHeight) * invTexHeight;
if (flipX) {
float tmp = u;
u = u2;
u2 = tmp;
}
if (flipY) {
float tmp = v;
v = v2;
v2 = tmp;
}
vertices[idx++] = x1;
vertices[idx++] = y1;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v;
vertices[idx++] = x2;
vertices[idx++] = y2;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v2;
vertices[idx++] = x3;
vertices[idx++] = y3;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v2;
vertices[idx++] = x4;
vertices[idx++] = y4;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v;
}
public void draw(LTexture texture, float x, float y, float width,
float height, float rotation, LColor c) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
draw(texture, x, y, width, height, rotation);
setColor(old);
}
public void drawFlipX(LTexture texture, float x, float y) {
draw(texture, x, y, texture.getWidth(), texture.getHeight(), 0, 0,
texture.getWidth(), texture.getHeight(), true, false);
}
public void drawFlipY(LTexture texture, float x, float y) {
draw(texture, x, y, texture.getWidth(), texture.getHeight(), 0, 0,
texture.getWidth(), texture.getHeight(), false, true);
}
public void drawFlipX(LTexture texture, float x, float y, float width,
float height) {
draw(texture, x, y, width, height, 0, 0, texture.getWidth(),
texture.getHeight(), true, false);
}
public void drawFlipY(LTexture texture, float x, float y, float width,
float height) {
draw(texture, x, y, width, height, 0, 0, texture.getWidth(),
texture.getHeight(), false, true);
}
public void drawFlipX(LTexture texture, float x, float y, float rotation) {
draw(texture, x, y, texture.getWidth() / 2, texture.getHeight() / 2,
texture.getWidth(), texture.getHeight(), 1f, 1f, rotation, 0,
0, texture.getWidth(), texture.getHeight(), true, false);
}
public void drawFlipY(LTexture texture, float x, float y, float rotation) {
draw(texture, x, y, texture.getWidth() / 2, texture.getHeight() / 2,
texture.getWidth(), texture.getHeight(), 1f, 1f, rotation, 0,
0, texture.getWidth(), texture.getHeight(), false, true);
}
public void drawFlipX(LTexture texture, float x, float y, float width,
float height, float rotation) {
draw(texture, x, y, width / 2, height / 2, width, height, 1f, 1f,
rotation, 0, 0, texture.getWidth(), texture.getHeight(), true,
false);
}
public void drawFlipY(LTexture texture, float x, float y, float width,
float height, float rotation) {
draw(texture, x, y, width / 2, height / 2, width, height, 1f, 1f,
rotation, 0, 0, texture.getWidth(), texture.getHeight(), false,
true);
}
public void draw(LTexture texture, RectBox dstBox, RectBox srcBox, LColor c) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
draw(texture, dstBox.x, dstBox.y, dstBox.width, dstBox.height,
srcBox.x, srcBox.y, srcBox.width, srcBox.height, false, false);
setColor(old);
}
public void draw(LTexture texture, float x, float y, float width,
float height, float srcX, float srcY, float srcWidth,
float srcHeight) {
draw(texture, x, y, width, height, srcX, srcY, srcWidth, srcHeight,
false, false);
}
public void draw(LTexture texture, float x, float y, float width,
float height, float srcX, float srcY, float srcWidth,
float srcHeight, LColor c) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
draw(texture, x, y, width, height, srcX, srcY, srcWidth, srcHeight,
false, false);
setColor(old);
}
public void drawEmbedded(LTexture texture, float x, float y, float width,
float height, float srcX, float srcY, float srcWidth,
float srcHeight, LColor c) {
draw(texture, x, y, width - x, height - y, srcX, srcY, srcWidth - srcX,
srcHeight - srcY, c);
}
public void draw(LTexture texture, float x, float y, float width,
float height, float srcX, float srcY, float srcWidth,
float srcHeight, boolean flipX, boolean flipY) {
checkTexture(texture);
float u = srcX * invTexWidth + texture.xOff;
float v = srcY * invTexHeight + texture.yOff;
float u2 = (srcX + srcWidth) * invTexWidth;
float v2 = (srcY + srcHeight) * invTexHeight;
final float fx2 = x + width;
final float fy2 = y + height;
if (flipX) {
float tmp = u;
u = u2;
u2 = tmp;
}
if (flipY) {
float tmp = v;
v = v2;
v2 = tmp;
}
vertices[idx++] = x;
vertices[idx++] = y;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v;
vertices[idx++] = x;
vertices[idx++] = fy2;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v2;
vertices[idx++] = fx2;
vertices[idx++] = fy2;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v2;
vertices[idx++] = fx2;
vertices[idx++] = y;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v;
}
public void draw(LTexture texture, Vector2f pos, RectBox srcBox, LColor c) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
if (srcBox == null) {
draw(texture, pos.x, pos.y, 0, 0, texture.getWidth(),
texture.getHeight());
} else {
draw(texture, pos.x, pos.y, srcBox.x, srcBox.y, srcBox.width,
srcBox.height);
}
setColor(old);
}
public void draw(LTexture texture, float x, float y, float srcX,
float srcY, float srcWidth, float srcHeight, LColor c) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
draw(texture, x, y, srcX, srcY, srcWidth, srcHeight);
setColor(old);
}
public void draw(LTexture texture, float x, float y, float srcX,
float srcY, float srcWidth, float srcHeight) {
checkTexture(texture);
float u = srcX * invTexWidth + texture.xOff;
float v = srcY * invTexHeight + texture.yOff;
float u2 = (srcX + srcWidth) * invTexWidth;
float v2 = (srcY + srcHeight) * invTexHeight;
final float fx2 = x + srcWidth;
final float fy2 = y + srcHeight;
vertices[idx++] = x;
vertices[idx++] = y;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v;
vertices[idx++] = x;
vertices[idx++] = fy2;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v2;
vertices[idx++] = fx2;
vertices[idx++] = fy2;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v2;
vertices[idx++] = fx2;
vertices[idx++] = y;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v;
}
public void draw(LTexture texture, float x, float y) {
if (texture == null) {
return;
}
draw(texture, x, y, texture.getWidth(), texture.getHeight());
}
public void draw(LTexture texture, float x, float y, LColor c) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
draw(texture, x, y, texture.getWidth(), texture.getHeight());
setColor(old);
}
public void draw(LTexture texture, RectBox rect, LColor c) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
draw(texture, rect.x, rect.y, rect.width, rect.height);
setColor(old);
}
public void draw(LTexture texture, Vector2f pos, LColor c) {
float old = color;
if (!c.equals(LColor.white)) {
setColor(c);
}
draw(texture, pos.x, pos.y, texture.getWidth(), texture.getHeight());
setColor(old);
}
public void draw(LTexture texture, float x, float y, float width,
float height) {
if (texture == null) {
return;
}
checkTexture(texture);
final float fx2 = x + width;
final float fy2 = y + height;
final float u = texture.xOff;
final float v = texture.yOff;
final float u2 = texture.widthRatio;
final float v2 = texture.heightRatio;
vertices[idx++] = x;
vertices[idx++] = y;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v;
vertices[idx++] = x;
vertices[idx++] = fy2;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v2;
vertices[idx++] = fx2;
vertices[idx++] = fy2;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v2;
vertices[idx++] = fx2;
vertices[idx++] = y;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v;
}
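 // Copies caller-prepared vertex data (already in x, y, color, u, v layout) directly into
 // the batch buffer, flushing whenever the buffer fills up.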
public void draw(LTexture texture, float[] spriteVertices, int offset,
int length) {
checkTexture(texture);
int remainingVertices = vertices.length - idx;
if (remainingVertices == 0) {
submit();
remainingVertices = vertices.length;
}
int vertexCount = MathUtils.min(remainingVertices, length - offset);
System.arraycopy(spriteVertices, offset, vertices, idx, vertexCount);
offset += vertexCount;
idx += vertexCount;
while (offset < length) {
submit();
vertexCount = MathUtils.min(vertices.length, length - offset);
System.arraycopy(spriteVertices, offset, vertices, 0, vertexCount);
offset += vertexCount;
idx += vertexCount;
}
}
public void draw(LTextureRegion region, float x, float y, float rotation) {
draw(region, x, y, region.getRegionWidth(), region.getRegionHeight(),
rotation);
}
public void draw(LTextureRegion region, float x, float y, float width,
float height, float rotation) {
draw(region, x, y, region.getRegionWidth() / 2,
region.getRegionHeight() / 2, width, height, 1f, 1f, rotation);
}
public void draw(LTextureRegion region, float x, float y) {
draw(region, x, y, region.getRegionWidth(), region.getRegionHeight());
}
public void draw(LTextureRegion region, float x, float y, float width,
float height) {
checkTexture(region.getTexture());
final float fx2 = x + width;
final float fy2 = y + height;
final float u = region.xOff;
final float v = region.yOff;
final float u2 = region.widthRatio;
final float v2 = region.heightRatio;
vertices[idx++] = x;
vertices[idx++] = y;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v;
vertices[idx++] = x;
vertices[idx++] = fy2;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v2;
vertices[idx++] = fx2;
vertices[idx++] = fy2;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v2;
vertices[idx++] = fx2;
vertices[idx++] = y;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v;
}
public void draw(LTextureRegion region, float x, float y, float originX,
float originY, float width, float height, float scaleX,
float scaleY, float rotation) {
checkTexture(region.getTexture());
final float worldOriginX = x + originX;
final float worldOriginY = y + originY;
float fx = -originX;
float fy = -originY;
float fx2 = width - originX;
float fy2 = height - originY;
if (scaleX != 1 || scaleY != 1) {
fx *= scaleX;
fy *= scaleY;
fx2 *= scaleX;
fy2 *= scaleY;
}
final float p1x = fx;
final float p1y = fy;
final float p2x = fx;
final float p2y = fy2;
final float p3x = fx2;
final float p3y = fy2;
final float p4x = fx2;
final float p4y = fy;
float x1;
float y1;
float x2;
float y2;
float x3;
float y3;
float x4;
float y4;
if (rotation != 0) {
final float cos = MathUtils.cosDeg(rotation);
final float sin = MathUtils.sinDeg(rotation);
x1 = cos * p1x - sin * p1y;
y1 = sin * p1x + cos * p1y;
x2 = cos * p2x - sin * p2y;
y2 = sin * p2x + cos * p2y;
x3 = cos * p3x - sin * p3y;
y3 = sin * p3x + cos * p3y;
x4 = x1 + (x3 - x2);
y4 = y3 - (y2 - y1);
} else {
x1 = p1x;
y1 = p1y;
x2 = p2x;
y2 = p2y;
x3 = p3x;
y3 = p3y;
x4 = p4x;
y4 = p4y;
}
x1 += worldOriginX;
y1 += worldOriginY;
x2 += worldOriginX;
y2 += worldOriginY;
x3 += worldOriginX;
y3 += worldOriginY;
x4 += worldOriginX;
y4 += worldOriginY;
final float u = region.xOff;
final float v = region.yOff;
final float u2 = region.widthRatio;
final float v2 = region.heightRatio;
vertices[idx++] = x1;
vertices[idx++] = y1;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v;
vertices[idx++] = x2;
vertices[idx++] = y2;
vertices[idx++] = color;
vertices[idx++] = u;
vertices[idx++] = v2;
vertices[idx++] = x3;
vertices[idx++] = y3;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v2;
vertices[idx++] = x4;
vertices[idx++] = y4;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v;
}
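 // Same as the method above, but 'clockwise' selects a 90-degree rotated assignment of the
 // texture coordinates (useful for atlas regions stored rotated).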
public void draw(LTextureRegion region, float x, float y, float originX,
float originY, float width, float height, float scaleX,
float scaleY, float rotation, boolean clockwise) {
checkTexture(region.getTexture());
final float worldOriginX = x + originX;
final float worldOriginY = y + originY;
float fx = -originX;
float fy = -originY;
float fx2 = width - originX;
float fy2 = height - originY;
if (scaleX != 1 || scaleY != 1) {
fx *= scaleX;
fy *= scaleY;
fx2 *= scaleX;
fy2 *= scaleY;
}
final float p1x = fx;
final float p1y = fy;
final float p2x = fx;
final float p2y = fy2;
final float p3x = fx2;
final float p3y = fy2;
final float p4x = fx2;
final float p4y = fy;
float x1;
float y1;
float x2;
float y2;
float x3;
float y3;
float x4;
float y4;
if (rotation != 0) {
final float cos = MathUtils.cosDeg(rotation);
final float sin = MathUtils.sinDeg(rotation);
x1 = cos * p1x - sin * p1y;
y1 = sin * p1x + cos * p1y;
x2 = cos * p2x - sin * p2y;
y2 = sin * p2x + cos * p2y;
x3 = cos * p3x - sin * p3y;
y3 = sin * p3x + cos * p3y;
x4 = x1 + (x3 - x2);
y4 = y3 - (y2 - y1);
} else {
x1 = p1x;
y1 = p1y;
x2 = p2x;
y2 = p2y;
x3 = p3x;
y3 = p3y;
x4 = p4x;
y4 = p4y;
}
x1 += worldOriginX;
y1 += worldOriginY;
x2 += worldOriginX;
y2 += worldOriginY;
x3 += worldOriginX;
y3 += worldOriginY;
x4 += worldOriginX;
y4 += worldOriginY;
float u1, v1, u2, v2, u3, v3, u4, v4;
if (clockwise) {
u1 = region.widthRatio;
v1 = region.heightRatio;
u2 = region.xOff;
v2 = region.heightRatio;
u3 = region.xOff;
v3 = region.yOff;
u4 = region.widthRatio;
v4 = region.yOff;
} else {
u1 = region.xOff;
v1 = region.yOff;
u2 = region.widthRatio;
v2 = region.yOff;
u3 = region.widthRatio;
v3 = region.heightRatio;
u4 = region.xOff;
v4 = region.heightRatio;
}
vertices[idx++] = x1;
vertices[idx++] = y1;
vertices[idx++] = color;
vertices[idx++] = u1;
vertices[idx++] = v1;
vertices[idx++] = x2;
vertices[idx++] = y2;
vertices[idx++] = color;
vertices[idx++] = u2;
vertices[idx++] = v2;
vertices[idx++] = x3;
vertices[idx++] = y3;
vertices[idx++] = color;
vertices[idx++] = u3;
vertices[idx++] = v3;
vertices[idx++] = x4;
vertices[idx++] = y4;
vertices[idx++] = color;
vertices[idx++] = u4;
vertices[idx++] = v4;
}
private LFont font = LFont.getDefaultFont();
public LFont getFont() {
return font;
}
public void setFont(LFont font) {
this.font = font;
}
public void drawString(LFont spriteFont, String text, float px, float py,
LColor color, float rotation, float originx, float originy,
float scale) {
LFont old = font;
if (spriteFont != null) {
setFont(spriteFont);
}
  int heigh = (font.getHeight() - 2);
if (rotation == 0f) {
drawString(text, px - (originx * scale), (py + heigh)
- (originy * scale), scale, scale, originx, originy,
rotation, color);
} else {
drawString(text, px, (py + heigh), scale, scale, originx, originy,
rotation, color);
}
setFont(old);
}
public void drawString(LFont spriteFont, String text, Vector2f position,
LColor color, float rotation, Vector2f origin, float scale) {
LFont old = font;
if (spriteFont != null) {
setFont(spriteFont);
}
  int heigh = (font.getHeight() - 2);
if (rotation == 0f) {
drawString(text, position.x - (origin.x * scale),
(position.y + heigh) - (origin.y * scale), scale, scale,
origin.x, origin.y, rotation, color);
} else {
drawString(text, position.x, (position.y + heigh), scale, scale,
origin.x, origin.y, rotation, color);
}
setFont(old);
}
public void drawString(LFont spriteFont, String text, Vector2f position,
LColor color) {
LFont old = font;
if (spriteFont != null) {
setFont(spriteFont);
}
  int heigh = (font.getHeight() - 2);
drawString(text, position.x, (position.y + heigh), 1f, 1f, 0f, 0f, 0f,
color);
setFont(old);
}
public void drawString(LFont spriteFont, String text, float x, float y,
LColor color) {
LFont old = font;
if (spriteFont != null) {
setFont(spriteFont);
}
  int heigh = (font.getHeight() - 2);
drawString(text, x, (y + heigh), 1f, 1f, 0f, 0f, 0f, color);
setFont(old);
}
public void drawString(LFont spriteFont, String text, Vector2f position,
LColor color, float rotation, Vector2f origin, Vector2f scale) {
LFont old = font;
if (spriteFont != null) {
setFont(spriteFont);
}
  int heigh = (font.getHeight() - 2);
if (rotation == 0f) {
drawString(text, position.x - (origin.x * scale.x),
(position.y + heigh) - (origin.y * scale.y), scale.x,
scale.y, origin.x, origin.y, rotation, color);
} else {
drawString(text, position.x, (position.y + heigh), scale.x,
scale.y, origin.x, origin.y, rotation, color);
}
setFont(old);
}
private boolean lockSubmit = false;
public void drawString(String mes, float x, float y, float scaleX,
float scaleY, float ax, float ay, float rotation, LColor c) {
if (!drawing) {
   throw new IllegalStateException("begin must be called before drawing!");
}
if (c == null) {
return;
}
if (mes == null || mes.length() == 0) {
return;
}
if (!lockSubmit) {
submit();
}
y = y + font.getAscent();
  LSTRDictionary.drawString(font, mes, x, y, scaleX, scaleY, ax, ay,
rotation, c);
}
public final void drawString(String mes, Vector2f position) {
drawString(mes, position.x, position.y, getColor());
}
public final void drawString(String mes, Vector2f position, LColor color) {
drawString(mes, position.x, position.y, color);
}
public final void drawString(String mes, float x, float y) {
drawString(mes, x, y, getColor());
}
public final void drawString(String mes, float x, float y, LColor color) {
drawString(mes, x, y, 0, color);
}
public final void drawString(String mes, float x, float y, float rotation) {
drawString(mes, x, y, rotation, getColor());
}
public void drawString(String mes, float x, float y, float rotation,
LColor c) {
drawString(mes, x, y, 1f, 1f, 0, 0, rotation, c);
}
public void drawString(String mes, float x, float y, float sx, float sy,
Vector2f origin, float rotation, LColor c) {
drawString(mes, x, y, sx, sy, origin.x, origin.y, rotation, c);
}
public void drawString(String mes, float x, float y, Vector2f origin,
float rotation, LColor c) {
drawString(mes, x, y, 1f, 1f, origin.x, origin.y, rotation, c);
}
public void drawString(String mes, float x, float y, Vector2f origin,
LColor c) {
drawString(mes, x, y, 1f, 1f, origin.x, origin.y, 0, c);
}
private void checkDrawing() {
if (!drawing) {
   throw new IllegalStateException("begin must be called before drawing!");
}
}
public void flush() {
submit();
}
public BlendState getBlendState() {
return lastBlendState;
}
public void setBlendState(BlendState state) {
this.lastBlendState = state;
}
public void flush(BlendState state) {
submit(state);
}
private void submit() {
submit(lastBlendState);
}
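 // Uploads the accumulated vertices: binds the last texture, applies the blend mode mapped
 // from the batch BlendState, renders the queued quads (6 indices each) as triangles,
 // then restores the previous blend mode and resets the write index.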
private void submit(BlendState state) {
if (idx == 0) {
return;
}
int spritesInBatch = idx / 20;
if (spritesInBatch > maxSpritesInBatch) {
maxSpritesInBatch = spritesInBatch;
}
GLEx self = GLEx.self;
self.bind(lastTexture);
int old = self.getBlendMode();
  switch (state) {
case Additive:
self.setBlendMode(GL.MODE_ALPHA_ONE);
break;
case AlphaBlend:
self.setBlendMode(GL.MODE_SPEED);
break;
case Opaque:
self.setBlendMode(GL.MODE_NONE);
break;
case NonPremultiplied:
self.setBlendMode(GL.MODE_NORMAL);
break;
}
mesh.setVertices(vertices, 0, idx);
mesh.getIndicesBuffer().position(0);
mesh.getIndicesBuffer().limit(spritesInBatch * 6);
mesh.render(GL.GL_TRIANGLES, 0, spritesInBatch * 6);
self.setBlendMode(old);
idx = 0;
}
public boolean isLockSubmit() {
return lockSubmit;
}
public void setLockSubmit(boolean lockSubmit) {
this.lockSubmit = lockSubmit;
}
public void dispose() {
if (lineLazy != null) {
lineLazy.clear();
}
if (whitePixel != null) {
whitePixel.destroy();
}
if (batch != null) {
batch.dispose();
}
if (mesh != null) {
mesh.dispose();
}
}
}
| 21,405 |
687 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .prediction_count_evaluation import PredictionCountEvaluator # noqa
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| 76 |
5,803 |
//
// JasonUtilAction.h
// Jasonette
//
// Copyright © 2016 gliechtenstein. All rights reserved.
//
#import "JasonAction.h"
#import <MobileCoreServices/MobileCoreServices.h>
#import "JasonHelper.h"
#import <TWMessageBarManager/TWMessageBarManager.h>
#import "JDStatusBarNotification.h"
#import <AHKActionSheet/AHKActionSheet.h>
#import <RMDateSelectionViewController/RMDateSelectionViewController.h>
#import <SDWebImage/UIImageView+WebCache.h>
@import APAddressBook;
@interface JasonUtilAction : JasonAction
@end
| 171 |
4,391 |
<filename>packages/pyright-internal/src/tests/samples/typeNarrowingLiteralMember1.py
# This sample tests type narrowing based on member accesses
# to members that have literal types.
from typing import ClassVar, Literal, Type, Union
class A:
kind: Literal["A"]
kind_class: ClassVar[Literal["A"]]
d: Literal[1, 2, 3]
class B:
kind: Literal["B"]
kind_class: ClassVar[Literal["B"]]
d: Literal[3, 4, 5]
class C:
kind: str
kind_class: str
c: int
class D:
kind: Literal[1, 2, 3]
def foo_obj1(c: Union[A, B]):
if c.kind == "A":
tc1: Literal["A"] = reveal_type(c)
else:
tc2: Literal["B"] = reveal_type(c)
def foo_obj2(c: Union[A, B]):
if c.kind != "A":
tc1: Literal["B"] = reveal_type(c)
else:
tc2: Literal["A"] = reveal_type(c)
def foo_obj3(c: Union[A, B, C]):
if c.kind == "A":
tc1: Literal["A | B | C"] = reveal_type(c)
else:
tc2: Literal["A | B | C"] = reveal_type(c)
def foo_obj4(c: Union[A, B]):
if c.d == 1:
tc1: Literal["A"] = reveal_type(c)
elif c.d == 3:
tc2: Literal["A | B"] = reveal_type(c)
def foo_obj5(d: D):
if d.kind == 1:
td1: Literal["D"] = reveal_type(d)
elif d.kind == 2:
td2: Literal["D"] = reveal_type(d)
def foo_class2(c: Union[Type[A], Type[B]]):
if c.kind_class == "A":
tc1: Literal["Type[A]"] = reveal_type(c)
else:
tc2: Literal["Type[B]"] = reveal_type(c)
| 725 |
807 |
<reponame>weltonrodrigo/origin<filename>vendor/github.com/google/certificate-transparency/cpp/util/uuid.cc<gh_stars>100-1000
#include <iomanip>
#include <random>
#include <sstream>
#include "util/uuid.h"
using std::hex;
using std::mt19937;
using std::nouppercase;
using std::random_device;
using std::setw;
using std::setfill;
using std::string;
using std::stringstream;
using std::uniform_int_distribution;
namespace cert_trans {
string UUID4() {
random_device rd;
mt19937 twister(rd());
uniform_int_distribution<uint32_t> distribution(0, UINT32_MAX);
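  // a..d are four random 32-bit words; the masks below force the version nibble to 4
  // (in b) and the variant bits to binary 10 (in c), as RFC 4122 requires for UUID4.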
const uint32_t a((distribution(twister) & 0xFFFFFFFFUL));
const uint32_t b((distribution(twister) & 0xFFFF0FFFUL) | 0x00004000UL);
const uint32_t c((distribution(twister) & 0x3FFFFFFFUL) | 0x80000000UL);
const uint32_t d((distribution(twister) & 0xFFFFFFFFUL));
stringstream oss;
oss << hex << nouppercase << setfill('0');
oss << setw(8) << (a) << '-';
oss << setw(4) << (b >> 16) << '-';
oss << setw(4) << (b & 0xFFFF) << '-';
oss << setw(4) << (c >> 16) << '-';
oss << setw(4) << (c & 0xFFFF);
oss << setw(8) << d;
return oss.str();
}
} // namespace cert_trans
| 468 |
1,444 |
package org.wiztools.restclient.bean;
/**
*
* @author subwiz
*/
public enum MultipartSubtype {
FORM_DATA, MIXED, DIGEST, MESSAGE, ALTERNATIVE, RELATED, REPORT, SIGNED,
ENCRYPTED, X_MIXED_REPLACE, BYTERANGE;
@Override
public String toString() {
return this.name().toLowerCase().replaceAll("_", "-");
}
}
| 140 |
5,169 |
<reponame>Gantios/Specs<filename>Specs/0/6/f/OnfidoTask/0.2/OnfidoTask.podspec.json
{
"name": "OnfidoTask",
"authors": [
"Kerem",
"OnfidoTask"
],
"summary": "OnfidoTask SDK",
"version": "0.2",
"homepage": "http://www.google.com",
"license": {
"type": "Apache 2.0",
"file": "LICENSE"
},
"platforms": {
"ios": "11.0"
},
"source": {
"git": "https://[email protected]/kerem1905/onfido-release.git",
"tag": "release/v0.2"
},
"source_files": "Framework/OnfidoTask.framework/Headers/*.h",
"public_header_files": "Framework/OnfidoTask.framework/Headers/*.h",
"vendored_frameworks": "Framework/OnfidoTask.framework",
"frameworks": [
"UIKit",
"Foundation"
]
}
| 334 |
308 |
<reponame>Deci-AI/super-gradients
"""
This file is used to define the Dataset used for the Training.
"""
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from super_gradients.training import utils as core_utils
from super_gradients.training.datasets.dataset_interfaces import DatasetInterface
class UserDataset(DatasetInterface):
"""
The user's dataset inherits from SuperGradient's DatasetInterface and must
    contain a trainset and a testset from which the data will be loaded.
All augmentations, resizing and parsing must be done in this class.
- Augmentations are defined below and will be carried out in the order they are given.
    super_gradients provides additional dataset reading tools such as ListDataset, which takes a list of files
    corresponding to the images and labels.
"""
def __init__(self, name="cifar10", dataset_params={}):
super(UserDataset, self).__init__(dataset_params)
self.dataset_name = name
self.lib_dataset_params = {'mean': (0.4914, 0.4822, 0.4465), 'std': (0.2023, 0.1994, 0.2010)}
crop_size = core_utils.get_param(self.dataset_params, 'crop_size', default_val=32)
transform_train = transforms.Compose([
transforms.RandomCrop(crop_size, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(self.lib_dataset_params['mean'], self.lib_dataset_params['std']),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(self.lib_dataset_params['mean'], self.lib_dataset_params['std']),
])
self.trainset = datasets.CIFAR10(root=self.dataset_params.dataset_dir, train=True, download=True,
transform=transform_train)
self.testset = datasets.CIFAR10(root=self.dataset_params.dataset_dir, train=False, download=True,
transform=transform_test)
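# Illustrative usage sketch only; 'my_dataset_params' is a placeholder for the caller's
# params object (it must expose a 'dataset_dir' entry and may provide 'crop_size'):
#
#   data = UserDataset(name="cifar10", dataset_params=my_dataset_params)
#   train_set, test_set = data.trainset, data.testset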
| 806 |
984 |
/*
* Copyright DataStax, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.dse.driver.internal.core.graph.reactive;
import com.datastax.dse.driver.api.core.graph.GraphNode;
import com.datastax.dse.driver.api.core.graph.reactive.ReactiveGraphNode;
import com.datastax.oss.driver.api.core.cql.ExecutionInfo;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.List;
import java.util.Map;
import java.util.Set;
import net.jcip.annotations.NotThreadSafe;
import org.apache.tinkerpop.gremlin.process.traversal.Path;
import org.apache.tinkerpop.gremlin.structure.Edge;
import org.apache.tinkerpop.gremlin.structure.Property;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.apache.tinkerpop.gremlin.structure.VertexProperty;
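/**
 * Thin wrapper that delegates every {@link GraphNode} call to the wrapped node and carries
 * the {@link ExecutionInfo} of the query that produced it.
 */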
@NotThreadSafe
class DefaultReactiveGraphNode implements ReactiveGraphNode {
private final GraphNode graphNode;
private final ExecutionInfo executionInfo;
DefaultReactiveGraphNode(@NonNull GraphNode graphNode, @NonNull ExecutionInfo executionInfo) {
this.graphNode = graphNode;
this.executionInfo = executionInfo;
}
@NonNull
@Override
public ExecutionInfo getExecutionInfo() {
return executionInfo;
}
@Override
public boolean isNull() {
return graphNode.isNull();
}
@Override
public boolean isMap() {
return graphNode.isMap();
}
@Override
public Iterable<?> keys() {
return graphNode.keys();
}
@Override
public GraphNode getByKey(Object key) {
return graphNode.getByKey(key);
}
@Override
public <K, V> Map<K, V> asMap() {
return graphNode.asMap();
}
@Override
public boolean isList() {
return graphNode.isList();
}
@Override
public int size() {
return graphNode.size();
}
@Override
public GraphNode getByIndex(int index) {
return graphNode.getByIndex(index);
}
@Override
public <T> List<T> asList() {
return graphNode.asList();
}
@Override
public boolean isValue() {
return graphNode.isValue();
}
@Override
public int asInt() {
return graphNode.asInt();
}
@Override
public boolean asBoolean() {
return graphNode.asBoolean();
}
@Override
public long asLong() {
return graphNode.asLong();
}
@Override
public double asDouble() {
return graphNode.asDouble();
}
@Override
public String asString() {
return graphNode.asString();
}
@Override
public <ResultT> ResultT as(Class<ResultT> clazz) {
return graphNode.as(clazz);
}
@Override
public <ResultT> ResultT as(GenericType<ResultT> type) {
return graphNode.as(type);
}
@Override
public boolean isVertex() {
return graphNode.isVertex();
}
@Override
public Vertex asVertex() {
return graphNode.asVertex();
}
@Override
public boolean isEdge() {
return graphNode.isEdge();
}
@Override
public Edge asEdge() {
return graphNode.asEdge();
}
@Override
public boolean isPath() {
return graphNode.isPath();
}
@Override
public Path asPath() {
return graphNode.asPath();
}
@Override
public boolean isProperty() {
return graphNode.isProperty();
}
@Override
public <T> Property<T> asProperty() {
return graphNode.asProperty();
}
@Override
public boolean isVertexProperty() {
return graphNode.isVertexProperty();
}
@Override
public <T> VertexProperty<T> asVertexProperty() {
return graphNode.asVertexProperty();
}
@Override
public boolean isSet() {
return graphNode.isSet();
}
@Override
public <T> Set<T> asSet() {
return graphNode.asSet();
}
@Override
public String toString() {
return "DefaultReactiveGraphNode{graphNode="
+ graphNode
+ ", executionInfo="
+ executionInfo
+ '}';
}
}
| 1,469 |
634 |
<reponame>halotroop2288/consulo<gh_stars>100-1000
/*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.diff.impl.string;
import javax.annotation.Nonnull;
import java.util.Arrays;
public class DiffStringBuilder implements CharSequence {
@Nonnull
private char[] myData;
private int myLength;
public DiffStringBuilder() {
this(16);
}
public DiffStringBuilder(int len) {
myData = new char[len];
myLength = 0;
}
@Override
public int length() {
return myLength;
}
@Override
public char charAt(int index) {
if (index < 0 || index >= myLength) {
throw new StringIndexOutOfBoundsException(index);
}
return myData[index];
}
@Override
@Nonnull
public CharSequence subSequence(int start, int end) {
DiffString.checkBounds(start, end, myLength);
return DiffString.create(myData, start, end - start);
}
@Nonnull
public DiffString toDiffString() {
return DiffString.create(myData, 0, myLength);
}
@Override
@Nonnull
public String toString() {
return toDiffString().toString();
}
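  // Doubles the backing array until it can hold at least neededCapacity characters.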
private void ensureCapacityInternal(int neededCapacity) {
if (neededCapacity > myData.length) {
int newCapacity = myData.length;
while (newCapacity < neededCapacity) newCapacity *= 2;
myData = Arrays.copyOf(myData, newCapacity);
}
}
public void append(@Nonnull DiffString s) {
if (s.isEmpty()) return;
ensureCapacityInternal(myLength + s.length());
s.copyData(myData, myLength);
myLength += s.length();
}
}
| 703 |
679 |
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef SC_FIELDUNO_HXX
#define SC_FIELDUNO_HXX
#include "address.hxx"
#include "mutexhlp.hxx"
#include <svl/lstner.hxx>
#include <svl/itemprop.hxx>
#include <editeng/editdata.hxx>
#include <com/sun/star/text/XTextField.hpp>
#include <com/sun/star/lang/XServiceInfo.hpp>
#include <com/sun/star/container/XContainer.hpp>
#include <com/sun/star/lang/XUnoTunnel.hpp>
#include <com/sun/star/container/XEnumerationAccess.hpp>
#include <com/sun/star/container/XIndexAccess.hpp>
#include <com/sun/star/beans/XPropertySet.hpp>
#include <com/sun/star/util/XRefreshable.hpp>
#include <cppuhelper/component.hxx>
#include <cppuhelper/implbase5.hxx>
#include <osl/mutex.hxx>
class SvxEditSource;
class SvxFieldItem;
class ScCellFieldObj;
class ScHeaderFieldObj;
class ScHeaderFooterContentObj;
class ScDocShell;
//------------------------------------------------------------------
class ScCellFieldsObj : public cppu::WeakImplHelper5<
com::sun::star::container::XEnumerationAccess,
com::sun::star::container::XIndexAccess,
com::sun::star::container::XContainer,
com::sun::star::util::XRefreshable,
com::sun::star::lang::XServiceInfo >,
public SfxListener
{
private:
ScDocShell* pDocShell;
ScAddress aCellPos;
SvxEditSource* pEditSource;
/// List of refresh listeners.
cppu::OInterfaceContainerHelper* mpRefreshListeners;
/// mutex to lock the InterfaceContainerHelper
osl::Mutex aMutex;
ScCellFieldObj* GetObjectByIndex_Impl(sal_Int32 Index) const;
public:
ScCellFieldsObj(ScDocShell* pDocSh, const ScAddress& rPos);
virtual ~ScCellFieldsObj();
virtual void Notify( SfxBroadcaster& rBC, const SfxHint& rHint );
// XIndexAccess
virtual sal_Int32 SAL_CALL getCount() throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Any SAL_CALL getByIndex( sal_Int32 Index )
throw(::com::sun::star::lang::IndexOutOfBoundsException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
// XEnumerationAccess
virtual ::com::sun::star::uno::Reference< ::com::sun::star::container::XEnumeration > SAL_CALL
createEnumeration() throw(::com::sun::star::uno::RuntimeException);
// XElementAccess
virtual ::com::sun::star::uno::Type SAL_CALL getElementType()
throw(::com::sun::star::uno::RuntimeException);
virtual sal_Bool SAL_CALL hasElements() throw(::com::sun::star::uno::RuntimeException);
// XContainer
virtual void SAL_CALL addContainerListener( const ::com::sun::star::uno::Reference<
::com::sun::star::container::XContainerListener >& xListener )
throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeContainerListener( const ::com::sun::star::uno::Reference<
::com::sun::star::container::XContainerListener >& xListener )
throw(::com::sun::star::uno::RuntimeException);
// XRefreshable
virtual void SAL_CALL refresh( )
throw (::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL addRefreshListener( const ::com::sun::star::uno::Reference<
::com::sun::star::util::XRefreshListener >& l )
throw (::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeRefreshListener( const ::com::sun::star::uno::Reference<
::com::sun::star::util::XRefreshListener >& l )
throw (::com::sun::star::uno::RuntimeException);
// XServiceInfo
virtual ::rtl::OUString SAL_CALL getImplementationName()
throw(::com::sun::star::uno::RuntimeException);
virtual sal_Bool SAL_CALL supportsService( const ::rtl::OUString& ServiceName )
throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getSupportedServiceNames()
throw(::com::sun::star::uno::RuntimeException);
};
class ScCellFieldObj : public ScMutexHelper,
public ::cppu::OComponentHelper,
public ::com::sun::star::text::XTextField,
public ::com::sun::star::beans::XPropertySet,
public ::com::sun::star::lang::XUnoTunnel,
public ::com::sun::star::lang::XServiceInfo,
public SfxListener
{
private:
const SfxItemPropertySet* pPropSet;
ScDocShell* pDocShell;
ScAddress aCellPos;
SvxEditSource* pEditSource;
ESelection aSelection;
    String aUrl; // content while not yet inserted (only then!)
String aRepresentation;
String aTarget;
ScCellFieldObj(); // disabled
public:
ScCellFieldObj(ScDocShell* pDocSh, const ScAddress& rPos,
const ESelection& rSel);
virtual ~ScCellFieldObj();
virtual void Notify( SfxBroadcaster& rBC, const SfxHint& rHint );
    // called via getImplementation:
void DeleteField();
sal_Bool IsInserted() const { return pEditSource != NULL; }
SvxFieldItem CreateFieldItem();
void InitDoc( ScDocShell* pDocSh, const ScAddress& rPos,
const ESelection& rSel );
virtual ::com::sun::star::uno::Any SAL_CALL queryAggregation(
const ::com::sun::star::uno::Type & rType )
throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Any SAL_CALL queryInterface(
const ::com::sun::star::uno::Type & rType )
throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL acquire() throw();
virtual void SAL_CALL release() throw();
// XTextField
virtual ::rtl::OUString SAL_CALL getPresentation( sal_Bool bShowCommand )
throw(::com::sun::star::uno::RuntimeException);
// XTextContent
virtual void SAL_CALL attach( const ::com::sun::star::uno::Reference<
::com::sun::star::text::XTextRange >& xTextRange )
throw(::com::sun::star::lang::IllegalArgumentException,
::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Reference< ::com::sun::star::text::XTextRange > SAL_CALL
getAnchor() throw(::com::sun::star::uno::RuntimeException);
// XComponent
virtual void SAL_CALL dispose() throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL addEventListener( const ::com::sun::star::uno::Reference<
::com::sun::star::lang::XEventListener >& xListener )
throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeEventListener( const ::com::sun::star::uno::Reference<
::com::sun::star::lang::XEventListener >& aListener )
throw(::com::sun::star::uno::RuntimeException);
// XPropertySet
virtual ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySetInfo >
SAL_CALL getPropertySetInfo()
throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL setPropertyValue( const ::rtl::OUString& aPropertyName,
const ::com::sun::star::uno::Any& aValue )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::beans::PropertyVetoException,
::com::sun::star::lang::IllegalArgumentException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Any SAL_CALL getPropertyValue(
const ::rtl::OUString& PropertyName )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL addPropertyChangeListener( const ::rtl::OUString& aPropertyName,
const ::com::sun::star::uno::Reference<
::com::sun::star::beans::XPropertyChangeListener >& xListener )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removePropertyChangeListener( const ::rtl::OUString& aPropertyName,
const ::com::sun::star::uno::Reference<
::com::sun::star::beans::XPropertyChangeListener >& aListener )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL addVetoableChangeListener( const ::rtl::OUString& PropertyName,
const ::com::sun::star::uno::Reference<
::com::sun::star::beans::XVetoableChangeListener >& aListener )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeVetoableChangeListener( const ::rtl::OUString& PropertyName,
const ::com::sun::star::uno::Reference<
::com::sun::star::beans::XVetoableChangeListener >& aListener )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
// XUnoTunnel
virtual sal_Int64 SAL_CALL getSomething( const ::com::sun::star::uno::Sequence<
sal_Int8 >& aIdentifier )
throw(::com::sun::star::uno::RuntimeException);
static const com::sun::star::uno::Sequence<sal_Int8>& getUnoTunnelId();
static ScCellFieldObj* getImplementation( const com::sun::star::uno::Reference<
com::sun::star::text::XTextContent> xObj );
// XServiceInfo
virtual ::rtl::OUString SAL_CALL getImplementationName()
throw(::com::sun::star::uno::RuntimeException);
virtual sal_Bool SAL_CALL supportsService( const ::rtl::OUString& ServiceName )
throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getSupportedServiceNames()
throw(::com::sun::star::uno::RuntimeException);
// XTypeProvider
virtual ::com::sun::star::uno::Sequence< ::com::sun::star::uno::Type > SAL_CALL getTypes()
throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Sequence< sal_Int8 > SAL_CALL getImplementationId()
throw(::com::sun::star::uno::RuntimeException);
};
//------------------------------------------------------------------
class ScHeaderFieldsObj : public cppu::WeakImplHelper5<
com::sun::star::container::XEnumerationAccess,
com::sun::star::container::XIndexAccess,
com::sun::star::container::XContainer,
com::sun::star::util::XRefreshable,
com::sun::star::lang::XServiceInfo >
{
private:
ScHeaderFooterContentObj* pContentObj;
sal_uInt16 nPart;
sal_uInt16 nType;
SvxEditSource* pEditSource;
/// List of refresh listeners.
cppu::OInterfaceContainerHelper* mpRefreshListeners;
/// mutex to lock the InterfaceContainerHelper
osl::Mutex aMutex;
ScHeaderFieldObj* GetObjectByIndex_Impl(sal_Int32 Index) const;
public:
ScHeaderFieldsObj(ScHeaderFooterContentObj* pContent,
sal_uInt16 nP, sal_uInt16 nT);
virtual ~ScHeaderFieldsObj();
// XIndexAccess
virtual sal_Int32 SAL_CALL getCount() throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Any SAL_CALL getByIndex( sal_Int32 Index )
throw(::com::sun::star::lang::IndexOutOfBoundsException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
// XEnumerationAccess
virtual ::com::sun::star::uno::Reference< ::com::sun::star::container::XEnumeration > SAL_CALL
createEnumeration() throw(::com::sun::star::uno::RuntimeException);
// XElementAccess
virtual ::com::sun::star::uno::Type SAL_CALL getElementType()
throw(::com::sun::star::uno::RuntimeException);
virtual sal_Bool SAL_CALL hasElements() throw(::com::sun::star::uno::RuntimeException);
// XContainer
virtual void SAL_CALL addContainerListener( const ::com::sun::star::uno::Reference<
::com::sun::star::container::XContainerListener >& xListener )
throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeContainerListener( const ::com::sun::star::uno::Reference<
::com::sun::star::container::XContainerListener >& xListener )
throw(::com::sun::star::uno::RuntimeException);
// XRefreshable
virtual void SAL_CALL refresh( )
throw (::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL addRefreshListener( const ::com::sun::star::uno::Reference<
::com::sun::star::util::XRefreshListener >& l )
throw (::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeRefreshListener( const ::com::sun::star::uno::Reference<
::com::sun::star::util::XRefreshListener >& l )
throw (::com::sun::star::uno::RuntimeException);
// XServiceInfo
virtual ::rtl::OUString SAL_CALL getImplementationName()
throw(::com::sun::star::uno::RuntimeException);
virtual sal_Bool SAL_CALL supportsService( const ::rtl::OUString& ServiceName )
throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getSupportedServiceNames()
throw(::com::sun::star::uno::RuntimeException);
};
class ScHeaderFieldObj : public ScMutexHelper,
public ::cppu::OComponentHelper,
public ::com::sun::star::text::XTextField,
public ::com::sun::star::beans::XPropertySet,
public ::com::sun::star::lang::XUnoTunnel,
public ::com::sun::star::lang::XServiceInfo
{
private:
const SfxItemPropertySet* pPropSet;
ScHeaderFooterContentObj* pContentObj;
sal_uInt16 nPart;
sal_uInt16 nType;
SvxEditSource* pEditSource;
ESelection aSelection;
sal_Int16 nFileFormat; // enum SvxFileFormat, valid if not inserted
ScHeaderFieldObj(); // disabled
public:
ScHeaderFieldObj(ScHeaderFooterContentObj* pContent, sal_uInt16 nP,
sal_uInt16 nT, const ESelection& rSel);
virtual ~ScHeaderFieldObj();
                        // called via getImplementation:
void DeleteField();
sal_Bool IsInserted() const { return pEditSource != NULL; }
SvxFieldItem CreateFieldItem();
void InitDoc( ScHeaderFooterContentObj* pContent, sal_uInt16 nP,
const ESelection& rSel );
virtual ::com::sun::star::uno::Any SAL_CALL queryAggregation(
const ::com::sun::star::uno::Type & rType )
throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Any SAL_CALL queryInterface(
const ::com::sun::star::uno::Type & rType )
throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL acquire() throw();
virtual void SAL_CALL release() throw();
// XTextField
virtual ::rtl::OUString SAL_CALL getPresentation( sal_Bool bShowCommand )
throw(::com::sun::star::uno::RuntimeException);
// XTextContent
virtual void SAL_CALL attach( const ::com::sun::star::uno::Reference<
::com::sun::star::text::XTextRange >& xTextRange )
throw(::com::sun::star::lang::IllegalArgumentException,
::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Reference< ::com::sun::star::text::XTextRange > SAL_CALL
getAnchor() throw(::com::sun::star::uno::RuntimeException);
// XComponent
virtual void SAL_CALL dispose() throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL addEventListener( const ::com::sun::star::uno::Reference<
::com::sun::star::lang::XEventListener >& xListener )
throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeEventListener( const ::com::sun::star::uno::Reference<
::com::sun::star::lang::XEventListener >& aListener )
throw(::com::sun::star::uno::RuntimeException);
// XPropertySet
virtual ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySetInfo >
SAL_CALL getPropertySetInfo()
throw(::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL setPropertyValue( const ::rtl::OUString& aPropertyName,
const ::com::sun::star::uno::Any& aValue )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::beans::PropertyVetoException,
::com::sun::star::lang::IllegalArgumentException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Any SAL_CALL getPropertyValue(
const ::rtl::OUString& PropertyName )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL addPropertyChangeListener( const ::rtl::OUString& aPropertyName,
const ::com::sun::star::uno::Reference<
::com::sun::star::beans::XPropertyChangeListener >& xListener )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removePropertyChangeListener( const ::rtl::OUString& aPropertyName,
const ::com::sun::star::uno::Reference<
::com::sun::star::beans::XPropertyChangeListener >& aListener )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL addVetoableChangeListener( const ::rtl::OUString& PropertyName,
const ::com::sun::star::uno::Reference<
::com::sun::star::beans::XVetoableChangeListener >& aListener )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeVetoableChangeListener( const ::rtl::OUString& PropertyName,
const ::com::sun::star::uno::Reference<
::com::sun::star::beans::XVetoableChangeListener >& aListener )
throw(::com::sun::star::beans::UnknownPropertyException,
::com::sun::star::lang::WrappedTargetException,
::com::sun::star::uno::RuntimeException);
// XUnoTunnel
virtual sal_Int64 SAL_CALL getSomething( const ::com::sun::star::uno::Sequence<
sal_Int8 >& aIdentifier )
throw(::com::sun::star::uno::RuntimeException);
static const com::sun::star::uno::Sequence<sal_Int8>& getUnoTunnelId();
static ScHeaderFieldObj* getImplementation( const com::sun::star::uno::Reference<
com::sun::star::text::XTextContent> xObj );
// XServiceInfo
virtual ::rtl::OUString SAL_CALL getImplementationName()
throw(::com::sun::star::uno::RuntimeException);
virtual sal_Bool SAL_CALL supportsService( const ::rtl::OUString& ServiceName )
throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getSupportedServiceNames()
throw(::com::sun::star::uno::RuntimeException);
// XTypeProvider
virtual ::com::sun::star::uno::Sequence< ::com::sun::star::uno::Type > SAL_CALL getTypes()
throw(::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Sequence< sal_Int8 > SAL_CALL getImplementationId()
throw(::com::sun::star::uno::RuntimeException);
};
#endif
| 8,135 |
543 |
package com.riiablo.codec.excel;
@Excel.Binned
public class Weapons extends Excel<Weapons.Entry> {
public static class Entry extends ItemEntry {
@Column public String wclass;
@Column(format = "2handedwclass")
public String _2handedwclass;
@Column(format = "1or2handed")
public boolean _1or2handed;
@Column(format = "2handed")
public boolean _2handed;
@Column(format = "2handmindam")
public int _2handmindam;
@Column(format = "2handmaxdam")
public int _2handmaxdam;
@Column public int minmisdam;
@Column public int maxmisdam;
@Column public int reqstr;
@Column public int reqdex;
@Column public int durability;
}
}
| 281 |
401 |
<reponame>LemonNicholas/fresco-helper
package com.facebook.fresco.helper.photoview.anim;
import android.app.Activity;
import android.graphics.Rect;
import android.os.Bundle;
import android.util.SparseArray;
import android.view.View;
import android.view.WindowManager;
import androidx.recyclerview.widget.GridLayoutManager;
import java.util.ArrayList;
public class ViewOptionsCompat {
public static final String KEY_VIEW_OPTION_LIST = "view_option_list";
public static Bundle makeScaleUpAnimation(GridLayoutManager layoutManager, ArrayList<String> thumbnailList) {
SparseArray<ViewOptions> sparseArray = new SparseArray<>();
int firstVisibleItemPosition = layoutManager.findFirstVisibleItemPosition();
int lastVisibleItemPosition = layoutManager.findLastVisibleItemPosition();
for (int i = 0; i < thumbnailList.size(); i++) {
if (i >= firstVisibleItemPosition && i <= lastVisibleItemPosition) {
View childView = layoutManager.findViewByPosition(i);
if (childView != null) {
sparseArray.put(i, createViewOptions(childView, thumbnailList.get(i)));
}
}
}
Bundle bundle = new Bundle();
bundle.putSparseParcelableArray(KEY_VIEW_OPTION_LIST, sparseArray);
return bundle;
}
public static Bundle makeScaleUpAnimation(View thumbnailView, String thumbnail) {
SparseArray<ViewOptions> sparseArray = new SparseArray<>();
sparseArray.put(0, createViewOptions(thumbnailView, thumbnail));
Bundle bundle = new Bundle();
bundle.putSparseParcelableArray(KEY_VIEW_OPTION_LIST, sparseArray);
return bundle;
}
private static ViewOptions createViewOptions(View thumbnailView, String thumbnail) {
ViewOptions viewOptions = new ViewOptions();
viewOptions.thumbnail = thumbnail;
        // Check whether the current activity is in full-screen mode
viewOptions.isFullScreen = isFullScreen((Activity) thumbnailView.getContext());
        // Check whether the screen is currently in portrait orientation
viewOptions.isVerticalScreen = isVerticalScreen((Activity) thumbnailView.getContext());
        // Check whether the view is visible on screen; run the enter animation only if it is
viewOptions.isInTheScreen = isInScreen(thumbnailView);
int[] location = new int[2];
        // Get the view's coordinates relative to the screen
thumbnailView.getLocationOnScreen(location);
        // Set the start coordinates and the start width/height
viewOptions.startX = location[0];
viewOptions.startY = location[1];
viewOptions.width = thumbnailView.getMeasuredWidth();
viewOptions.height = thumbnailView.getMeasuredHeight();
return viewOptions;
}
/**
     * Whether the view is currently visible on screen, including partially covered or partially visible states
*
* @return
*/
public static boolean isInScreen(View view) {
Rect bounds = new Rect();
        // Returns true as long as any part of the view is inside the screen; occlusion is not considered
boolean isInScreen = view.getGlobalVisibleRect(bounds);
if (isInScreen) {
if (bounds.width() < view.getWidth() * 0.3f || bounds.height() < view.getHeight() * 0.3f) {
return false;
} else {
return true;
}
} else {
return false;
}
}
/**
     * Check whether the current screen is in portrait (vertical) orientation
*
* @param activity
* @return
*/
public static boolean isVerticalScreen(Activity activity) {
int flag = activity.getResources().getConfiguration().orientation;
return !(flag == 0);
}
/**
* @param activity
     * @return whether the current activity is in full-screen mode
*/
public static boolean isFullScreen(Activity activity) {
int flag = activity.getWindow().getAttributes().flags;
if ((flag & WindowManager.LayoutParams.FLAG_FULLSCREEN)
== WindowManager.LayoutParams.FLAG_FULLSCREEN) {
return true;
} else {
return false;
}
}
}
| 1,792 |
563 |
<filename>tests/test_create.cpp<gh_stars>100-1000
//
// Created by <NAME> on 07.05.2018
//
#include "test.h"
#include "../proto/proto_models.h"
#include "../proto/test_models.h"
TEST_CASE("Create & access", "[FBE]")
{
// Create a new account using FBE model into the FBE stream
FBE::proto::AccountModel writer;
REQUIRE(writer.model.fbe_offset() == 4);
size_t model_begin = writer.create_begin();
size_t account_begin = writer.model.set_begin();
writer.model.id.set(1);
writer.model.name.set(std::string("Test"));
writer.model.state.set(proto::State::good);
size_t wallet_begin = writer.model.wallet.set_begin();
writer.model.wallet.currency.set(std::string("USD"));
writer.model.wallet.amount.set(1000.0);
writer.model.wallet.set_end(wallet_begin);
size_t asset_begin = writer.model.asset.set_begin(true);
size_t asset_wallet_begin = writer.model.asset.value.set_begin();
writer.model.asset.value.currency.set(std::string("EUR"));
writer.model.asset.value.amount.set(100.0);
    writer.model.asset.value.set_end(asset_wallet_begin);
    writer.model.asset.set_end(asset_begin);
auto order = writer.model.orders.resize(3);
size_t order_begin = order.set_begin();
order.id.set(1);
order.symbol.set(std::string("EURUSD"));
order.side.set(proto::OrderSide::buy);
order.type.set(proto::OrderType::market);
order.price.set(1.23456);
order.volume.set(1000.0);
order.set_end(order_begin);
order.fbe_shift(order.fbe_size());
order_begin = order.set_begin();
order.id.set(2);
order.symbol.set(std::string("EURUSD"));
order.side.set(proto::OrderSide::sell);
order.type.set(proto::OrderType::limit);
order.price.set(1.0);
order.volume.set(100.0);
order.set_end(order_begin);
order.fbe_shift(order.fbe_size());
order_begin = order.set_begin();
order.id.set(3);
order.symbol.set(std::string("EURUSD"));
order.side.set(proto::OrderSide::buy);
order.type.set(proto::OrderType::stop);
order.price.set(1.5);
order.volume.set(10.0);
order.set_end(order_begin);
order.fbe_shift(order.fbe_size());
writer.model.set_end(account_begin);
size_t serialized = writer.create_end(model_begin);
REQUIRE(serialized == writer.buffer().size());
REQUIRE(writer.verify());
writer.next(serialized);
REQUIRE(writer.model.fbe_offset() == (4 + writer.buffer().size()));
// Check the serialized FBE size
REQUIRE(writer.buffer().size() == 252);
// Access the account model in the FBE stream
FBE::proto::AccountModel reader;
REQUIRE(reader.model.fbe_offset() == 4);
reader.attach(writer.buffer());
REQUIRE(reader.verify());
int32_t id;
std::string name;
proto::State state;
std::string wallet_currency;
double wallet_amount;
std::string asset_wallet_currency;
double asset_wallet_amount;
account_begin = reader.model.get_begin();
reader.model.id.get(id);
REQUIRE(id == 1);
reader.model.name.get(name);
REQUIRE(name == "Test");
reader.model.state.get(state);
REQUIRE((state | proto::State::good));
wallet_begin = reader.model.wallet.get_begin();
reader.model.wallet.currency.get(wallet_currency);
REQUIRE(wallet_currency == "USD");
reader.model.wallet.amount.get(wallet_amount);
REQUIRE(wallet_amount == 1000.0);
reader.model.wallet.get_end(wallet_begin);
REQUIRE(reader.model.asset.has_value());
asset_begin = reader.model.asset.get_begin();
asset_wallet_begin = reader.model.asset.value.get_begin();
reader.model.asset.value.currency.get(asset_wallet_currency);
REQUIRE(asset_wallet_currency == "EUR");
reader.model.asset.value.amount.get(asset_wallet_amount);
REQUIRE(asset_wallet_amount == 100.0);
reader.model.asset.value.get_end(asset_wallet_begin);
reader.model.asset.get_end(asset_begin);
REQUIRE(reader.model.orders.size() == 3);
int order_id;
std::string order_symbol;
proto::OrderSide order_side;
proto::OrderType order_type;
double order_price;
double order_volume;
auto o1 = reader.model.orders[0];
order_begin = o1.get_begin();
o1.id.get(order_id);
REQUIRE(order_id == 1);
o1.symbol.get(order_symbol);
REQUIRE(order_symbol == "EURUSD");
o1.side.get(order_side);
REQUIRE(order_side == proto::OrderSide::buy);
o1.type.get(order_type);
REQUIRE(order_type == proto::OrderType::market);
o1.price.get(order_price);
REQUIRE(order_price == 1.23456);
o1.volume.get(order_volume);
REQUIRE(order_volume == 1000.0);
o1.get_end(order_begin);
auto o2 = reader.model.orders[1];
order_begin = o2.get_begin();
o2.id.get(order_id);
REQUIRE(order_id == 2);
o2.symbol.get(order_symbol);
REQUIRE(order_symbol == "EURUSD");
o2.side.get(order_side);
REQUIRE(order_side == proto::OrderSide::sell);
o2.type.get(order_type);
REQUIRE(order_type == proto::OrderType::limit);
o2.price.get(order_price);
REQUIRE(order_price == 1.0);
o2.volume.get(order_volume);
REQUIRE(order_volume == 100.0);
    o2.get_end(order_begin);
auto o3 = reader.model.orders[2];
order_begin = o3.get_begin();
o3.id.get(order_id);
REQUIRE(order_id == 3);
o3.symbol.get(order_symbol);
REQUIRE(order_symbol == "EURUSD");
o3.side.get(order_side);
REQUIRE(order_side == proto::OrderSide::buy);
o3.type.get(order_type);
REQUIRE(order_type == proto::OrderType::stop);
o3.price.get(order_price);
REQUIRE(order_price == 1.5);
o3.volume.get(order_volume);
REQUIRE(order_volume == 10.0);
    o3.get_end(order_begin);
reader.model.get_end(account_begin);
}
| 2,359 |
335 |
{
"word": "Purse",
"definitions": [
"A small pouch of leather or plastic used for carrying money, typically by a woman.",
"The money possessed by or available to a person or country.",
"A sum of money given as a prize in a sporting contest, especially a boxing match.",
"A handbag."
],
"parts-of-speech": "Noun"
}
| 130 |
353 |
<reponame>codeproject/DeepStack<gh_stars>100-1000
import torch._C
import torch._jit_internal as _jit_internal
import torch.jit.annotations
import torch.testing
import torch.jit._recursive
from torch.jit._recursive import ScriptMethodStub, wrap_cpp_module
from torch.jit._builtins import _find_builtin, _get_builtin_table, _register_builtin # noqa
from torch._jit_internal import Future, _qualified_name
from torch.autograd import Variable, function
from torch.jit.frontend import get_jit_class_def, get_jit_def, get_default_args
from torch.nn import Module
from torch.serialization import validate_cuda_device
from torch._six import PY37, with_metaclass, string_classes, get_function_from_type
from torch.utils import set_module
from torch.autograd.grad_mode import _DecoratorContextManager
from typing import Optional, List
import collections
import contextlib
import copy
import functools
import inspect
import os
import pathlib
import pickle
import re
import sys
import textwrap
import warnings
import weakref
# These are imported so users can access them from the `torch.jit` module
from torch._jit_internal import Final, _overload, _overload_method
from torch._jit_internal import ignore, export, unused
def _parse_env(name, default, true_message, false_message):
value = os.environ.get(name)
if value is None:
return default
if value.lower() in {'1', 'true', 'yes'}:
return True
elif value.lower() in {'0', 'false', 'no'}:
return False
if value == '1v':
print(true_message)
return True
elif value == '0v':
print(false_message)
return False
raise ValueError('Unknown setting of {}. Try using 0 or 1.'.format(name))
_enabled = _parse_env('PYTORCH_JIT', True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED")
_flatten = torch._C._jit_flatten
_unflatten = torch._C._jit_unflatten
_jit_script_class_compile = torch._C._jit_script_class_compile
# The Python CompilationUnit. All functions and modules defined in Python will
# live in here. It's defined in Python because doing in cpp creates static
# destruction order issues.
_python_cu = torch._C.CompilationUnit()
set_module(Future, "torch.jit")
_fork = torch._C.fork
_wait = torch._C.wait
if _enabled:
Attribute = collections.namedtuple('Attribute', ['value', 'type'])
else:
def Attribute(value, type):
return value
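# Editor's note: the helper below is an illustrative sketch added for documentation and
# is not part of the original module; it is never called at import time. It shows the
# typical use of `Attribute` inside a module that will be scripted: pinning an attribute
# (here an empty list) to an explicit TorchScript type instead of the inferred one.
def _example_attribute_usage():
    class _M(torch.nn.Module):
        def __init__(self):
            super(_M, self).__init__()
            # Without the explicit type, an empty list would be inferred as List[Tensor].
            self.names = Attribute([], List[str])
        def forward(self, x):
            return x
    return torch.jit.script(_M())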
@contextlib.contextmanager
def optimized_execution(should_optimize):
"""
A context manager that controls whether the JIT's executor will run
optimizations before executing a function.
"""
stored_flag = torch._C._get_graph_executor_optimize()
torch._C._set_graph_executor_optimize(should_optimize)
try:
yield
finally:
torch._C._set_graph_executor_optimize(stored_flag)
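# Editor's note: illustrative sketch, not part of the original module; the helper is never
# invoked at import time. It shows a typical call site for `optimized_execution`: running a
# scripted function with the graph executor's optimizations turned off, e.g. while debugging.
def _example_optimized_execution_usage():
    @torch.jit.script
    def _double(x):
        return x * 2
    with optimized_execution(False):
        return _double(torch.rand(3))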
@contextlib.contextmanager
def fuser(name):
"""
A context manager that facilitates switching between
backend fusers.
Valid names:
* ``fuser0`` - enables only legacy fuser
* ``fuser1`` - enables only NNC
* ``fuser2`` - enables only nvFuser
"""
old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
old_nvfuser_state = torch._C._jit_nvfuser_enabled()
if name == 'fuser0': # legacy fuser
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
elif name == 'fuser1': # NNC
old_profiling_executor = torch._C._jit_set_profiling_executor(True)
old_profiling_mode = torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
elif name == 'fuser2': # nvFuser
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
else:
raise Exception("unrecognized fuser option")
try:
yield
finally:
if name == 'fuser1': # NNC
torch._C._jit_set_profiling_executor(old_profiling_executor)
torch._C._jit_set_profiling_mode(old_profiling_mode)
# recover the previous values
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
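# Editor's note: illustrative sketch, not part of the original module; the helper is never
# invoked at import time. It shows how `fuser` selects a fusion backend (here 'fuser1',
# i.e. NNC only) for the duration of a scripted call.
def _example_fuser_usage():
    @torch.jit.script
    def _affine(x, w, b):
        return x * w + b
    x, w, b = torch.rand(3), torch.rand(3), torch.rand(3)
    with fuser('fuser1'):
        return _affine(x, w, b)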
DEFAULT_EXTRA_FILES_MAP = torch._C.ExtraFilesMap()
def save(m, f, _extra_files=DEFAULT_EXTRA_FILES_MAP):
r"""
Save an offline version of this module for use in a separate process. The
saved module serializes all of the methods, submodules, parameters, and
attributes of this module. It can be loaded into the C++ API using
``torch::jit::load(filename)`` or into the Python API with
:func:`torch.jit.load <torch.jit.load>`.
To be able to save a module, it must not make any calls to native Python
functions. This means that all submodules must be subclasses of
:class:`ScriptModule` as well.
.. DANGER::
All modules, no matter their device, are always loaded onto the CPU
during loading. This is different from :func:`torch.load`'s semantics
and may change in the future.
Arguments:
m: A :class:`ScriptModule` to save.
f: A file-like object (has to implement write and flush) or a string
containing a file name.
_extra_files: Map from filename to contents which will be stored as part of `f`.
.. note::
torch.jit.save attempts to preserve the behavior of some operators
across versions. For example, dividing two integer tensors in
PyTorch 1.5 performed floor division, and if the module
containing that code is saved in PyTorch 1.5 and loaded in PyTorch 1.6
its division behavior will be preserved. The same module saved in
PyTorch 1.6 will fail to load in PyTorch 1.5, however, since the
behavior of division changed in 1.6, and 1.5 does not know how to
replicate the 1.6 behavior.
Example:
.. testcode::
import torch
import io
class MyModule(torch.nn.Module):
def forward(self, x):
return x + 10
m = torch.jit.script(MyModule())
# Save to file
torch.jit.save(m, 'scriptmodule.pt')
# This line is equivalent to the previous
m.save("scriptmodule.pt")
# Save to io.BytesIO buffer
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# Save with extra files
extra_files = torch._C.ExtraFilesMap()
extra_files['foo.txt'] = 'bar'
torch.jit.save(m, 'scriptmodule.pt', _extra_files=extra_files)
"""
if isinstance(f, str) or isinstance(f, pathlib.Path):
m.save(f, _extra_files=_extra_files)
else:
ret = m.save_to_buffer(_extra_files=_extra_files)
f.write(ret)
def load(f, map_location=None, _extra_files=DEFAULT_EXTRA_FILES_MAP):
r"""
Load a :class:`ScriptModule` or :class:`ScriptFunction` previously
saved with :func:`torch.jit.save <torch.jit.save>`
All previously saved modules, no matter their device, are first loaded onto CPU,
and then are moved to the devices they were saved from. If this fails (e.g.
because the run time system doesn't have certain devices), an exception is
raised.
Arguments:
f: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
map_location (string or torch.device): A simplified version of
``map_location`` in `torch.jit.save` used to dynamically remap
storages to an alternative set of devices.
_extra_files (dictionary of filename to content): The extra
filenames given in the map would be loaded and their content
would be stored in the provided map.
Returns:
A :class:`ScriptModule` object.
Example:
.. testcode::
import torch
import io
torch.jit.load('scriptmodule.pt')
# Load ScriptModule from io.BytesIO object
with open('scriptmodule.pt', 'rb') as f:
buffer = io.BytesIO(f.read())
# Load all tensors to the original device
torch.jit.load(buffer)
# Load all tensors onto CPU, using a device
buffer.seek(0)
torch.jit.load(buffer, map_location=torch.device('cpu'))
# Load all tensors onto CPU, using a string
buffer.seek(0)
torch.jit.load(buffer, map_location='cpu')
# Load with extra files.
extra_files = torch._C.ExtraFilesMap()
extra_files['foo.txt'] = 'bar'
torch.jit.load('scriptmodule.pt', _extra_files=extra_files)
print(extra_files['foo.txt'])
.. testoutput::
:hide:
...
.. testcleanup::
import os
os.remove("scriptmodule.pt")
"""
if isinstance(f, string_classes):
if not os.path.exists(f):
raise ValueError("The provided filename {} does not exist".format(f))
if os.path.isdir(f):
raise ValueError("The provided filename {} is a directory".format(f))
map_location = validate_map_location(map_location)
cu = torch._C.CompilationUnit()
if isinstance(f, str) or isinstance(f, pathlib.Path):
cpp_module = torch._C.import_ir_module(cu, f, map_location, _extra_files)
else:
cpp_module = torch._C.import_ir_module_from_buffer(cu, f.read(), map_location, _extra_files)
# TODO: Pretty sure this approach loses ConstSequential status and such
return torch.jit._recursive.wrap_cpp_module(cpp_module)
def validate_map_location(map_location=None):
if isinstance(map_location, str):
map_location = torch.device(map_location)
elif not (map_location is None or
isinstance(map_location, torch.device)):
raise ValueError("map_location should be either None, string or torch.device, "
"but got type: " + str(type(map_location)))
if (str(map_location).startswith('cuda')):
validate_cuda_device(map_location)
return map_location
def export_opnames(m):
r"""
Returns a list of operator names of a script module and its submodules
"""
return torch._C._export_opnames(m._c)
def _get_trace_graph(f, args=(), kwargs=None, strict=True, _force_outplace=False,
return_inputs=False, _return_inputs_states=False):
"""
.. warning::
This function is internal-only and should only be used by the ONNX
exporter. If you are trying to get a graph through tracing, please go
through the public API instead::
trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))
trace_graph = trace.graph
Trace a function or model, returning a tuple consisting of the both the
*trace* of an execution, as well as the original return value. If return_inputs,
also returns the trace inputs as part of the tuple
Tracing is guaranteed not to change the semantics of the function/module
that is traced.
Arguments:
f (torch.nn.Module or function): the function or module
to be traced.
args (tuple or Tensor): the positional arguments to pass to the
function/module to be traced. A non-tuple is assumed to
be a single positional argument to be passed to the model.
kwargs (dict): the keyword arguments to pass to the function/module
to be traced.
Example (trace a cell):
.. testcode::
trace = torch.jit.trace(nn.LSTMCell(), (input, hidden))
"""
if kwargs is None:
kwargs = {}
if not isinstance(args, tuple):
args = (args,)
outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
return outs
def _unique_state_dict(module, keep_vars=False):
# since Parameter.detach() always creates a new torch.Tensor instance,
# id(v) doesn't work with it. So we always get the Parameter or Buffer
# as values, and deduplicate the params using Parameters and Buffers
state_dict = module.state_dict(keep_vars=True)
filtered_dict = type(state_dict)()
seen_ids = set()
for k, v in state_dict.items():
if id(v) in seen_ids:
continue
seen_ids.add(id(v))
if keep_vars:
filtered_dict[k] = v
else:
filtered_dict[k] = v.detach()
return filtered_dict
def _create_interpreter_name_lookup_fn(frames_up=1):
def _get_interpreter_name_for_var(var):
frame = inspect.currentframe()
i = 0
while i < frames_up + 1:
frame = frame.f_back
i += 1
f_locals = frame.f_locals
f_globals = frame.f_globals
for k, v in f_locals.items():
if isinstance(v, torch.Tensor) and var is v:
return k if k != 'self' else ''
return ''
return _get_interpreter_name_for_var
class ConstMap:
def __init__(self, const_mapping):
self.const_mapping = const_mapping
def __getattr__(self, attr):
return self.const_mapping[attr]
class ONNXTracedModule(Module):
def __init__(self, inner, strict=True, force_outplace=False, return_inputs=False, return_inputs_states=False):
super(ONNXTracedModule, self).__init__()
# inner may be a Module, or it may be an arbitrary callable
# If it's a Module, we get its parameters automatically, which lets
# us avoid a special casing functions versus modules.
self.inner = inner
self.strict = strict
self._force_outplace = force_outplace
self._return_inputs = return_inputs
self._return_inputs_states = return_inputs_states
def forward(self, *args):
in_vars, in_desc = _flatten(args)
# NOTE: use full state, because we need it for BatchNorm export
# This differs from the compiler path, which doesn't support it at the moment.
module_state = list(_unique_state_dict(self, keep_vars=True).values())
ret_inputs = []
inputs_states = []
outs = []
def wrapper(*args):
trace_inputs = _unflatten(args[:len(in_vars)], in_desc)
ret_inputs.append(tuple(x.clone(memory_format=torch.preserve_format) for x in args))
if self._return_inputs_states:
inputs_states.append(_unflatten(args[:len(in_vars)], in_desc))
outs.append(self.inner(*trace_inputs))
if self._return_inputs_states:
inputs_states[0] = (inputs_states[0], trace_inputs)
out_vars, _ = _flatten(outs)
if len(out_vars) == 1:
return out_vars[0]
else:
return tuple(out_vars)
graph, out = torch._C._create_graph_by_tracing(
wrapper,
in_vars + module_state,
_create_interpreter_name_lookup_fn(),
self.strict,
self._force_outplace,
)
if self._return_inputs:
return graph, outs[0], ret_inputs[0]
if self._return_inputs_states:
return graph, outs[0], inputs_states[0]
else:
return graph, outs[0]
def _clone_inputs(args):
def clone_input(a):
if a is None:
return None
elif isinstance(a, torch.Tensor):
# TODO: figure out one liner to .clone() and set requires_grad
v = a.detach().clone(memory_format=torch.preserve_format).requires_grad_(a.requires_grad)
if a.grad is not None:
v.grad = clone_input(v.grad)
return v
else:
return a.clone(memory_format=torch.preserve_format)
return function._nested_map(lambda x: isinstance(x, torch.Tensor),
clone_input, condition_msg="tensors")(args)
# This is purely for developer debugging. We are not going to advertise it.
_JIT_TIME = os.environ.get('PYTORCH_JIT_TIME', False) # CUDA-only timing
_JIT_DISABLE = os.environ.get('PYTORCH_JIT_DISABLE', False)
_JIT_STATS = os.environ.get('PYTORCH_JIT_STATS', False)
@contextlib.contextmanager
def _time(trace_name, name, time=True):
if (not _JIT_TIME and not time) or not torch.cuda.is_available():
yield
return
stream = torch.cuda.current_stream()
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
stream.record_event(start)
try:
yield
finally:
stream.record_event(end)
end.synchronize()
print("{} {} time: {} ms".format(trace_name, name, start.elapsed_time(end)))
def verify(model, args, loss_fn=torch.sum, devices=None):
"""
Verify that a JIT compiled model has the same behavior as its uncompiled
version along with its backwards pass. If your model returns multiple
outputs, you must also specify a `loss_fn` to produce a loss for which
the backwards will be computed.
This function has side-effects (e.g., it executes your model / saves and loads
parameters), so don't expect the model to come out exactly the same as what
you passed in.
Arguments:
model (compiled torch.nn.Module or function): the module/function to be
verified. The module/function definition MUST have been decorated with
`@torch.jit.compile`.
args (tuple or Tensor): the positional arguments to pass to the
compiled function/module to be verified. A non-tuple is assumed to
be a single positional argument to be passed to the model.
loss_fn (function, optional): the loss function to be applied to
the output of the model, before backwards is invoked. By default,
we assume that a model returns a single result, and we :func:`torch.sum`
before calling backwards; if this is inappropriate, you can pass your
own loss function. Note that if a model returns a tuple of results,
these are passed as separate positional arguments to `loss_fn`.
devices (iterable of device IDs, optional): the GPU devices which the
compiled module will be run on. This determines the RNG state we
must save when running both compiled and uncompiled versions of the model.
"""
# TODO: In principle, we track device information in our trace, so it
# should be possible to check if our execution actually obeyed the 'devices'
# the user provided.
# TODO: Consider adding a utility function to torch.jit to test
# for this case
if not isinstance(model, torch._C.CompiledFunction):
raise TypeError("Cannot verify an uncompiled module. Add @torch.jit.compile to compile it")
is_module = isinstance(model, Module)
if not isinstance(args, tuple):
args = (args,)
saved_args = _clone_inputs(args)
if is_module:
saved_state = copy.deepcopy(model.state_dict())
def run_fwd_bwd(args, force_trace=False, assert_compiled=False):
params = list(model.parameters()) if is_module else []
in_vars, _ = _flatten((args, params))
# We use a special API to reset the trace and compile it from scratch.
compiled_fn = model
if force_trace:
compiled_fn.clear_cache()
if assert_compiled:
hits = compiled_fn.hits
out = model(*args)
if assert_compiled and compiled_fn.hits == hits:
raise RuntimeError("failed to use the compiled function")
if not isinstance(out, tuple):
out = (out, )
if loss_fn == torch.sum and len(out) != 1:
raise ValueError(("Model returns {} outputs, but default loss function "
"(torch.sum) can only handle a single output").format(len(out)))
out_vars, _ = _flatten(out)
saved_outs = [v.detach().clone(memory_format=torch.preserve_format) for v in out_vars]
loss = loss_fn(*out)
grads = torch.autograd.grad([loss], in_vars)
# TODO: I'm not sure if the clone here is necessary but it is safer
saved_grads = [v.detach().clone(memory_format=torch.preserve_format) for v in grads]
return (saved_outs, saved_grads)
with torch.random.fork_rng(devices, _caller="torch.jit.verify"):
uncompiled_outs, uncompiled_grads = run_fwd_bwd(args, force_trace=True)
assert model.has_trace_for(*args)
if is_module:
model.load_state_dict(saved_state)
compiled_outs, compiled_grads = run_fwd_bwd(args, assert_compiled=True)
_verify_equal(uncompiled_outs, compiled_outs)
_verify_equal(uncompiled_grads, compiled_grads)
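# Editor's note: hedged usage sketch, not part of the original module. `verify` expects a
# function compiled with the legacy `@torch.jit.compile` decorator (see the TypeError raised
# above); on builds where that decorator is available, a call site would look roughly like:
#
#     @torch.jit.compile
#     def fn(x):
#         return x * 2
#
#     torch.jit.verify(fn, (torch.rand(3, requires_grad=True),))
#
# The sample input requires grad so that the backward pass can be checked as well.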
def _verify_equal(xs, ys):
for x, y in zip(xs, ys):
if x.sub(y).abs().max() > 1e-6:
raise RuntimeError("JIT and real computation mismatch")
def indent(s):
return '\n'.join(['\t' + line for line in s.splitlines()])
class TracingCheckError(Exception):
def __init__(self, graph_diff_error, tensor_compare_error, extra_msg=None):
self.message = 'Tracing failed sanity checks!\n'
if extra_msg is not None:
self.message += extra_msg + '\n'
if graph_diff_error is not None:
self.message += 'ERROR: Graphs differed across invocations!\n'
self.message += indent(graph_diff_error) + '\n'
if tensor_compare_error is not None:
self.message += 'ERROR: Tensor-valued Constant nodes differed in value ' \
'across invocations. This often indicates that the tracer has' \
' encountered untraceable code.\n'
self.message += indent(tensor_compare_error) + '\n'
super(TracingCheckError, self).__init__(self.message)
# Check the traced module against a set of user-provided validation inputs
@torch.no_grad()
def _check_trace(check_inputs, func, traced_func, check_tolerance, strict,
force_outplace, is_trace_module, _module_class):
# Note: tracing is independent of optimizations, which consume the trace
for inputs in check_inputs:
if isinstance(inputs, torch.Tensor):
inputs = (inputs,)
if is_trace_module:
copied_dict = {}
for name, data in inputs.items():
copied_dict[name] = _clone_inputs(data)
check_mod = torch.jit.trace_module(
func.__self__ if hasattr(func, '__self__') else func,
copied_dict,
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
_compilation_unit=torch._C.CompilationUnit(),
)
check_mod_func = check_mod._c._get_method(traced_func.name)
inputs = inputs[traced_func.name]
if isinstance(inputs, (torch.Tensor, dict)):
inputs = (inputs,)
else:
check_mod = torch.jit.trace(
func,
_clone_inputs(inputs),
check_trace=False,
strict=strict,
_force_outplace=force_outplace,
_module_class=_module_class,
)
check_mod_func = check_mod
def graph_diagnostic_info():
mod_canonicalized = torch._C._jit_pass_canonicalize(traced_func.graph)
torch._C._jit_pass_inline(mod_canonicalized)
torch._C._jit_pass_erase_shape_information(mod_canonicalized)
mod_str = str(mod_canonicalized)
mod_str = re.sub(r'___torch_mangle_[0-9]+\.', '', mod_str)
check_canonicalized = torch._C._jit_pass_canonicalize(check_mod_func.graph)
torch._C._jit_pass_inline(check_canonicalized)
torch._C._jit_pass_erase_shape_information(check_canonicalized)
check_str = str(check_canonicalized)
check_str = re.sub(r'___torch_mangle_[0-9]+\.', '', check_str)
graph_diff_errors = None
if mod_str != check_str:
import difflib
graph_diff = difflib.ndiff(mod_str.splitlines(True),
check_str.splitlines(True))
graph_diff_errors = 'Graph diff:\n' + indent(''.join(graph_diff)) + '\n'
for n_mod, n_check in zip(mod_canonicalized.nodes(), check_canonicalized.nodes()):
if str(n_mod) != str(n_check):
graph_diff_errors += 'First diverging operator:\n'
node_diff = difflib.ndiff(str(n_mod).splitlines(True),
str(n_check).splitlines(True))
source_printout = 'Node diff:\n' + indent(''.join(node_diff)) + '\n'
mod_stack = n_mod.sourceRange()
if mod_stack:
source_printout += 'Trace source location:\n' + indent(mod_stack) + '\n'
check_stack = n_check.sourceRange()
if check_stack:
source_printout += 'Check source location:\n' + indent(check_stack) + '\n'
graph_diff_errors += source_printout
break # For now, only print out the first pair of nodes that diverges
tensor_compare_errors = None
# Check Tensor-valued constant nodes
for n_mod, n_check in zip(mod_canonicalized.nodes(), check_canonicalized.nodes()):
if n_mod.kind() != n_check.kind():
break # Graphs have already diverged
if n_mod.kind() == 'prim::Constant' and not (n_mod.mustBeNone() or n_check.mustBeNone()):
if not n_mod.hasAttribute('value'):
continue
if n_mod.kindOf('value') != 't' or n_check.kindOf('value') != 't':
continue
mod_tensor_val = n_mod.t('value')
check_tensor_val = n_check.t('value')
try:
torch.testing.assert_allclose(mod_tensor_val, check_tensor_val)
except (RuntimeError, AssertionError) as e:
if tensor_compare_errors is None:
tensor_compare_errors = ''
tensor_compare_errors += 'Node:\n' + indent(str(n_mod)) + '\n'
compare_stack = n_mod.sourceRange()
if compare_stack:
tensor_compare_errors += 'Source Location:\n' + indent(compare_stack) + '\n'
tensor_compare_errors += 'Comparison exception: ' + indent(str(e))
break # For now, only print the first diverging pair
return graph_diff_errors, tensor_compare_errors
def wrap_retval(x):
return x if isinstance(x, tuple) else (x,)
def run_mod_and_filter_tensor_outputs(mod, inputs, running_what):
try:
outs = wrap_retval(mod(*_clone_inputs(inputs)))
outs = [out for out in outs if isinstance(out, torch.Tensor)]
return outs
except Exception as e:
raise TracingCheckError(*graph_diagnostic_info(),
extra_msg='Encountered an exception while running the ' + running_what +
' with test inputs.\nException:\n' + indent(str(e)))
has_warned = [False]
def maybe_warn_nondeterministic():
if has_warned[0]:
return
has_warned[0] = True
nondeterm_ops = [op for op in traced_func.graph.nodes() if op.isNondeterministic()]
if len(nondeterm_ops) > 0:
nondeterministic_ops_warning = "Trace had nondeterministic nodes. "
            nondeterministic_ops_warning += "Did you forget to call .eval() on your model? Nodes:\n"
nondeterministic_ops_warning += "\n".join([indent(str(op)) for op in nondeterm_ops][:20])
nondeterministic_ops_warning += "\nThis may cause errors in trace checking. To disable trace checking,"\
" pass check_trace=False to torch.jit.trace()"
warnings.warn(nondeterministic_ops_warning, category=TracerWarning, stacklevel=5)
def compare_outputs(original, reference, match_what):
all_ok = True
for i, (orig, ref) in enumerate(zip(original, reference)):
try:
if orig.is_quantized:
orig = orig.dequantize()
if ref.is_quantized:
ref = ref.dequantize()
torch.testing.assert_allclose(orig.double(), ref.double(), rtol=check_tolerance,
atol=torch.testing._get_default_tolerance(orig, ref)[1])
except AssertionError as e:
maybe_warn_nondeterministic()
warnings.warn('Output nr ' + str(i + 1) + '. of the traced function does not match '
'the corresponding output of the ' + match_what + '. Detailed error:\n' + str(e),
category=TracerWarning, stacklevel=4)
all_ok = False
return all_ok
traced_outs = run_mod_and_filter_tensor_outputs(traced_func, inputs, 'trace')
fn_outs = run_mod_and_filter_tensor_outputs(func, inputs, 'Python function')
if compare_outputs(traced_outs, fn_outs, 'Python function'):
check_outs = run_mod_and_filter_tensor_outputs(check_mod_func, inputs, 'repeated trace')
compare_outputs(traced_outs, check_outs, 'repeated trace')
diag_info = graph_diagnostic_info()
if any(info is not None for info in diag_info):
raise TracingCheckError(*diag_info)
class TracerWarning(Warning):
@staticmethod
def ignore_lib_warnings():
# We ignore warnings from all submodules excluding the JIT, because we need them e.g. for _check_trace
warnings.filterwarnings('ignore', category=TracerWarning, module='torch.(?!jit)')
# We ignore the tracer warnings coming from inside the library, because all our shape
# checks in nn will trigger them.
TracerWarning.ignore_lib_warnings()
torch._C._tracer_warn_use_python()
def make_tuple(example_inputs):
if isinstance(example_inputs, (torch.Tensor, dict)):
return (example_inputs,)
# done primarily so that weird iterables fail here and not pybind11 code
if not isinstance(example_inputs, tuple):
return tuple(example_inputs)
return example_inputs
def make_module(mod, _module_class, _compilation_unit):
if isinstance(mod, ScriptModule):
return mod
elif torch._jit_internal.module_has_exports(mod):
def make_stubs_from_exported_methods(mod):
exported = []
for name in dir(mod):
item = getattr(mod, name, None)
if torch._jit_internal.get_torchscript_modifier(item) is _jit_internal.FunctionModifiers.EXPORT:
exported.append(name)
stubs = []
for method in exported:
stubs.append(torch.jit._recursive.make_stub_from_method(mod, method))
return stubs
return torch.jit._recursive.create_script_module(mod, make_stubs_from_exported_methods, share_types=False)
else:
if _module_class is None:
_module_class = TopLevelTracedModule
return _module_class(mod, _compilation_unit=_compilation_unit)
def wrap_check_inputs(check_inputs):
if check_inputs is None:
return None
return [{'forward' : c} for c in check_inputs]
def trace(func,
example_inputs,
optimize=None,
check_trace=True,
check_inputs=None,
check_tolerance=1e-5,
strict=True,
_force_outplace=False,
_module_class=None,
_compilation_unit=_python_cu):
"""
Trace a function and return an executable or :class:`ScriptFunction`
that will be optimized using just-in-time compilation. Tracing is ideal for
code that operates only on ``Tensor``\\s and lists, dictionaries, and
tuples of ``Tensor``\\s.
Using `torch.jit.trace` and `torch.jit.trace_module`, you can turn an
existing module or Python function into a TorchScript
:class:`ScriptFunction` or :class:`ScriptModule`. You must provide example
inputs, and we run the function, recording the operations performed on all
the tensors.
* The resulting recording of a standalone function produces `ScriptFunction`.
* The resulting recording of `nn.Module.forward` or `nn.Module` produces
`ScriptModule`.
This module also contains any parameters that the original
module had as well.
Warning:
Tracing only correctly records functions and modules which are not data
dependent (e.g., do not have conditionals on data in tensors) and do not have
any untracked external dependencies (e.g., perform input/output or
access global variables). Tracing only records operations done when the given
function is run on the given tensors. Therefore, the returned
`ScriptModule` will always run the same traced graph on any input. This
has some important implications when your module is expected to run
different sets of operations, depending on the input and/or the module
state. For example,
* Tracing will not record any control-flow like if-statements or loops.
When this control-flow is constant across your module, this is fine
and it often inlines the control-flow decisions. But sometimes the
control-flow is actually part of the model itself. For instance, a
recurrent network is a loop over the (possibly dynamic) length of an
input sequence.
* In the returned :class:`ScriptModule`, operations that have different
behaviors in ``training`` and ``eval`` modes will always behave as if
it is in the mode it was in during tracing, no matter which mode the
`ScriptModule` is in.
In cases like these, tracing would not be appropriate and
:func:`scripting <torch.jit.script>` is a better choice. If you trace
such models, you may silently get incorrect results on subsequent
invocations of the model. The tracer will try to emit warnings when
doing something that may cause an incorrect trace to be produced.
Arguments:
func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
that will be run with `example_inputs`. `func` arguments and return
values must be tensors or (possibly nested) tuples that contain
tensors. When a module is passed `torch.jit.trace`, only the
``forward`` method is run and traced (see :func:`torch.jit.trace
<torch.jit.trace_module>` for details).
example_inputs (tuple): A tuple of example inputs that will be passed
to the function while tracing. The resulting trace can be run with
inputs of different types and shapes assuming the traced operations
support those types and shapes. `example_inputs` may also be a
single Tensor in which case it is automatically wrapped in a tuple.
Keyword arguments:
check_trace (bool, optional): Check if the same inputs run through
traced code produce the same outputs. Default: ``True``. You might want
to disable this if, for example, your network contains non-
deterministic ops or if you are sure that the network is correct despite
a checker failure.
check_inputs (list of tuples, optional): A list of tuples of input
arguments that should be used to check the trace against what is
expected. Each tuple is equivalent to a set of input arguments that
would be specified in ``example_inputs``. For best results, pass in
a set of checking inputs representative of the space of shapes and
types of inputs you expect the network to see. If not specified,
the original ``example_inputs`` are used for checking
check_tolerance (float, optional): Floating-point comparison tolerance
to use in the checker procedure. This can be used to relax the
checker strictness in the event that results diverge numerically
for a known reason, such as operator fusion.
strict (bool, optional): run the tracer in a strict mode or not
(default: ``True``). Only turn this off when you want the tracer to
record your mutable container types (currently ``list``/``dict``)
            and you are sure that the container you are using in your
problem is a ``constant`` structure and does not get used as
control flow (if, for) conditions.
Returns:
If `func` is `nn.Module` or ``forward`` of `nn.Module`, `trace` returns
a :class:`ScriptModule` object with a single ``forward`` method
containing the traced code. The returned `ScriptModule` will
have the same set of sub-modules and parameters as the original
``nn.Module``. If ``func`` is a standalone function, ``trace``
returns `ScriptFunction`.
Example (tracing a function):
.. testcode::
import torch
def foo(x, y):
return 2 * x + y
# Run `foo` with the provided inputs and record the tensor operations
traced_foo = torch.jit.trace(foo, (torch.rand(3), torch.rand(3)))
# `traced_foo` can now be run with the TorchScript interpreter or saved
# and loaded in a Python-free environment
Example (tracing an existing module)::
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
n = Net()
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
# Trace a specific method and construct `ScriptModule` with
# a single `forward` method
module = torch.jit.trace(n.forward, example_forward_input)
# Trace a module (implicitly traces `forward`) and construct a
# `ScriptModule` with a single `forward` method
module = torch.jit.trace(n, example_forward_input)
"""
if not _enabled:
return func
if optimize is not None:
        warnings.warn("`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution()` instead")
if isinstance(func, torch.jit.ScriptModule):
# it is hard to trace it because the forward method on ScriptModule is already defined, so it
# would result in an error.
warnings.warn('The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is.')
return func
if isinstance(func, torch.nn.Module):
return trace_module(func, {'forward': example_inputs}, None,
check_trace, wrap_check_inputs(check_inputs),
check_tolerance, strict, _force_outplace, _module_class)
if (hasattr(func, '__self__') and isinstance(func.__self__, torch.nn.Module) and
func.__name__ == 'forward'):
return trace_module(func.__self__, {'forward': example_inputs}, None,
check_trace, wrap_check_inputs(check_inputs),
check_tolerance, strict, _force_outplace, _module_class)
# Special case for common case of passing a single Tensor
if isinstance(example_inputs, (torch.Tensor, dict)):
example_inputs = (example_inputs,)
# done primarily so that weird iterables fail here and not pybind11 code
elif not isinstance(example_inputs, tuple):
example_inputs = tuple(example_inputs)
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
if (hasattr(func, '__self__') and isinstance(func.__self__, torch.nn.Module)):
raise AttributeError("trace doesn't support compiling individual module's functions.\n"
"Please use trace_module")
name = _qualified_name(func)
traced = torch._C._create_function_from_trace(name, func, example_inputs,
var_lookup_fn,
strict,
_force_outplace)
# Check the trace against new traces created from user-specified inputs
if check_trace:
if check_inputs is not None:
_check_trace(check_inputs, func, traced, check_tolerance, strict, _force_outplace, False, _module_class)
else:
_check_trace([example_inputs], func, traced, check_tolerance, strict, _force_outplace, False, _module_class)
return traced
_trace_module_map = None
def trace_module(mod,
inputs,
optimize=None,
check_trace=True,
check_inputs=None,
check_tolerance=1e-5,
strict=True,
_force_outplace=False,
_module_class=None,
_compilation_unit=_python_cu):
"""
Trace a module and return an executable :class:`ScriptModule` that will be optimized
using just-in-time compilation. When a module is passed to :func:`torch.jit.trace <torch.jit.trace>`, only
the ``forward`` method is run and traced. With ``trace_module``, you can specify a dictionary of
    method names to example inputs to trace (see the ``example_inputs`` argument below).
See :func:`torch.jit.trace <torch.jit.trace>` for more information on tracing.
Arguments:
mod (torch.nn.Module): A ``torch.nn.Module`` containing methods whose names are
specified in ``example_inputs``. The given methods will be compiled
as a part of a single `ScriptModule`.
example_inputs (dict): A dict containing sample inputs indexed by method names in ``mod``.
The inputs will be passed to methods whose names correspond to inputs'
keys while tracing.
``{ 'forward' : example_forward_input, 'method2': example_method2_input}``
Keyword arguments:
check_trace (``bool``, optional): Check if the same inputs run through
traced code produce the same outputs. Default: ``True``. You might want
to disable this if, for example, your network contains non-
deterministic ops or if you are sure that the network is correct despite
a checker failure.
check_inputs (list of dicts, optional): A list of dicts of input arguments that should be used
to check the trace against what is expected. Each tuple
is equivalent to a set of input arguments that would
be specified in ``example_inputs``. For best results, pass in a
set of checking inputs representative of the space of
shapes and types of inputs you expect the network to see.
If not specified, the original ``example_inputs`` are used for checking
check_tolerance (float, optional): Floating-point comparison tolerance to use in the checker procedure.
This can be used to relax the checker strictness in the event that
results diverge numerically for a known reason, such as operator fusion.
Returns:
A :class:`ScriptModule` object with a single ``forward`` method containing the traced code.
When ``func`` is a ``torch.nn.Module``, the returned :class:`ScriptModule` will have the same set of
sub-modules and parameters as ``func``.
Example (tracing a module with multiple methods)::
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
def weighted_kernel_sum(self, weight):
return weight * self.conv.weight
n = Net()
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
# Trace a specific method and construct `ScriptModule` with
# a single `forward` method
module = torch.jit.trace(n.forward, example_forward_input)
# Trace a module (implicitly traces `forward`) and construct a
# `ScriptModule` with a single `forward` method
module = torch.jit.trace(n, example_forward_input)
# Trace specific methods on a module (specified in `inputs`), constructs
# a `ScriptModule` with `forward` and `weighted_kernel_sum` methods
inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
module = torch.jit.trace_module(n, inputs)
"""
if not _enabled:
return mod
if optimize is not None:
warnings.warn("`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead")
var_lookup_fn = _create_interpreter_name_lookup_fn(0)
if not isinstance(mod, torch.nn.Module):
raise AttributeError("expected torch.nn.Module as the first argument")
if not isinstance(inputs, dict):
raise AttributeError("expected a dictionary of (method_name, input) pairs")
old_module_map = torch.jit._trace_module_map
try:
torch.jit._trace_module_map = {}
def register_submods(mod, prefix):
for name, child in mod.named_children():
submod_qualname = prefix + '.' + name
torch.jit._trace_module_map[child] = submod_qualname
register_submods(child, submod_qualname)
torch.jit._trace_module_map['__module'] = mod
register_submods(mod, '__module')
module = make_module(mod, _module_class, _compilation_unit)
for method_name, example_inputs in inputs.items():
# this is needed since Module.__call__ sets up some extra tracing
func = mod if method_name == "forward" else getattr(mod, method_name)
example_inputs = make_tuple(example_inputs)
module._c._create_method_from_trace(method_name, func, example_inputs, var_lookup_fn, strict, _force_outplace)
check_trace_method = module._c._get_method(method_name)
# Check the trace against new traces created from user-specified inputs
if check_trace:
if check_inputs is not None:
_check_trace(check_inputs, func, check_trace_method,
check_tolerance, strict, _force_outplace, True, _module_class)
else:
_check_trace([inputs], func, check_trace_method,
check_tolerance, strict, _force_outplace, True, _module_class)
finally:
torch.jit._trace_module_map = old_module_map
return module
def fork(func, *args, **kwargs):
"""
Creates an asynchronous task executing `func` and a reference to the value
of the result of this execution. `fork` will return immediately,
so the return value of `func` may not have been computed yet. To force completion
of the task and access the return value invoke `torch.jit.wait` on the Future. `fork` invoked
with a `func` which returns `T` is typed as `torch.jit.Future[T]`. `fork` calls can be arbitrarily
nested, and may be invoked with positional and keyword arguments.
Asynchronous execution will only occur when run in TorchScript. If run in pure python,
`fork` will not execute in parallel. `fork` will also not execute in parallel when invoked
while tracing, however the `fork` and `wait` calls will be captured in the exported IR Graph.
Warning:
        `fork` tasks will execute non-deterministically. We recommend only spawning
parallel fork tasks for pure functions that do not modify their inputs,
module attributes, or global state.
Arguments:
func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
that will be invoked. If executed in TorchScript, it will execute asynchronously,
otherwise it will not. Traced invocations of fork will be captured in the IR.
*args, **kwargs: arguments to invoke `func` with.
Returns:
`torch.jit.Future[T]`: a reference to the execution of `func`. The value `T`
can only be accessed by forcing completion of `func` through `torch.jit.wait`.
Example (fork a free function):
.. testcode::
import torch
from torch import Tensor
def foo(a : Tensor, b : int) -> Tensor:
return a + b
def bar(a):
fut : torch.jit.Future[Tensor] = torch.jit.fork(foo, a, b=2)
return torch.jit.wait(fut)
script_bar = torch.jit.script(bar)
input = torch.tensor(2)
# only the scripted version executes asynchronously
assert script_bar(input) == bar(input)
# trace is not run asynchronously, but fork is captured in IR
graph = torch.jit.trace(bar, (input,)).graph
assert "fork" in str(graph)
Example (fork a module method):
.. testcode::
import torch
from torch import Tensor
class SubMod(torch.nn.Module):
def forward(self, a: Tensor, b : int):
return a + b
class Mod(torch.nn.Module):
def __init__(self):
                super(Mod, self).__init__()
self.mod = SubMod()
def forward(self, input):
                fut = torch.jit.fork(self.mod, input, b=2)
return torch.jit.wait(fut)
input = torch.tensor(2)
mod = Mod()
assert mod(input) == torch.jit.script(mod).forward(input)
"""
return torch._C.fork(func, *args, **kwargs)
def wait(future):
"""
Forces completion of a `torch.jit.Future[T]` asynchronous task, returning the
result of the task. See :func:`~fork` for docs and examples.
Arguments:
        future (torch.jit.Future[T]): an asynchronous task reference, created through `torch.jit.fork`
Returns:
        `T`: the return value of the completed task
"""
return torch._C.wait(future)
def freeze(mod, preserved_attrs : Optional[List[str]] = None):
r"""
Freezing a :class:`ScriptModule` will clone it and attempt to inline the cloned
module's submodules, parameters, and attributes as constants in the TorchScript IR Graph.
By default, `forward` will be preserved, as well as attributes & methods specified in
`preserved_attrs`. Additionally, any attribute that is modified within a preserved
method will be preserved.
Freezing currently only accepts ScriptModules that are in eval mode.
Arguments:
mod (:class:`ScriptModule`): a module to be frozen
preserved_attrs (Optional[List[str]]): a list of attributes to preserve in addition to the forward method.
Attributes modified in preserved methods will also be preserved.
Returns:
Frozen :class:`ScriptModule`.
Example (Freezing a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super(MyModule, self).__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mm(input)
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3).eval())
frozen_module = torch.jit.freeze(scripted_module)
# parameters have been removed and inlined into the Graph as constants
assert len(list(frozen_module.named_parameters())) == 0
# See the compiled graph as Python code
print(frozen_module.code)
Example (Freezing a module with preserved attributes)
.. testcode::
import torch
class MyModule2(torch.nn.Module):
def __init__(self):
super(MyModule2, self).__init__()
self.modified_tensor = torch.tensor(10.)
self.version = 1
def forward(self, input):
self.modified_tensor += 1
return input + self.modified_tensor
scripted_module = torch.jit.script(MyModule2().eval())
frozen_module = torch.jit.freeze(scripted_module, preserved_attrs=["version"])
# we've manually preserved `version`, so it still exists on the frozen module and can be modified
assert frozen_module.version == 1
frozen_module.version = 2
# `modified_tensor` is detected as being mutated in the forward, so freezing preserves
# it to retain model semantics
assert frozen_module(torch.tensor(1)) == torch.tensor(12)
# now that we've run it once, the next result will be incremented by one
assert frozen_module(torch.tensor(1)) == torch.tensor(13)
Note:
If you're not sure why an attribute is not being inlined as a constant, you can run
`dump_alias_db` on frozen_module.forward.graph to see if freezing has detected the
attribute is being modified.
"""
if not isinstance(mod, ScriptModule):
raise RuntimeError("Freezing expects a ScriptModule as input. "
"Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'.")
if mod.training:
raise RuntimeError("Freezing is currently only implemented for modules in eval mode. "
"Please call .eval() on your module before freezing.")
preserved_attrs = preserved_attrs if preserved_attrs is not None else []
out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
RecursiveScriptModule._finalize_scriptmodule(out)
return out
class CompilationUnit(object):
def __init__(self, lang=None, _frames_up=0):
self._c = torch._C.CompilationUnit()
if lang is not None:
self.define(lang, _frames_up=_frames_up + 1)
def define(self, lang, rcb=None, _frames_up=0):
if not rcb:
rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
self._c.define(lang, rcb)
def __getattr__(self, attr):
r = self._c.find_function(attr)
if r is None:
raise AttributeError("'CompilationUnit' has no attribute '{}'".format(attr))
return r
def _try_get_dispatched_fn(fn):
if not callable(fn):
return None
return _jit_internal.boolean_dispatched.get(fn)
def _try_get_overloaded_fn(mod, field):
return mod._overloads.get(field, None) if isinstance(mod, ScriptModule) else None
class ScriptWarning(Warning):
pass
@contextlib.contextmanager
def _disable_emit_hooks():
hooks = torch._C._jit_get_emit_hooks()
torch._C._jit_set_emit_hooks(None, None)
yield
torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
def _disable_emit_hooks_decorator(_DecoratorContextManager): # noqa: F811
def __enter__(self):
self.hooks = torch._C._jit_get_emit_hooks()
torch._C._jit_set_emit_hooks(None, None)
def __exit__(self, *args):
torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
# ScriptClasses must be new-style classes because we construct them using their
# __new__ method.
def _is_new_style_class(cls):
if hasattr(cls, '__class__'):
return ('__dict__' in dir(cls) or hasattr(cls, '__slots__'))
def whichmodule(obj):
"""Find the module an object belong to."""
module_name = getattr(obj, '__module__', None)
# Protect the iteration by using a list copy of sys.modules against dynamic
# modules that trigger imports of other modules upon calls to getattr.
for name, module in list(sys.modules.items()):
if name == '__main__' or module is None:
continue
try:
if _getattribute(module, name)[0] is obj:
return module_name
except AttributeError:
pass
return '__main__'
def _recursive_compile_class(obj, loc):
_qual_name = _qualified_name(obj)
# We're starting a new compilation, so update the error call stack in
# case it fails
error_stack = torch._C.CallStack(_qual_name, loc)
rcb = _jit_internal.createResolutionCallbackForClassMethods(obj)
_compile_and_register_class(obj, rcb, _qual_name)
def _compile_and_register_class(obj, rcb, qualified_name):
ast = get_jit_class_def(obj, obj.__name__)
_jit_script_class_compile(qualified_name, ast, rcb)
_add_script_class(obj, qualified_name)
def script(obj, optimize=None, _frames_up=0, _rcb=None):
r"""
Scripting a function or ``nn.Module`` will inspect the source code, compile
it as TorchScript code using the TorchScript compiler, and return a :class:`ScriptModule` or
:class:`ScriptFunction`. TorchScript itself is a subset of the Python language, so not all
features in Python work, but we provide enough functionality to compute on
tensors and do control-dependent operations. For a complete guide, see the
:ref:`language-reference`.
``torch.jit.script`` can be used as a function for modules and functions, and as a decorator
``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
Arguments:
obj (callable, class, or ``nn.Module``): The ``nn.Module``, function, or class type to
compile.
Returns:
If ``obj`` is ``nn.Module``, ``script`` returns
a :class:`ScriptModule` object. The returned :class:`ScriptModule` will
have the same set of sub-modules and parameters as the
original ``nn.Module``. If ``obj`` is a standalone function,
a :class:`ScriptFunction` will be returned.
**Scripting a function**
The ``@torch.jit.script`` decorator will construct a :class:`ScriptFunction`
by compiling the body of the function.
Example (scripting a function):
.. testcode::
import torch
@torch.jit.script
def foo(x, y):
if x.max() > y.max():
r = x
else:
r = y
return r
        print(type(foo))  # torch.jit.ScriptFunction
# See the compiled graph as Python code
print(foo.code)
# Call the function using the TorchScript interpreter
foo(torch.ones(2, 2), torch.ones(2, 2))
.. testoutput::
:hide:
...
**Scripting an nn.Module**
Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
compile any methods, submodules, and functions called by ``forward``. If a ``nn.Module`` only uses
features supported in TorchScript, no changes to the original module code should be necessary. ``script``
will construct :class:`ScriptModule` that has copies of the attributes, parameters, and methods of
the original module.
Example (scripting a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super(MyModule, self).__init__()
# This parameter will be copied to the new ScriptModule
self.weight = torch.nn.Parameter(torch.rand(N, M))
# When this submodule is used, it will be compiled
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mv(input)
# This calls the `forward` method of the `nn.Linear` module, which will
# cause the `self.linear` submodule to be compiled to a `ScriptModule` here
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3))
Example (scripting a module with traced submodules):
.. testcode::
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
# torch.jit.trace produces a ScriptModule's conv1 and conv2
self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
def forward(self, input):
input = F.relu(self.conv1(input))
input = F.relu(self.conv2(input))
return input
scripted_module = torch.jit.script(MyModule())
To compile a method other than ``forward`` (and recursively compile anything it calls), add
the :func:`@torch.jit.export <torch.jit.export>` decorator to the method. To opt out of compilation
use :func:`@torch.jit.ignore <torch.jit.ignore>` or :func:`@torch.jit.unused <torch.jit.unused>`.
Example (an exported and ignored method in a module)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
@torch.jit.export
def some_entry_point(self, input):
return input + 10
@torch.jit.ignore
def python_only_fn(self, input):
# This function won't be compiled, so any
# Python APIs can be used
import pdb
pdb.set_trace()
def forward(self, input):
if self.training:
self.python_only_fn(input)
return input * 99
scripted_module = torch.jit.script(MyModule())
print(scripted_module.some_entry_point(torch.randn(2, 2)))
print(scripted_module(torch.randn(2, 2)))
"""
if not _enabled:
return obj
if optimize is not None:
warnings.warn("`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead")
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, torch.nn.Module):
return torch.jit._recursive.create_script_module(obj, torch.jit._recursive.infer_methods_to_compile)
qualified_name = _qualified_name(obj)
if inspect.isclass(obj):
# If this type is a `nn.Module` subclass, they probably meant to pass
# an instance instead of a Module
if issubclass(obj, torch.nn.Module):
raise RuntimeError("Type '{}' cannot be compiled since it inherits"
" from nn.Module,"
" pass an instance instead".format(obj))
if not _is_new_style_class(obj):
raise RuntimeError("TorchScript classes must be new-style classes. "
"Please inherit from 'object'.")
if len(obj.mro()) > 2:
raise RuntimeError("TorchScript classes does not support inheritance yet. "
"Please directly inherit from 'object'.")
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
_compile_and_register_class(obj, _rcb, qualified_name)
return obj
else:
        # this is a decorated fn, and we need to get the underlying fn and its rcb
if hasattr(obj, "__script_if_tracing_wrapper"):
obj = obj.__original_fn
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
_check_directly_compile_overloaded(obj)
maybe_already_compiled_fn = _try_get_jit_cached_function(obj)
if maybe_already_compiled_fn:
return maybe_already_compiled_fn
ast = get_jit_def(obj, obj.__name__)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
fn = torch._C._jit_script_compile(qualified_name, ast, _rcb, get_default_args(obj))
# Forward docstrings
fn.__doc__ = obj.__doc__
_set_jit_function_cache(obj, fn)
return fn
def interface(obj):
if not inspect.isclass(obj):
raise RuntimeError("interface must be applied to a class")
if not _is_new_style_class(obj):
raise RuntimeError("TorchScript interfaces must inherit from 'object'")
# Expected MRO is:
# User module
# torch.nn.modules.module.Module
# object
is_module_interface = issubclass(obj, torch.nn.Module) and len(obj.mro()) == 3
if not is_module_interface and len(obj.mro()) > 2:
raise RuntimeError("TorchScript interface does not support inheritance yet. "
"Please directly inherit from 'object' or 'nn.Module'.")
qualified_name = _qualified_name(obj)
rcb = _jit_internal.createResolutionCallbackFromFrame(1)
    # if this type is a `nn.Module` subclass, generate a module interface type
    # instead of a class interface type; a module interface type only compiles
    # the user-provided methods as part of the interface
ast = get_jit_class_def(obj, obj.__name__)
torch._C._jit_script_interface_compile(qualified_name, ast, rcb, is_module_interface)
obj.__torch_script_interface__ = True
return obj
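# Illustrative sketch (not part of the original file; class and method names are
# hypothetical) of how the `interface` decorator above is typically used: the
# decorated class only declares annotated method signatures, and any scripted
# class or module whose methods match those signatures can be bound to an
# attribute typed with the interface.
#
#     @torch.jit.interface
#     class TensorTransform(object):
#         def forward(self, x):
#             # type: (Tensor) -> Tensor
#             pass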
def _script_if_tracing(fn):
"""
Compiles ``fn`` when it is first called during tracing. ``torch.jit.script``
has a non-negligible start up time when it is first called due to
lazy-initializations of many compiler builtins. Therefore you should not use
it in library code. However, you may want to have parts of your library work
in tracing even if they use control flow. In these cases, you should use
``@torch.jit._script_if_tracing`` to substitute for
``torch.jit.script``.
"""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
if not is_tracing():
# Not tracing, don't do anything
return fn(*args, **kwargs)
compiled_fn = script(wrapper.__original_fn)
return compiled_fn(*args, **kwargs)
wrapper.__original_fn = fn
wrapper.__script_if_tracing_wrapper = True
return wrapper
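# A minimal usage sketch for `_script_if_tracing` (the decorated function below
# is hypothetical): library helpers containing control flow can be decorated so
# that they are only compiled the first time they run under tracing, keeping the
# one-off torch.jit.script startup cost away from purely eager callers.
#
#     @_script_if_tracing
#     def _clamp_len(x, max_len: int):
#         if x.size(0) > max_len:
#             x = x[:max_len]
#         return x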
def script_method(fn):
if not _enabled:
return fn
# NOTE: we need to traverse two frames here because the meta-class frame
    # for ScriptModule will be present, as opposed to invoking @script on a
    # function or invoking define() on a CompilationUnit.
# The stack will look like:
#
# 0. createResolutionCallback()
# 1. script_method()
# 2. ScriptModule metaclass frame
# 3. Surrounding scope
#
# createResolutionCallback internally adds 1 to get us to the scope of this
# function (the calling function). Adding 2 gets us to the proper surrounding scope.
_rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=2)
ast = get_jit_def(fn, fn.__name__, self_name="ScriptModule")
return ScriptMethodStub(_rcb, ast, fn)
# These OrderedDictWrapper classes replace the actual OrderedDicts in
# module with versions that get/set properties inside of Module.
# This allows us to reuse most of nn.Module while still storing the
# data in C++.
# Each OrderedDict needs to support:
# x not in view
# x in view
# view[name] = ...
# view.values()
# del view[name]
# view.items()
# view.keys()
# len(view)
class OrderedDictWrapper(object):
def __init__(self, _c):
self._c = _c
def keys(self):
return [k for k, v in self.items()]
def values(self):
return [v for k, v in self.items()]
def __len__(self):
return len(self.values())
def __delitem__(self, k):
raise RuntimeError("cannot delete methods or parameters of a script module")
def items(self):
return self._c.items()
def __setitem__(self, k, v):
if k not in self:
raise RuntimeError("Can't add a new parameter after ScriptModule construction."
" Tried to add '{}".format(k))
self._c.setattr(k, v)
def __contains__(self, k):
return self._c.contains(k)
def __getitem__(self, k):
if k not in self:
raise KeyError(k)
return self._c.getattr(k)
class OrderedModuleDict(OrderedDictWrapper):
def __init__(self, module, python_dict):
super(OrderedModuleDict, self).__init__(torch._C.ModuleDict(module))
        # This contains _both_ script modules and non-script python-only
        # modules, because script modules are subclassed in python and the
        # C++ Module class will not hold references to them. To ensure that
        # you always get the same python value here, we store it in the
        # python dict as well.
self._python_modules = python_dict
def items(self):
r = self._python_modules.items()
return r
def __contains__(self, k):
return k in self._python_modules
def __setitem__(self, k, v):
        # Cases where a sub-module can be re-assigned after ScriptModule construction:
        # 1. If the attr is a module interface type, it's guaranteed that the module is
        #    not inlined in the graph, so it's safe to swap a new ScriptModule in.
        # 2. If the new value is a ScriptModule with the same JIT type, the IR won't change
        #    and it's legitimate to swap a new module in.
        # In these two cases we allow swapping in a new scripted module and update the
        # corresponding python module dict to keep them in sync.
        # Note: the value to be swapped in has to be a ScriptModule instead of an nn.Module,
        # otherwise it's illegal and we throw an error.
if isinstance(v, ScriptModule):
self._c.setattr(k, v)
self._python_modules[k] = v
else:
raise RuntimeError("Cannot re-assign modules in a ScriptModule with non-scripted "
"module, tried to replace existing module '{}': {}".format(k, v))
def __getitem__(self, k):
return self._python_modules[k]
# For each user-defined class that subclasses ScriptModule, this meta-class:
# (1) finds all the methods annotated with @script_method in a ScriptModule and
# removes them from the class attributes
# (2) puts a wrapper around the class's __init__ method to recursively compile
# all of the script_methods with the module after the original __init__ has
# run. This has to occur after the user-defined __init__ so that submodules and
# parameters are initialized _before_ the script compiler resolves references to
# `self.param` or `self.module`.
class ScriptMeta(type):
def __init__(cls, name, bases, attrs): # noqa: B902
# Aggregate all the ScriptMethods and constants from superclasses
cls._methods = {}
cls._constants_set = set(getattr(cls, '__constants__', ()))
for base in reversed(bases):
for k, v in getattr(base, '_methods', {}).items():
cls._methods[k] = v
base_constants = getattr(base, '_constants_set', set())
cls._constants_set = cls._constants_set.union(base_constants)
# find all the script methods of the current class
for k, v in sorted(attrs.items()):
if isinstance(v, ScriptMethodStub):
delattr(cls, k)
cls._methods[v.original_method.__name__] = v
if getattr(cls, '_disable_script_meta', False):
# We leave built-in ScriptModule types alone, since this metaclass
# is only for compiling user classes that inherit from
# ScriptModule.
return super(ScriptMeta, cls).__init__(name, bases, attrs)
original_init = getattr(cls, '__init__', lambda self: None)
@functools.wraps(original_init)
def init_then_script(self, *args, **kwargs):
original_init(self, *args, **kwargs)
if type(self) == cls:
def make_stubs(module):
cls = type(module)
return [v for k, v in sorted(cls._methods.items())]
self.__dict__["_actual_script_module"] = torch.jit._recursive.create_script_module(self, make_stubs)
# Delete the Python attributes that now shadow the ScriptModule
# ones, so that __getattr__ and __setattr__ will properly find
# the scripted versions.
concrete_type = self._actual_script_module._concrete_type
for name in concrete_type.get_attributes():
delattr(self, name)
for name, _ in concrete_type.get_modules():
delattr(self, name)
for name in ("_parameters", "_buffers", "_modules"):
delattr(self, name)
cls.__init__ = init_then_script
return super(ScriptMeta, cls).__init__(name, bases, attrs)
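# Illustrative sketch (not part of the original file) of the class shape this
# metaclass is designed for: a user subclass of ScriptModule whose
# @script_method-decorated methods are collected by ScriptMeta and compiled
# right after the user-defined __init__ finishes running.
#
#     class MyScriptModule(ScriptModule):
#         def __init__(self):
#             super(MyScriptModule, self).__init__()
#             self.weight = torch.nn.Parameter(torch.rand(3))
#
#         @script_method
#         def forward(self, x):
#             return x + self.weight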
if _enabled:
    # this is a Python 'non-data descriptor' that causes the first access
    # to ScriptModule's forward to look up the forward method and stash
    # it in the object's dict. Due to the standard rules for attribute lookup,
    # subsequent lookups will just directly return the previously looked-up method.
    # This is necessary because nn.Module defines forward as a method. If we
    # did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward
    # which always throws an exception.
class _CachedForward(object):
def __get__(self, obj, cls):
return self.__getattr__('forward')
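    # A minimal sketch of the non-data descriptor pattern used above
    # (names are hypothetical, not from this file): because the descriptor
    # defines only __get__, a value stashed in the instance __dict__ under the
    # same attribute name wins on every later lookup, so the expensive lookup
    # happens at most once per object.
    #
    #     class _CachedValue(object):
    #         def __get__(self, obj, cls):
    #             value = expensive_lookup(obj)    # hypothetical helper
    #             obj.__dict__['value'] = value    # later reads bypass __get__
    #             return value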
class ScriptModule(with_metaclass(ScriptMeta, Module)):
"""
``ScriptModule``s wrap a C++ ``torch::jit::Module``. ``ScriptModule``s
contain methods, attributes, parameters, and
constants. These can be accessed the same as on a normal ``nn.Module``.
"""
def __init__(self):
super(ScriptModule, self).__init__()
forward = _CachedForward()
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super(ScriptModule, self).__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
# Unwrap torch.jit.Attribute into a regular setattr + recording
# the provided type in __annotations__.
#
# This ensures that if we use the attr again in `__init__`, it
# will look like the actual value, not an instance of Attribute.
if isinstance(value, Attribute):
# NB: Ensure that we set __annotations__ on the specific
# class in question, and not on a superclass (which would
# be wrong wrong wrong!).
# See also https://github.com/pytorch/pytorch/issues/39463
if "__annotations__" not in self.__class__.__dict__:
self.__class__.__annotations__ = {}
self.__annotations__[attr] = value.type
value = value.value
return super(ScriptModule, self).__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def define(self, src):
if "_actual_script_module" in self.__dict__:
# If we have completed initialization, just defer to the
# backing RecursiveScriptModule to eagerly compile the provided
# source.
return self._actual_script_module.define(src)
# Otherwise, we are still in the object's __init__.
# In that case, add `src` as a stub to be compiled.
#
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
ast = torch._C._parse_source_def(src)
self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None)
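        # Usage sketch for `define` (illustrative only, not from the original
        # file): inside a ScriptModule subclass __init__, a TorchScript source
        # string can be registered as an extra method that is compiled together
        # with the rest of the module.
        #
        #     class Doubler(ScriptModule):
        #         def __init__(self):
        #             super(Doubler, self).__init__()
        #             self.define("def double(self, x):\n    return 2 * x")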
def _replicate_for_data_parallel(self):
return self._actual_script_module._replicate_for_data_parallel()
class RecursiveScriptModule(ScriptModule):
# XXX: RecursiveScriptModule inherits from ScriptModule for the sole
# reason that it retains the existing isinstance(ScriptModule)
# behavior.
r"""
The core data structure in TorchScript is the ``ScriptModule``. It is an
analogue of torch's ``nn.Module`` and represents an entire model as a tree of
submodules. Like normal modules, each individual module in a ``ScriptModule`` can
have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented
as Python functions, but in ``ScriptModule``\s methods are implemented as
TorchScript functions, a statically-typed subset of Python that contains all
of PyTorch's built-in Tensor operations. This difference allows your
``ScriptModule``\s code to run without the need for a Python interpreter.
``ScriptModule``\s should not be created manually, instead use
either :func:`tracing <torch.jit.trace>` or :func:`scripting <torch.jit.script>`.
Tracing and scripting can be applied incrementally and :ref:`composed as necessary <Types>`.
* Tracing records the tensor operations as executed with a set of example inputs and uses these
operations to construct a computation graph. You can use the full dynamic behavior of Python with tracing,
but values other than Tensors and control flow aren't captured in the graph.
* Scripting inspects the Python code of the model
and compiles it to TorchScript. Scripting allows the use of many `types`_ of values and supports dynamic control flow.
Many, but not all features of Python are supported by the compiler, so changes to the source code may be necessary.
"""
_disable_script_meta = True
def __init__(self, cpp_module):
self.__dict__['_initializing'] = True
self._c = cpp_module
super(RecursiveScriptModule, self).__init__()
# Delete the 'training' attribute set up by `Module.__init__`. It
# will get set on the underlying cpp module, so we delete it here
# to avoid this version shadowing the cpp module version.
delattr(self, 'training')
@staticmethod
def _construct(cpp_module, init_fn):
"""
Construct a RecursiveScriptModule that's ready for use. PyTorch
            code should use this to construct a RecursiveScriptModule instead
            of calling `__init__` directly, as it makes sure the
object is properly finalized (and in the future we may take
control of how the RecursiveScriptModule instance is created).
Arguments:
cpp_module: The C++ Module that will hold the actual state of
this RecursiveScriptModule instance.
init_fn: Lambda that initializes the RecursiveScriptModule passed to it.
"""
script_module = RecursiveScriptModule(cpp_module)
init_fn(script_module)
# Finalize the ScriptModule: replace the nn.Module state with our
# custom implementations and flip the _initializing bit.
RecursiveScriptModule._finalize_scriptmodule(script_module)
return script_module
@staticmethod
def _finalize_scriptmodule(script_module):
script_module._parameters = OrderedDictWrapper(torch._C.ParameterDict(script_module._c))
script_module._buffers = OrderedDictWrapper(torch._C.BufferDict(script_module._c))
script_module._modules = OrderedModuleDict(script_module._c, script_module._modules)
script_module._initializing = False
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of RecursiveScriptModule using an instance of a C++ module.
Arguments:
cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.
"""
self.__init__(cpp_module)
# Copy the concrete type from the C++ module to this ScriptModule.
self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(self._c._type())
# Copy submodules from the C++ module to this ScriptModule.
modules = {}
for name, cpp_module in torch._C.ModuleDict(self._c).items():
modules[name] = wrap_cpp_module(cpp_module)
self._modules = OrderedModuleDict(self._c, modules)
# Copy parameters and buffers.
self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c))
self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c))
# Get rid of the functions from the old C++ module.
self.__dict__ = {k: v for k, v in self.__dict__.items() if not isinstance(v, torch._C.ScriptMethod)}
self.__dict__['_initializing'] = False
@property
def graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. See `Interpreting Graphs`_ for details.
"""
return self.forward.graph
@property
def inlined_graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. This graph will be preprocessed to inline all function and method calls.
See `Interpreting Graphs`_ for details.
"""
return self.forward.inlined_graph
@property
def code(self):
r"""
Returns a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See `Inspecting Code`_
for details.
"""
return self.forward.code
@property
def code_with_constants(self):
r"""
Returns a tuple of:
[0] a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See `code`.
[1] a ConstMap following the CONSTANT.cN format of the output in [0].
The indices in the [0] output are keys to the underlying constant's values.
See `Inspecting Code`_ for details.
"""
r = self.forward.code_with_constants
return (r[0], ConstMap(r[1]))
def save(self, *args, **kwargs):
r"""
save(f, _extra_files=ExtraFilesMap{})
See :func:`torch.jit.save <torch.jit.save>` for details.
"""
return self._c.save(*args, **kwargs)
def _save_for_lite_interpreter(self, *args, **kwargs):
r"""
_save_for_lite_interpreter(f)
Add (or update) the bytecode session to the script model. The updated model is used
in lite interpreter for mobile applications.
Arguments:
f: a string containing a file name.
_extra_files: Map from filename to contents which will be stored as part of 'f'.
"""
return self._c._save_for_mobile(*args, **kwargs)
def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs):
return self._c._save_to_buffer_for_mobile(*args, **kwargs)
def save_to_buffer(self, *args, **kwargs):
return self._c.save_to_buffer(*args, **kwargs)
def get_debug_state(self, *args, **kwargs):
return self._c.get_debug_state()
def extra_repr(self):
return 'original_name={}'.format(self.original_name)
def graph_for(self, *args, **kwargs):
return self.forward.graph_for(*args, **kwargs)
@property
def original_name(self):
            if type(self).__name__ == str(self._c._type().name()):
return ''
return str(self._c._type().name())
def define(self, src):
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
self._c._define(self._concrete_type, src, rcb)
def __getattr__(self, attr):
if '_initializing' not in self.__dict__:
raise RuntimeError("ScriptModule has not been initialized, did you forget to call super's init?")
if self._initializing:
return super(RecursiveScriptModule, self).__getattr__(attr)
# _modules check is before hasattr since modules are included as attributes in _c,
# but we want to get the python wrapper from _modules instead of the raw _c object.
if attr in self._modules:
return self._modules[attr]
elif self._c.hasattr(attr):
return self._c.getattr(attr)
elif self._c._has_method(attr):
script_method = self._c._get_method(attr)
# cache method so future calls do not go through __getattr__
# to improve invocation performance
self.__dict__[attr] = script_method
return script_method
return super(RecursiveScriptModule, self).__getattr__(attr)
def __setattr__(self, attr, value):
if self._initializing:
return super(RecursiveScriptModule, self).__setattr__(attr, value)
if attr in self._modules:
self._modules[attr] = value
elif self._c.hasattr(attr):
self._c.setattr(attr, value)
elif hasattr(self, "_concrete_type") and attr in self._concrete_type.get_constants().keys():
# TODO: we don't have _concrete_type set after load(), and in general we lose constant information.
# We should encode constants as class type attributes (or something) so it persists across save/load.
raise AttributeError("Cannot mutate TorchScript constant value: '{}'. Value: '{}'".format(attr, value))
else:
# We allow setting Python attributes on the ScriptModule, for
# when people want to stash some convenience info on it.
# TODO: it's possible that the following is confusing:
# s = torch.jit.script(...)
# s.python_attr = ...
# s.save() <--- this doesn't have `python_attr`
# It's fairly trivial to save enough info to warn in this case.
return super(RecursiveScriptModule, self).__setattr__(attr, value)
def __getstate__(self):
raise pickle.PickleError(
"ScriptModules cannot be deepcopied using copy.deepcopy or saved using torch.save. " +
"Mixed serialization of script and non-script modules is not supported. " +
"For purely script modules use my_script_module.save(<filename>) instead.")
def __copy__(self):
return torch.jit._recursive.wrap_cpp_module(copy.copy(self._c))
def __deepcopy__(self, memo):
return torch.jit._recursive.wrap_cpp_module(copy.deepcopy(self._c, memo))
# Python magic methods do method lookups on an object's class type, instead of looking up
# the method defines on the class instance. In order to continue to expose the magic methods
# of builtin-containers (ModuleList, Sequential, ModuleDict) to python we
# define magic methods here as a shim to the correct attribute.
def forward_magic_method(self, method_name, *args, **kwargs):
self_method = getattr(self, method_name)
if getattr(self_method, "__func__", None) == getattr(RecursiveScriptModule, method_name):
raise NotImplementedError()
return self_method(*args, **kwargs)
def __iter__(self):
return self.forward_magic_method("__iter__")
def __getitem__(self, idx):
return self.forward_magic_method("__getitem__", idx)
def __len__(self):
return self.forward_magic_method("__len__")
def __contains__(self, key):
return self.forward_magic_method("__contains__", key)
# dir is defined by the base nn.Module, so instead of throwing if
        # it is not overridden, we call into the nn.Module __dir__ method
def __dir__(self):
self_method = self.__dir__
if self_method.__func__ == get_function_from_type(RecursiveScriptModule, "__dir__"):
return super(RecursiveScriptModule, self).__dir__()
return self_method()
        # To resolve bool(value), python checks if __bool__ is defined, then __iter__,
        # and then returns True for classes. Because __iter__() on this
        # class throws if it isn't overridden, we define __bool__ to preserve the default behavior
def __bool__(self):
self_method = self.__bool__
if self_method.__func__ == get_function_from_type(RecursiveScriptModule, "__bool__"):
return True
return self_method()
def _replicate_for_data_parallel(self):
# we have to initialize ScriptModule properly so that
# it works with pybind11
def init_fn(script_module):
# Don't do anything here, we'll initialize the ScriptModule below
return
return RecursiveScriptModule._construct(self._c._replicate_for_data_parallel(), init_fn)
# Need to copy all RecursiveScriptModule methods to ScriptModule.
#
# This is because `super(MyScriptModule, self).foo()` does not use
# `__getattr__` to look up `foo`. So we need to make each method available on
# the ScriptModule manually.
for name, item in RecursiveScriptModule.__dict__.items():
if not callable(item) and not isinstance(item, property):
continue
if name.startswith('__') or hasattr(ScriptModule, name):
continue
# We can copy over the implementation wholesale because besides the
# `super()` thing above, ScriptModule behaves exactly like
# RecursiveScriptModule
setattr(ScriptModule, name, item)
def _get_methods(cls):
import inspect
# In Python 3 unbound methods are functions, but in Python 2 they are methods
return inspect.getmembers(cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
_compiled_methods_whitelist = {
'forward', 'register_buffer', 'register_parameter', 'add_module',
'_apply', 'apply', 'cuda', 'cpu', 'to', 'type', 'float', 'double', 'half',
'state_dict', '_save_to_state_dict', 'load_state_dict',
'_load_from_state_dict', '_named_members', 'parameters', 'named_parameters',
'buffers', 'named_buffers', 'children', 'named_children', 'modules',
'named_modules', 'zero_grad', 'share_memory', '_get_name', 'extra_repr',
'_slow_forward', '_tracing_name', 'eval', 'train',
}
def _make_fail(name):
def fail(self, *args, **kwargs):
raise RuntimeError(name + " is not supported on ScriptModules")
return fail
for name, method in _get_methods(torch.nn.Module):
if name.startswith('__'):
continue
if name not in RecursiveScriptModule.__dict__ and name not in _compiled_methods_whitelist:
setattr(RecursiveScriptModule, method.__name__, _make_fail(name))
else:
# TODO MAKE SURE THAT DISABLING WORKS
class ScriptModule(torch.nn.Module):
def __init__(self):
super(ScriptModule, self).__init__()
class TracedModule(ScriptModule):
_disable_script_meta = True
def __init__(self, orig, id_set=None, _compilation_unit=None):
# XXX: orig can be a nn.Module or a function!
super(TracedModule, self).__init__()
assert(isinstance(orig, torch.nn.Module))
# Copy a subset of `orig` to a temporary nn.Module.
# This is a way to customize what will actually get compiled by create_script_module
id_set = set()
# This allows us to preserve the original module's qualified name by defining a new
# type with the attribute _jit_override_qualname. In torch._jit_internal._qualified_name
# we have a special case that will look up this attribute to override whatever qualname
# we would get from the python type system
class QualnameWrapper(torch.nn.Module):
pass
QualnameWrapper._jit_override_qualname = torch._jit_internal._qualified_name(type(orig))
tmp_module = QualnameWrapper()
def check_unique(param):
if param in id_set:
raise ValueError("TracedModules don't support parameter sharing between modules")
id_set.add(param)
tmp_module.training = orig.training
for name, param in orig._parameters.items():
if param is not None:
tmp_module._parameters[name] = param
check_unique(param)
for name, buf in orig._buffers.items():
if buf is not None:
tmp_module._buffers[name] = buf
check_unique(buf)
for name, val in orig.__dict__.items():
if torch._C._jit_is_script_object(val) and name not in orig._parameters and name not in orig._buffers:
setattr(tmp_module, name, val)
if orig._backward_hooks:
raise ValueError("Modules that have backward hooks assigned can't be compiled: " + str(orig))
for name, submodule in orig._modules.items():
tmp_module._modules[name] = make_module(submodule, TracedModule, _compilation_unit=None)
script_module = torch.jit._recursive.create_script_module(tmp_module, lambda module: (), share_types=False)
self.__dict__['_name'] = type(orig).__name__
self.__dict__['_actual_script_module'] = script_module
for name in ("_parameters", "_buffers", "_modules"):
delattr(self, name)
def forward(self, *args, **kwargs):
        raise RuntimeError('Traced submodules cannot be called.')
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super(TracedModule, self).__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
return super(TracedModule, self).__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def _get_name(self):
return self._name
def extra_repr(self):
return 'original_name={}'.format(self._name)
if _enabled:
class TopLevelTracedModule(TracedModule):
forward = _CachedForward()
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of TopLevelTracedModule using an instance of a C++ module.
Arguments:
cpp_module: The C++ module that this TopLevelTracedModule will be rebuilt around.
"""
self.__dict__['_actual_script_module']._reconstruct(cpp_module)
def is_scripting():
r"""
Function that returns True when in compilation and False otherwise. This
is useful especially with the @unused decorator to leave code in your
model that is not yet TorchScript compatible.
.. testcode::
import torch
@torch.jit.unused
def unsupported_linear_op(x):
return x
def linear(x):
if not torch.jit.is_scripting():
return torch.linear(x)
else:
return unsupported_linear_op(x)
"""
return False
def is_tracing():
"""
Returns ``True`` in tracing (if a function is called during the tracing of
code with ``torch.jit.trace``) and ``False`` otherwise.
"""
    return torch._C._is_tracing()
def _unwrap_optional(x):
assert x is not None, "Unwrapping null optional"
return x
_register_builtin(_unwrap_optional, 'aten::_unwrap_optional')
_register_builtin(_wait, 'aten::wait')
_register_builtin(wait, 'aten::wait')
_register_builtin(is_scripting, 'aten::is_scripting')
# Caching: we currently cache compilation of free functions and overloaded functions.
# To cache free functions we hold a weak ref to the function object and
# map to the compiled fn's qualified name.
# To cache overloaded functions we hold a weak ref to the function obj and
# map to all of its overloaded compiled fns.
# In the future we could consider caching more types of objects so that
# aliasing is preserved across separate compilations of the same object.
_jit_caching_layer = weakref.WeakKeyDictionary()
_jit_function_overload_caching = weakref.WeakKeyDictionary()
def _try_get_jit_cached_overloads(key):
qual_names = _jit_function_overload_caching.get(key, None)
if qual_names:
return [_python_cu.find_function(qual_name) for qual_name in qual_names]
else:
return None
def _set_jit_overload_cache(key, compiled_fns):
_jit_function_overload_caching[key] = [fn.qualified_name for fn in compiled_fns]
def _try_get_jit_cached_function(key):
if getattr(key, "__disable_jit_function_caching__", False) is True:
return None
qual_name = _jit_caching_layer.get(key, None)
if qual_name:
return _python_cu.find_function(qual_name)
else:
return None
def _set_jit_function_cache(key, value):
# only free functions currently supported
assert isinstance(value, torch.jit.ScriptFunction)
_jit_caching_layer[key] = value.qualified_name
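# A small sketch (illustrative only) of the effect of the free-function cache
# described above: scripting the same Python function a second time is resolved
# through _try_get_jit_cached_function instead of recompiling the source.
#
#     def relu6(x):
#         return x.clamp(0, 6)
#
#     first = torch.jit.script(relu6)    # compiles and populates _jit_caching_layer
#     second = torch.jit.script(relu6)   # served from the cache, no recompilation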
# qualified_name => ScriptClass mapping
_script_classes = {}
def _add_script_class(cls, name):
cls.__torch_script_class__ = True
global _script_classes
_script_classes[name] = cls
def _get_script_class(name):
global _script_classes
if name not in _script_classes:
return None
return _script_classes[name]
# overloads are registered in _jit_internal and compiled here so that _overload
# can be used in nn/functional.py without an import cycle
def _check_overload_defaults(impl_defaults, overload_defaults, loc):
for name, overload_value in overload_defaults.items():
if name not in impl_defaults or impl_defaults[name] != overload_value:
raise torch.jit.frontend.FrontendError(
loc, "Default parameters on overloads do not affect the runtime so they "
"must equal to the default parameter on the implementation function. Found on "
"parameter {name}".format(name=name))
def _compile_function_with_overload(overload_fn, qual_name, impl_fn):
overload_decl = torch.jit.get_jit_def(overload_fn, overload_fn.__name__).decl()
overload_signature = torch.jit.annotations.get_signature(overload_fn, None, None, inspect.ismethod(overload_fn))
impl_ast = torch.jit.get_jit_def(impl_fn, impl_fn.__name__)
overload_defaults = get_default_args(overload_fn)
implementation_defaults = get_default_args(impl_fn)
_rcb = _jit_internal.createResolutionCallbackFromClosure(impl_fn)
_check_overload_defaults(implementation_defaults, overload_defaults, overload_decl.range())
fn = torch._C._jit_script_compile_overload(qual_name, overload_decl, impl_ast, _rcb,
implementation_defaults, overload_signature)
return fn
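# Illustrative sketch (hypothetical function names) of the overload pattern the
# helper above compiles: signature-only stubs are registered with
# torch.jit._overload and the single implementation body is then compiled once
# per declared signature.
#
#     @torch.jit._overload
#     def add_one(x):
#         # type: (int) -> int
#         ...
#
#     @torch.jit._overload
#     def add_one(x):
#         # type: (float) -> float
#         ...
#
#     def add_one(x):
#         return x + 1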
def _get_overloads(obj):
# check for cached compiled fns
existing_compiled_fns = _try_get_jit_cached_overloads(obj)
qual_name = _qualified_name(obj)
uncompiled_overloads = _jit_internal._get_fn_overloads(qual_name)
if uncompiled_overloads is None:
return existing_compiled_fns
compiled_fns = []
for overload_fn in uncompiled_overloads:
compiled_fns.append(_compile_function_with_overload(overload_fn, qual_name, obj))
if existing_compiled_fns:
compiled_fns = existing_compiled_fns + compiled_fns
# cache compilation, remove information stored to do compilation
_set_jit_overload_cache(obj, compiled_fns)
_jit_internal._clear_fn_overloads(qual_name)
return compiled_fns
def _check_directly_compile_overloaded(obj):
qual_name = _qualified_name(obj)
if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
raise RuntimeError("Function {} cannot be directly compiled because it"
" is overloaded. It must be used in a context of a function"
" where its inputs can determine which overload to call.".format(qual_name))
# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
# This is not perfect but works in common cases
Error.__name__ = "Error"
Error.__qualname__ = "Error"
def _get_named_tuple_properties(obj):
assert issubclass(obj, tuple) and hasattr(obj, '_fields')
fields = list(obj._fields)
annotations = []
has_annotations = hasattr(obj, '__annotations__')
for field in fields:
if has_annotations and field in obj.__annotations__:
the_type = torch.jit.annotations.ann_to_type(obj.__annotations__[field], _jit_internal.fake_range())
annotations.append(the_type)
else:
annotations.append(torch._C.TensorType.get())
return type(obj).__name__, fields, annotations
def _create_named_tuple(t, unqual_name, field_names):
TupleType = collections.namedtuple(unqual_name, field_names)
return TupleType(*t)
class _disable_tracing(object):
def __enter__(self):
self.state = torch._C._get_tracing_state()
torch._C._set_tracing_state(None)
def __exit__(self, *args):
torch._C._set_tracing_state(self.state)
self.state = None
# for use in python if using annotate
def annotate(the_type, the_value):
# noop in python
return the_value
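# Usage sketch for `annotate` (illustrative only, assuming ``List`` is imported
# from ``typing``): the TorchScript compiler reads the declared type, which is
# how an empty container gets a non-default element type, while in eager Python
# the call simply returns the value unchanged.
#
#     @torch.jit.script
#     def make_names():
#         names = torch.jit.annotate(List[str], [])
#         names.append("a")
#         return names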
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
def _graph_for(self, *args, **kwargs):
self(*args, **kwargs)
return last_executed_optimized_graph()
torch._C.ScriptMethod.graph_for = _graph_for
torch._C.ScriptFunction.graph_for = _graph_for
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
"""
set_module(ScriptFunction, "torch.jit")
if not torch._C._jit_init():
raise RuntimeError("JIT initialization failed")
| 46,552 |
904 |
<gh_stars>100-1000
import logging
from django.db import migrations
from django.db import models
LOG = logging.getLogger(__name__)
def delete_empty_repositories(apps, schema_editor):
db_alias = schema_editor.connection.alias
Repository = apps.get_model('main', 'Repository')
qs = (
Repository.objects.using(db_alias)
.annotate(content_count=models.Count('content_objects'))
.filter(content_count=0)
)
count = qs.count()
LOG.info('Deleting {0} Repository records'.format(count))
for repo in qs:
repo.delete()
class Migration(migrations.Migration):
dependencies = [
('main', '0096_repository_format'),
]
operations = [
migrations.RunPython(code=delete_empty_repositories,
reverse_code=migrations.RunPython.noop)
]
| 350 |
527 |
<gh_stars>100-1000
/*
* Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.ml.mms.archive;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import java.io.File;
import java.io.FileFilter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
public final class Exporter {
private static final Gson GSON = new GsonBuilder().setPrettyPrinting().create();
private Exporter() {}
public static void main(String[] args) {
String jarName = getJarName();
Options options = Config.getOptions();
DefaultParser parser = new DefaultParser();
try {
if (args.length == 0
|| args[0].equalsIgnoreCase("-h")
|| args[0].equalsIgnoreCase("--help")) {
printHelp("java -jar " + jarName + " <export>", options);
return;
}
CommandLine cmd = parser.parse(options, args, null, false);
List<String> cmdArgs = cmd.getArgList();
if (cmdArgs.isEmpty()) {
printHelp("java -jar " + jarName + " <export>", options);
return;
}
Config config = new Config(cmd);
String action = cmdArgs.get(0);
if (!"export".equalsIgnoreCase(action)) {
printHelp("java -jar " + jarName + " <export>", options);
return;
}
String modelName = config.getModelName();
if (!modelName.matches("[A-Za-z][A-Za-z0-9_\\-.]+")) {
System.err.println(
"model-name must starts with letter and only allows alphanumeric characters, hyphens, underscore or dot.");
return;
}
File modelPath = new File(config.getModelPath()).getCanonicalFile();
if (!modelPath.exists()) {
System.err.println("model-path not found: " + modelName);
return;
}
String output = config.getOutputFile();
File outputFile;
if (output == null) {
outputFile = new File(modelPath.getParentFile(), modelName + ".mar");
} else {
outputFile = new File(output);
}
final String fileName = modelPath.getName();
            if (modelPath.isFile() && (fileName.endsWith(".model") || fileName.endsWith(".mar"))) {
ModelArchive.migrate(modelPath, outputFile);
return;
}
if (!modelPath.isDirectory()) {
System.err.println("model-path should be a directory or model archive file.");
return;
}
File[] files = modelPath.listFiles();
if (files == null) {
throw new AssertionError(
"Failed list files in folder: " + modelPath.getAbsolutePath());
}
Manifest manifest = new Manifest();
Manifest.Model model = new Manifest.Model();
manifest.setModel(model);
String runtime = config.getRuntime();
if (runtime != null) {
manifest.setRuntime(Manifest.RuntimeType.fromValue(runtime));
}
File symbolFile = findUniqueFile(files, "-symbol.json");
if (symbolFile != null) {
model.addExtension("symbolFile", symbolFile.getName());
}
File paramsFile = findUniqueFile(files, ".params");
if (paramsFile != null) {
model.addExtension("parametersFile", paramsFile.getName());
}
String handler = config.getHandler();
if (handler == null) {
File serviceFile = findUniqueFile(files, "_service.py");
if (serviceFile != null) {
model.setHandler(serviceFile.getName());
}
} else {
Manifest.RuntimeType runtimeType = manifest.getRuntime();
if (runtimeType == Manifest.RuntimeType.PYTHON
|| runtimeType == Manifest.RuntimeType.PYTHON2
|| runtimeType == Manifest.RuntimeType.PYTHON3) {
String[] tokens = handler.split(":");
File serviceFile = new File(modelPath, tokens[0]);
                    if (!serviceFile.exists()) {
System.err.println("handler file is not found in: " + modelPath);
return;
}
}
model.setHandler(handler);
}
try (ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(outputFile))) {
zos.putNextEntry(new ZipEntry("MANIFEST.json"));
zos.write(GSON.toJson(manifest).getBytes(StandardCharsets.UTF_8));
int prefix = modelPath.getCanonicalPath().length();
FileFilter filter =
pathname -> {
if (pathname.isHidden()) {
return false;
}
String name = pathname.getName();
return !"MANIFEST.json".equalsIgnoreCase(name);
};
for (File file : files) {
if (filter.accept(file)) {
ZipUtils.addToZip(prefix, file, filter, zos);
}
}
} catch (IOException e) {
e.printStackTrace();
if (!outputFile.delete()) {
outputFile.deleteOnExit();
}
}
} catch (InvalidModelException | IOException e) {
System.err.println(e.getMessage());
} catch (ParseException e) {
System.err.println(e.getMessage());
printHelp("java -jar " + jarName + " <export>", options);
}
}
private static void printHelp(String message, Options options) {
HelpFormatter formatter = new HelpFormatter();
formatter.setLeftPadding(1);
formatter.setWidth(120);
formatter.printHelp(message, options);
}
private static String getJarName() {
URL url = Exporter.class.getProtectionDomain().getCodeSource().getLocation();
String path = url.getPath();
if ("file".equalsIgnoreCase(url.getProtocol())) {
File file = new File(path);
if (path.toLowerCase().endsWith(".jar")) { // we only support jar file for now
return file.getName();
}
}
return null;
}
private static File findUniqueFile(File[] list, String extension) throws InvalidModelException {
File ret = null;
for (File file : list) {
if (file.getName().endsWith(extension)) {
if (ret != null) {
throw new InvalidModelException(
"Multiple " + extension + " file found in the path.");
}
ret = file;
}
}
return ret;
}
private static final class Config {
private String modelName;
private String modelPath;
private String handler;
private String runtime;
private String outputFile;
public Config(CommandLine cmd) {
modelName = cmd.getOptionValue("model-name");
modelPath = cmd.getOptionValue("model-path");
handler = cmd.getOptionValue("handler");
runtime = cmd.getOptionValue("runtime");
outputFile = cmd.getOptionValue("output-file");
}
public static Options getOptions() {
Options options = new Options();
options.addOption(
Option.builder("n")
.longOpt("model-name")
.hasArg()
.required()
.argName("MODEL_NAME")
.desc(
"Exported model name. Exported file will be named as model-name.model and saved in current working directory.")
.build());
options.addOption(
Option.builder("p")
.longOpt("model-path")
.hasArg()
.required()
.argName("MODEL_PATH")
.desc(
"Path to the folder containing model related files or legacy model archive. Signature file is required.")
.build());
options.addOption(
Option.builder("r")
.longOpt("runtime")
.hasArg()
.argName("RUNTIME")
.desc(
"The runtime environment for the MMS to execute your model custom code, default python2.7")
.build());
options.addOption(
Option.builder("e")
.longOpt("engine")
.hasArg()
.argName("engine")
.desc("The ML framework for your model, default MXNet")
.build());
options.addOption(
Option.builder("s")
.longOpt("handler")
.hasArg()
.argName("HANDLER")
.desc(
"The entry-point within your code that MMS can call to begin execution.")
.build());
options.addOption(
Option.builder("o")
.longOpt("output-file")
.hasArg()
.argName("OUTPUT_FILE")
.desc("Output model archive file path.")
.build());
return options;
}
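        // Illustrative option usage assembled from the builders above (values are examples,
        // and the exact launcher invocation may differ):
        //   --model-name resnet-18 --model-path /models/resnet-18 \
        //   --handler mxnet_vision_service:handle --runtime python --output-file resnet-18.model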
public String getModelName() {
return modelName;
}
public void setModelName(String modelName) {
this.modelName = modelName;
}
public String getModelPath() {
return modelPath;
}
public void setModelPath(String modelPath) {
this.modelPath = modelPath;
}
public String getHandler() {
return handler;
}
public void setHandler(String handler) {
this.handler = handler;
}
public String getOutputFile() {
return outputFile;
}
public void setOutputFile(String outputFile) {
this.outputFile = outputFile;
}
public String getRuntime() {
return runtime;
}
public void setRuntime(String runtime) {
this.runtime = runtime;
}
}
}
| 6,119 |
528 |
from amaranth.sim.core import *
from amaranth.sim.core import __all__
import warnings
warnings.warn("instead of nmigen.sim.core, use amaranth.sim.core",
DeprecationWarning, stacklevel=2)
| 76 |
1,144 |
package de.metas.procurement.base.order.impl;
import java.util.Iterator;
import org.adempiere.ad.trx.api.ITrx;
import org.adempiere.ad.trx.api.ITrxManager;
import org.compiere.util.TrxRunnableAdapter;
import de.metas.procurement.base.model.I_PMM_PurchaseCandidate;
import de.metas.util.Check;
import de.metas.util.Services;
/*
* #%L
* de.metas.procurement.base
* %%
* Copyright (C) 2016 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
/**
* Generates purchase orders from {@link I_PMM_PurchaseCandidate}s by utilizing {@link OrdersAggregator}.
*
* @author metas-dev <<EMAIL>>
*
*/
public class OrdersGenerator
{
public static final OrdersGenerator newInstance()
{
return new OrdersGenerator();
}
// services
private final transient ITrxManager trxManager = Services.get(ITrxManager.class);
private Iterator<I_PMM_PurchaseCandidate> candidates;
private OrdersGenerator()
{
super();
}
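	// Typical usage (illustrative sketch; the candidate iterator/iterable comes from the caller):
	//   OrdersGenerator.newInstance()
	//           .setCandidates(candidates)
	//           .generate();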
public void generate()
{
trxManager.run(ITrx.TRXNAME_ThreadInherited, new TrxRunnableAdapter()
{
@Override
public void run(final String localTrxName) throws Exception
{
generate0();
}
});
}
private void generate0()
{
final OrdersCollector ordersCollector = OrdersCollector.newInstance();
final OrdersAggregator aggregator = OrdersAggregator.newInstance(ordersCollector);
for (final Iterator<I_PMM_PurchaseCandidate> it = getCandidates(); it.hasNext();)
{
final I_PMM_PurchaseCandidate candidateModel = it.next();
final PurchaseCandidate candidate = PurchaseCandidate.of(candidateModel);
aggregator.add(candidate);
}
aggregator.closeAllGroups();
}
public OrdersGenerator setCandidates(final Iterator<I_PMM_PurchaseCandidate> candidates)
{
Check.assumeNotNull(candidates, "candidates not null");
this.candidates = candidates;
return this;
}
public OrdersGenerator setCandidates(final Iterable<I_PMM_PurchaseCandidate> candidates)
{
Check.assumeNotNull(candidates, "candidates not null");
setCandidates(candidates.iterator());
return this;
}
private Iterator<I_PMM_PurchaseCandidate> getCandidates()
{
return candidates;
}
}
| 904 |
3,200 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import django
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3"}},
ROOT_URLCONF="",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
],
SITE_ID=1,
FIXTURE_DIRS=[""],
AUDITOR_BACKEND=None,
AUDITOR_EVENTS_TASK=None,
WORKERS_BACKEND=None,
WORKERS_SERVICE=None,
EXECUTOR_BACKEND=None,
EXECUTOR_SERVICE=None,
CONF_BACKEND=None,
CONF_CHECK_OWNERSHIP=False,
STORE_OPTION="env",
)
django.setup()
except ImportError:
raise ImportError(
"To fix this error, run: pip install -r requirements/requirements-test.txt"
)
| 616 |
4,392 |
#include "common.h"
#if defined(BFLOAT16) && defined(BFLOAT16CONVERSION)
static float
bfloat16tof32 (bfloat16 f16)
{
float result = 0;
unsigned short* q = (unsigned short*)(&result);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
q[0] = f16;
#else
q[1] = f16;
#endif
return result;
}
#define BF16TOF32(x) (bfloat16tof32(x))
#else
#define BF16TOF32(x) x
#endif
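/*
 * Generic C micro-kernel computing C += alpha * A * B. The outer loops walk C in
 * 2x2 blocks, with single-row/column remainder handling afterwards; when BFLOAT16
 * conversion is enabled, each bfloat16 operand is widened to float via BF16TOF32
 * before the multiply-accumulate.
 */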
int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,IFLOAT* ba,IFLOAT* bb,FLOAT* C,BLASLONG ldc
#ifdef TRMMKERNEL
,BLASLONG offset
#endif
)
{
BLASLONG i,j,k;
FLOAT *C0,*C1;
IFLOAT *ptrba,*ptrbb;
FLOAT res0,res1,res2,res3;
IFLOAT load0,load1,load2,load3,load4,load5,load6,load7;
for (j=0; j<bn/2; j+=1)
{
C0 = C;
C1 = C0+ldc;
ptrba = ba;
for (i=0; i<bm/2; i+=1)
{
ptrbb = bb;
res0 = 0;
res1 = 0;
res2 = 0;
res3 = 0;
for (k=0; k<bk/4; k+=1)
{
load0 = ptrba[2*0+0];
load1 = ptrbb[2*0+0];
res0 = res0+BF16TOF32(load0)*BF16TOF32(load1);
load2 = ptrba[2*0+1];
res1 = res1+BF16TOF32(load2)*BF16TOF32(load1);
load3 = ptrbb[2*0+1];
res2 = res2+BF16TOF32(load0)*BF16TOF32(load3);
res3 = res3+BF16TOF32(load2)*BF16TOF32(load3);
load4 = ptrba[2*1+0];
load5 = ptrbb[2*1+0];
res0 = res0+BF16TOF32(load4)*BF16TOF32(load5);
load6 = ptrba[2*1+1];
res1 = res1+BF16TOF32(load6)*BF16TOF32(load5);
load7 = ptrbb[2*1+1];
res2 = res2+BF16TOF32(load4)*BF16TOF32(load7);
res3 = res3+BF16TOF32(load6)*BF16TOF32(load7);
load0 = ptrba[2*2+0];
load1 = ptrbb[2*2+0];
res0 = res0+BF16TOF32(load0)*BF16TOF32(load1);
load2 = ptrba[2*2+1];
res1 = res1+BF16TOF32(load2)*BF16TOF32(load1);
load3 = ptrbb[2*2+1];
res2 = res2+BF16TOF32(load0)*BF16TOF32(load3);
res3 = res3+BF16TOF32(load2)*BF16TOF32(load3);
load4 = ptrba[2*3+0];
load5 = ptrbb[2*3+0];
res0 = res0+BF16TOF32(load4)*BF16TOF32(load5);
load6 = ptrba[2*3+1];
res1 = res1+BF16TOF32(load6)*BF16TOF32(load5);
load7 = ptrbb[2*3+1];
res2 = res2+BF16TOF32(load4)*BF16TOF32(load7);
res3 = res3+BF16TOF32(load6)*BF16TOF32(load7);
ptrba = ptrba+8;
ptrbb = ptrbb+8;
}
for (k=0; k<(bk&3); k+=1)
{
load0 = ptrba[2*0+0];
load1 = ptrbb[2*0+0];
res0 = res0+BF16TOF32(load0)*BF16TOF32(load1);
load2 = ptrba[2*0+1];
res1 = res1+BF16TOF32(load2)*BF16TOF32(load1);
load3 = ptrbb[2*0+1];
res2 = res2+BF16TOF32(load0)*BF16TOF32(load3);
res3 = res3+BF16TOF32(load2)*BF16TOF32(load3);
ptrba = ptrba+2;
ptrbb = ptrbb+2;
}
res0 = res0*alpha;
C0[0] = C0[0]+res0;
res1 = res1*alpha;
C0[1] = C0[1]+res1;
res2 = res2*alpha;
C1[0] = C1[0]+res2;
res3 = res3*alpha;
C1[1] = C1[1]+res3;
C0 = C0+2;
C1 = C1+2;
}
for (i=0; i<(bm&1); i+=1)
{
ptrbb = bb;
res0 = 0;
res1 = 0;
for (k=0; k<bk; k+=1)
{
load0 = ptrba[0+0];
load1 = ptrbb[2*0+0];
res0 = res0+BF16TOF32(load0)*BF16TOF32(load1);
load2 = ptrbb[2*0+1];
res1 = res1+BF16TOF32(load0)*BF16TOF32(load2);
ptrba = ptrba+1;
ptrbb = ptrbb+2;
}
res0 = res0*alpha;
C0[0] = C0[0]+res0;
res1 = res1*alpha;
C1[0] = C1[0]+res1;
C0 = C0+1;
C1 = C1+1;
}
k = (bk<<1);
bb = bb+k;
i = (ldc<<1);
C = C+i;
}
for (j=0; j<(bn&1); j+=1)
{
C0 = C;
ptrba = ba;
for (i=0; i<bm/2; i+=1)
{
ptrbb = bb;
res0 = 0;
res1 = 0;
for (k=0; k<bk; k+=1)
{
load0 = ptrba[2*0+0];
load1 = ptrbb[0+0];
res0 = res0+BF16TOF32(load0)*BF16TOF32(load1);
load2 = ptrba[2*0+1];
res1 = res1+BF16TOF32(load2)*BF16TOF32(load1);
ptrba = ptrba+2;
ptrbb = ptrbb+1;
}
res0 = res0*alpha;
C0[0] = C0[0]+res0;
res1 = res1*alpha;
C0[1] = C0[1]+res1;
C0 = C0+2;
}
for (i=0; i<(bm&1); i+=1)
{
ptrbb = bb;
res0 = 0;
for (k=0; k<bk; k+=1)
{
load0 = ptrba[0+0];
load1 = ptrbb[0+0];
res0 = res0+BF16TOF32(load0)*BF16TOF32(load1);
ptrba = ptrba+1;
ptrbb = ptrbb+1;
}
res0 = res0*alpha;
C0[0] = C0[0]+res0;
C0 = C0+1;
}
k = (bk<<0);
bb = bb+k;
C = C+ldc;
}
return 0;
}
| 4,018 |
354 |
<filename>scripts/build_caselists.py
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
from build.common import *
from build.config import *
from build.build import *
import os
import sys
import string
import argparse
import tempfile
import shutil
class Module:
def __init__ (self, name, dirName, binName):
self.name = name
self.dirName = dirName
self.binName = binName
MODULES = [
Module("dE-IT", "internal", "de-internal-tests"),
Module("dEQP-EGL", "egl", "deqp-egl"),
Module("dEQP-GLES2", "gles2", "deqp-gles2"),
Module("dEQP-GLES3", "gles3", "deqp-gles3"),
Module("dEQP-GLES31", "gles31", "deqp-gles31"),
Module("dEQP-VK", "../external/vulkancts/modules/vulkan", "deqp-vk"),
]
DEFAULT_BUILD_DIR = os.path.join(tempfile.gettempdir(), "deqp-caselists", "{targetName}-{buildType}")
DEFAULT_TARGET = "null"
def getModuleByName (name):
for module in MODULES:
if module.name == name:
return module
else:
raise Exception("Unknown module %s" % name)
def getBuildConfig (buildPathPtrn, targetName, buildType):
buildPath = buildPathPtrn.format(
targetName = targetName,
buildType = buildType)
return BuildConfig(buildPath, buildType, ["-DDEQP_TARGET=%s" % targetName])
def getModulesPath (buildCfg):
return os.path.join(buildCfg.getBuildDir(), "modules")
def getBuiltModules (buildCfg):
modules = []
modulesDir = getModulesPath(buildCfg)
for module in MODULES:
fullPath = os.path.join(modulesDir, module.dirName)
if os.path.exists(fullPath) and os.path.isdir(fullPath):
modules.append(module)
return modules
def getCaseListFileName (module, caseListType):
return "%s-cases.%s" % (module.name, caseListType)
def getCaseListPath (buildCfg, module, caseListType):
return os.path.join(getModulesPath(buildCfg), module.dirName, getCaseListFileName(module, caseListType))
def genCaseList (buildCfg, generator, module, caseListType):
workDir = os.path.join(getModulesPath(buildCfg), module.dirName)
pushWorkingDir(workDir)
try:
binPath = generator.getBinaryPath(buildCfg.getBuildType(), os.path.join(".", module.binName))
execute([binPath, "--deqp-runmode=%s-caselist" % caseListType])
finally:
popWorkingDir()
def genAndCopyCaseList (buildCfg, generator, module, dstDir, caseListType):
caseListFile = getCaseListFileName(module, caseListType)
srcPath = getCaseListPath(buildCfg, module, caseListType)
dstPath = os.path.join(dstDir, caseListFile)
if os.path.exists(srcPath):
os.remove(srcPath)
genCaseList(buildCfg, generator, module, caseListType)
if not os.path.exists(srcPath):
raise Exception("%s not generated" % srcPath)
shutil.copyfile(srcPath, dstPath)
def parseArgs ():
parser = argparse.ArgumentParser(description = "Build test case lists",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-b",
"--build-dir",
dest="buildDir",
default=DEFAULT_BUILD_DIR,
help="Temporary build directory")
parser.add_argument("-t",
"--build-type",
dest="buildType",
default="Debug",
help="Build type")
parser.add_argument("-c",
"--deqp-target",
dest="targetName",
default=DEFAULT_TARGET,
help="dEQP build target")
parser.add_argument("--case-list-type",
dest="caseListType",
default="xml",
help="Case list type (xml, txt)")
parser.add_argument("-m",
"--modules",
dest="modules",
help="Comma-separated list of modules to update")
parser.add_argument("dst",
help="Destination directory for test case lists")
return parser.parse_args()
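# Example invocation (target, build type and destination are illustrative):
#   python scripts/build_caselists.py /tmp/case-lists --deqp-target null --build-type Debug -m dEQP-GLES2,dEQP-GLES31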
if __name__ == "__main__":
args = parseArgs()
generator = ANY_GENERATOR
buildCfg = getBuildConfig(args.buildDir, args.targetName, args.buildType)
modules = None
if args.modules:
modules = []
for m in args.modules.split(","):
modules.append(getModuleByName(m))
if modules:
build(buildCfg, generator, [m.binName for m in modules])
else:
build(buildCfg, generator)
modules = getBuiltModules(buildCfg)
for module in modules:
print("Generating test case list for %s" % module.name)
genAndCopyCaseList(buildCfg, generator, module, args.dst, args.caseListType)
| 1,842 |
575 |
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/service/shared_image_backing_gl_common.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "ui/gl/gl_gl_api_implementation.h"
namespace gpu {
SharedImageBackingGLCommon::ScopedResetAndRestoreUnpackState::
ScopedResetAndRestoreUnpackState(gl::GLApi* api,
const UnpackStateAttribs& attribs,
bool uploading_data)
: api_(api) {
if (attribs.es3_capable) {
// Need to unbind any GL_PIXEL_UNPACK_BUFFER for the nullptr in
// glTexImage2D to mean "no pixels" (as opposed to offset 0 in the
// buffer).
api_->glGetIntegervFn(GL_PIXEL_UNPACK_BUFFER_BINDING, &unpack_buffer_);
if (unpack_buffer_)
api_->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, 0);
}
if (uploading_data) {
api_->glGetIntegervFn(GL_UNPACK_ALIGNMENT, &unpack_alignment_);
if (unpack_alignment_ != 4)
api_->glPixelStoreiFn(GL_UNPACK_ALIGNMENT, 4);
if (attribs.es3_capable || attribs.supports_unpack_subimage) {
api_->glGetIntegervFn(GL_UNPACK_ROW_LENGTH, &unpack_row_length_);
if (unpack_row_length_)
api_->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, 0);
api_->glGetIntegervFn(GL_UNPACK_SKIP_ROWS, &unpack_skip_rows_);
if (unpack_skip_rows_)
api_->glPixelStoreiFn(GL_UNPACK_SKIP_ROWS, 0);
api_->glGetIntegervFn(GL_UNPACK_SKIP_PIXELS, &unpack_skip_pixels_);
if (unpack_skip_pixels_)
api_->glPixelStoreiFn(GL_UNPACK_SKIP_PIXELS, 0);
}
if (attribs.es3_capable) {
api_->glGetIntegervFn(GL_UNPACK_SKIP_IMAGES, &unpack_skip_images_);
if (unpack_skip_images_)
api_->glPixelStoreiFn(GL_UNPACK_SKIP_IMAGES, 0);
api_->glGetIntegervFn(GL_UNPACK_IMAGE_HEIGHT, &unpack_image_height_);
if (unpack_image_height_)
api_->glPixelStoreiFn(GL_UNPACK_IMAGE_HEIGHT, 0);
}
if (attribs.desktop_gl) {
api->glGetBooleanvFn(GL_UNPACK_SWAP_BYTES, &unpack_swap_bytes_);
if (unpack_swap_bytes_)
api->glPixelStoreiFn(GL_UNPACK_SWAP_BYTES, GL_FALSE);
api->glGetBooleanvFn(GL_UNPACK_LSB_FIRST, &unpack_lsb_first_);
if (unpack_lsb_first_)
api->glPixelStoreiFn(GL_UNPACK_LSB_FIRST, GL_FALSE);
}
}
}
SharedImageBackingGLCommon::ScopedResetAndRestoreUnpackState::
~ScopedResetAndRestoreUnpackState() {
if (unpack_buffer_)
api_->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, unpack_buffer_);
if (unpack_alignment_ != 4)
api_->glPixelStoreiFn(GL_UNPACK_ALIGNMENT, unpack_alignment_);
if (unpack_row_length_)
api_->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, unpack_row_length_);
if (unpack_image_height_)
api_->glPixelStoreiFn(GL_UNPACK_IMAGE_HEIGHT, unpack_image_height_);
if (unpack_skip_rows_)
api_->glPixelStoreiFn(GL_UNPACK_SKIP_ROWS, unpack_skip_rows_);
if (unpack_skip_images_)
api_->glPixelStoreiFn(GL_UNPACK_SKIP_IMAGES, unpack_skip_images_);
if (unpack_skip_pixels_)
api_->glPixelStoreiFn(GL_UNPACK_SKIP_PIXELS, unpack_skip_pixels_);
if (unpack_swap_bytes_)
api_->glPixelStoreiFn(GL_UNPACK_SWAP_BYTES, unpack_swap_bytes_);
if (unpack_lsb_first_)
api_->glPixelStoreiFn(GL_UNPACK_LSB_FIRST, unpack_lsb_first_);
}
SharedImageBackingGLCommon::ScopedRestoreTexture::ScopedRestoreTexture(
gl::GLApi* api,
GLenum target)
: api_(api), target_(target) {
GLenum get_target = GL_TEXTURE_BINDING_2D;
switch (target) {
case GL_TEXTURE_2D:
get_target = GL_TEXTURE_BINDING_2D;
break;
case GL_TEXTURE_RECTANGLE_ARB:
get_target = GL_TEXTURE_BINDING_RECTANGLE_ARB;
break;
case GL_TEXTURE_EXTERNAL_OES:
get_target = GL_TEXTURE_BINDING_EXTERNAL_OES;
break;
default:
NOTREACHED();
break;
}
GLint old_texture_binding = 0;
api->glGetIntegervFn(get_target, &old_texture_binding);
old_binding_ = old_texture_binding;
}
SharedImageBackingGLCommon::ScopedRestoreTexture::~ScopedRestoreTexture() {
api_->glBindTextureFn(target_, old_binding_);
}
std::unique_ptr<SharedImageRepresentationDawn>
SharedImageBackingGLCommon::ProduceDawnCommon(SharedImageFactory* factory,
SharedImageManager* manager,
MemoryTypeTracker* tracker,
WGPUDevice device,
SharedImageBacking* backing,
bool use_passthrough) {
DCHECK(factory);
// Make SharedContextState from factory the current context
SharedContextState* shared_context_state = factory->GetSharedContextState();
if (!shared_context_state->MakeCurrent(nullptr, true)) {
DLOG(ERROR) << "Cannot make util SharedContextState the current context";
return nullptr;
}
Mailbox dst_mailbox = Mailbox::GenerateForSharedImage();
bool success = factory->CreateSharedImage(
dst_mailbox, backing->format(), backing->size(), backing->color_space(),
kTopLeft_GrSurfaceOrigin, kPremul_SkAlphaType, gpu::kNullSurfaceHandle,
backing->usage() | SHARED_IMAGE_USAGE_WEBGPU);
if (!success) {
DLOG(ERROR) << "Cannot create a shared image resource for internal blit";
return nullptr;
}
// Create a representation for current backing to avoid non-expected release
// and using scope access methods.
std::unique_ptr<SharedImageRepresentationGLTextureBase> src_image;
std::unique_ptr<SharedImageRepresentationGLTextureBase> dst_image;
if (use_passthrough) {
src_image =
manager->ProduceGLTexturePassthrough(backing->mailbox(), tracker);
dst_image = manager->ProduceGLTexturePassthrough(dst_mailbox, tracker);
} else {
src_image = manager->ProduceGLTexture(backing->mailbox(), tracker);
dst_image = manager->ProduceGLTexture(dst_mailbox, tracker);
}
if (!src_image || !dst_image) {
DLOG(ERROR) << "ProduceDawn: Couldn't produce shared image for copy";
return nullptr;
}
std::unique_ptr<SharedImageRepresentationGLTextureBase::ScopedAccess>
source_access = src_image->BeginScopedAccess(
GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
SharedImageRepresentation::AllowUnclearedAccess::kNo);
if (!source_access) {
DLOG(ERROR) << "ProduceDawn: Couldn't access shared image for copy.";
return nullptr;
}
std::unique_ptr<SharedImageRepresentationGLTextureBase::ScopedAccess>
dest_access = dst_image->BeginScopedAccess(
GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
SharedImageRepresentation::AllowUnclearedAccess::kYes);
if (!dest_access) {
DLOG(ERROR) << "ProduceDawn: Couldn't access shared image for copy.";
return nullptr;
}
GLuint source_texture = src_image->GetTextureBase()->service_id();
GLuint dest_texture = dst_image->GetTextureBase()->service_id();
DCHECK_NE(source_texture, dest_texture);
GLenum target = dst_image->GetTextureBase()->target();
// Ensure skia's internal cache of GL context state is reset before using it.
// TODO(crbug.com/1036142): Figure out cases that need this invocation.
shared_context_state->PessimisticallyResetGrContext();
if (use_passthrough) {
gl::GLApi* gl = shared_context_state->context_state()->api();
gl->glCopySubTextureCHROMIUMFn(source_texture, 0, target, dest_texture, 0,
0, 0, 0, 0, dst_image->size().width(),
dst_image->size().height(), false, false,
false);
} else {
// TODO(crbug.com/1036142): Implement copyTextureCHROMIUM for validating
// path.
NOTREACHED();
return nullptr;
}
// Set cleared flag for internal backing to prevent auto clear.
dst_image->SetCleared();
// Safe to destroy factory's ref. The backing is kept alive by GL
// representation ref.
factory->DestroySharedImage(dst_mailbox);
return manager->ProduceDawn(dst_mailbox, tracker, device);
}
// static
void SharedImageBackingGLCommon::MakeTextureAndSetParameters(
GLenum target,
GLuint service_id,
bool framebuffer_attachment_angle,
scoped_refptr<gles2::TexturePassthrough>* passthrough_texture,
gles2::Texture** texture) {
if (!service_id) {
gl::GLApi* api = gl::g_current_gl_context;
ScopedRestoreTexture scoped_restore(api, target);
api->glGenTexturesFn(1, &service_id);
api->glBindTextureFn(target, service_id);
api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
if (framebuffer_attachment_angle) {
api->glTexParameteriFn(target, GL_TEXTURE_USAGE_ANGLE,
GL_FRAMEBUFFER_ATTACHMENT_ANGLE);
}
}
if (passthrough_texture) {
*passthrough_texture =
base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
}
if (texture) {
*texture = new gles2::Texture(service_id);
(*texture)->SetLightweightRef();
(*texture)->SetTarget(target, 1);
(*texture)->set_min_filter(GL_LINEAR);
(*texture)->set_mag_filter(GL_LINEAR);
(*texture)->set_wrap_s(GL_CLAMP_TO_EDGE);
(*texture)->set_wrap_t(GL_CLAMP_TO_EDGE);
}
}
} // namespace gpu
| 4,231 |
575 |
<gh_stars>100-1000
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/media/key_system_support_impl.h"
#include <string>
#include <vector>
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "base/token.h"
#include "content/public/browser/cdm_registry.h"
#include "content/public/browser/plugin_service.h"
#include "content/public/common/cdm_info.h"
#include "content/public/common/webplugininfo.h"
#include "content/public/test/browser_task_environment.h"
#include "media/base/decrypt_config.h"
#include "media/base/video_codecs.h"
#include "mojo/public/cpp/bindings/remote.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace content {
namespace {
using VideoCodec = media::VideoCodec;
using EncryptionScheme = media::EncryptionScheme;
using CdmSessionType = media::CdmSessionType;
const base::Token kTestCdmGuid{1234, 5678};
const char kVersion[] = "1.1.1.1";
const char kTestPath[] = "/aa/bb";
const char kTestFileSystemId[] = "file_system_id";
// Helper function to compare an STL container to an initializer_list.
template <typename Container, typename T>
bool StlEquals(const Container a, std::initializer_list<T> b) {
return a == Container(b);
}
#define EXPECT_STL_EQ(a, ...) \
do { \
EXPECT_TRUE(StlEquals(a, {__VA_ARGS__})); \
} while (false)
#define EXPECT_VIDEO_CODECS(...) \
EXPECT_STL_EQ(capability_->video_codecs, __VA_ARGS__)
#define EXPECT_ENCRYPTION_SCHEMES(...) \
EXPECT_STL_EQ(capability_->encryption_schemes, __VA_ARGS__)
#define EXPECT_SESSION_TYPES(...) \
EXPECT_STL_EQ(capability_->session_types, __VA_ARGS__)
} // namespace
class KeySystemSupportTest : public testing::Test {
protected:
void SetUp() final {
DVLOG(1) << __func__;
KeySystemSupportImpl::Create(
key_system_support_.BindNewPipeAndPassReceiver());
}
// TODO(xhwang): Add tests for hardware secure video codecs and encryption
// schemes.
CdmCapability GetTestCdmCapability() {
return CdmCapability(
{VideoCodec::kCodecVP8, VideoCodec::kCodecVP9},
{EncryptionScheme::kCenc, EncryptionScheme::kCbcs},
{CdmSessionType::kTemporary, CdmSessionType::kPersistentLicense});
}
// Registers |key_system| with |capability|. All other values for CdmInfo have
// some default value as they're not returned by IsKeySystemSupported().
void Register(const std::string& key_system, CdmCapability capability) {
DVLOG(1) << __func__;
CdmRegistry::GetInstance()->RegisterCdm(
CdmInfo(key_system, kTestCdmGuid, base::Version(kVersion),
base::FilePath::FromUTF8Unsafe(kTestPath), kTestFileSystemId,
std::move(capability), key_system, false));
}
  // Determines if |key_system| is registered. If it is, updates
  // |capability_|.
bool IsSupported(const std::string& key_system) {
DVLOG(1) << __func__;
bool is_available = false;
key_system_support_->IsKeySystemSupported(key_system, &is_available,
&capability_);
return is_available;
}
mojo::Remote<media::mojom::KeySystemSupport> key_system_support_;
BrowserTaskEnvironment task_environment_;
// Updated by IsSupported().
media::mojom::KeySystemCapabilityPtr capability_;
};
// Note that as CdmRegistry::GetInstance() is a static, it is shared between
// tests. So use unique key system names in each test below to avoid
// interactions between the tests.
TEST_F(KeySystemSupportTest, NoKeySystems) {
EXPECT_FALSE(IsSupported("KeySystem1"));
EXPECT_FALSE(capability_);
}
TEST_F(KeySystemSupportTest, OneKeySystem) {
Register("KeySystem2", GetTestCdmCapability());
EXPECT_TRUE(IsSupported("KeySystem2"));
EXPECT_VIDEO_CODECS(VideoCodec::kCodecVP8, VideoCodec::kCodecVP9);
EXPECT_ENCRYPTION_SCHEMES(EncryptionScheme::kCenc, EncryptionScheme::kCbcs);
EXPECT_SESSION_TYPES(CdmSessionType::kTemporary,
CdmSessionType::kPersistentLicense);
}
TEST_F(KeySystemSupportTest, MultipleKeySystems) {
Register("KeySystem3", GetTestCdmCapability());
Register("KeySystem4", GetTestCdmCapability());
EXPECT_TRUE(IsSupported("KeySystem3"));
EXPECT_TRUE(IsSupported("KeySystem4"));
}
TEST_F(KeySystemSupportTest, MissingKeySystem) {
Register("KeySystem5", GetTestCdmCapability());
EXPECT_FALSE(IsSupported("KeySystem6"));
EXPECT_FALSE(capability_);
}
} // namespace content
| 1,754 |
344 |
/**
* FXyzSampler_backup.java
*
* Copyright (c) 2013-2016, F(X)yz
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of F(X)yz, any associated website, nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL F(X)yz BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.fxyz3d.ExtrasAndTests;
import org.fxyz3d.FXyzSampleBase;
import org.fxyz3d.FXyzSample;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import javafx.application.Application;
import javafx.beans.Observable;
import javafx.beans.value.ChangeListener;
import javafx.beans.value.ObservableValue;
import javafx.geometry.Insets;
import javafx.geometry.Pos;
import javafx.geometry.Rectangle2D;
import javafx.scene.Node;
import javafx.scene.Scene;
import javafx.scene.SceneAntialiasing;
import javafx.scene.control.Label;
import javafx.scene.control.TextField;
import javafx.scene.control.TreeCell;
import javafx.scene.control.TreeItem;
import javafx.scene.control.TreeView;
import javafx.scene.layout.HBox;
import javafx.scene.layout.Priority;
import static javafx.scene.layout.Region.USE_COMPUTED_SIZE;
import static javafx.scene.layout.Region.USE_PREF_SIZE;
import javafx.scene.layout.StackPane;
import javafx.scene.layout.VBox;
import javafx.scene.paint.Color;
import javafx.stage.Screen;
import javafx.stage.Stage;
import javafx.util.Callback;
import org.fxyz3d.model.EmptySample;
import org.fxyz3d.model.Project;
import org.fxyz3d.model.SampleTree.TreeNode;
import org.fxyz3d.model.WelcomePage;
import org.fxyz3d.util.SampleScanner;
public class FXyzSampler_backup extends Application {
public static final String
GLASS_BLACK_SMOKE = FXyzSampler_backup.class.getResource("cyanBlackGlass.css").toExternalForm();
private Map<String, Project> projectsMap;
private Stage stage;
private HBox rootContainer;
private VBox leftContainer;
private StackPane centerContainer;
private VBox rightContainer;
private FXyzSample selectedSample;
private TreeView<FXyzSample> samplesTreeView;
private TreeItem<FXyzSample> root;
public static void main(String[] args) {
launch(args);
}
@Override
public void start(final Stage stage) throws Exception {
Application.setUserAgentStylesheet(GLASS_BLACK_SMOKE);
this.stage = stage;
// primaryStage.getIcons().add(new Image("/org/controlsfx/samples/controlsfx-logo.png"));
projectsMap = new SampleScanner().discoverSamples();
buildSampleTree(null);
rootContainer = new HBox();
// --- left hand side
leftContainer = new VBox();
leftContainer.setSpacing(3.0);
// search box
final TextField searchBox = new TextField();
searchBox.setPromptText("Search");
searchBox.getStyleClass().addAll("search-box", "fxyz3d-control");
searchBox.textProperty().addListener((Observable o) -> {
buildSampleTree(searchBox.getText());
});
searchBox.setFocusTraversable(false);
// treeview
samplesTreeView = new TreeView<>(root);
samplesTreeView.setShowRoot(false);
samplesTreeView.getStyleClass().add("samples-tree");
samplesTreeView.setMinWidth(USE_PREF_SIZE);
samplesTreeView.setMaxWidth(Double.MAX_VALUE);
samplesTreeView.setCellFactory(new Callback<TreeView<FXyzSample>, TreeCell<FXyzSample>>() {
@Override
public TreeCell<FXyzSample> call(TreeView<FXyzSample> param) {
return new TreeCell<FXyzSample>() {
@Override
protected void updateItem(FXyzSample item, boolean empty) {
super.updateItem(item, empty);
if (empty) {
setText("");
} else {
setText(item.getSampleName());
}
}
};
}
});
samplesTreeView.getSelectionModel().selectedItemProperty().addListener(new ChangeListener<TreeItem<FXyzSample>>() {
@Override
public void changed(ObservableValue<? extends TreeItem<FXyzSample>> observable, TreeItem<FXyzSample> oldValue, TreeItem<FXyzSample> newSample) {
if (newSample == null) {
return;
} else if (newSample.getValue() instanceof EmptySample) {
FXyzSample selectedSample = newSample.getValue();
Project selectedProject = projectsMap.get(selectedSample.getSampleName());
System.out.println(selectedProject);
if (selectedProject != null) {
changeToWelcomeTab(selectedProject.getWelcomePage());
}
return;
}
selectedSample = newSample.getValue();
changeSample();
}
});
samplesTreeView.setFocusTraversable(false);
samplesTreeView.getStyleClass().add("fxyz3d-control");
VBox.setVgrow(searchBox, Priority.NEVER);
VBox.setVgrow(samplesTreeView, Priority.ALWAYS);
leftContainer.getChildren().addAll(searchBox, samplesTreeView);
leftContainer.setPrefSize(USE_PREF_SIZE, USE_COMPUTED_SIZE);
HBox.setHgrow(leftContainer, Priority.SOMETIMES);
// center stack
centerContainer = new StackPane();
centerContainer.setPrefSize(USE_COMPUTED_SIZE, USE_COMPUTED_SIZE);
HBox.setHgrow(centerContainer, Priority.ALWAYS);
// by default we'll show the welcome message of first project in the tree
// if no projects are available, we'll show the default page
List<TreeItem<FXyzSample>> projects = samplesTreeView.getRoot().getChildren();
if (!projects.isEmpty()) {
TreeItem<FXyzSample> firstProject = projects.get(0);
samplesTreeView.getSelectionModel().select(firstProject);
} else {
changeToWelcomeTab(null);
}
rightContainer = new VBox();
rightContainer.setPrefSize(USE_PREF_SIZE, Double.MAX_VALUE);
HBox.setHgrow(rightContainer, Priority.SOMETIMES);
// scene root
rootContainer = new HBox(leftContainer, centerContainer, rightContainer);
rootContainer.setAlignment(Pos.CENTER);
rootContainer.setSpacing(5);
rootContainer.setPadding(new Insets(3));
rootContainer.setPrefSize(USE_COMPUTED_SIZE, USE_COMPUTED_SIZE);
rootContainer.setFillHeight(true);
// put it all together
Scene scene = new Scene(rootContainer, 1024, 800, true, SceneAntialiasing.BALANCED);
scene.setFill(Color.gray(0.6));
stage.setScene(scene);
        // set width / height values to be 75% of the user's screen resolution
Rectangle2D screenBounds = Screen.getPrimary().getVisualBounds();
stage.setWidth(screenBounds.getWidth() * 0.75);
stage.setHeight(screenBounds.getHeight() * .75);
//stage.setMinWidth(grid.getPrefWidth());
stage.setTitle("FXyz-Sampler!");
stage.show();
rootContainer.getStyleClass().addAll("client-root");
}
private String getUserInterfaceFXML(){
String fxmlPath = "", userDir;
userDir = System.getProperty("user.dir");
return fxmlPath;
}
/*==========================================================================
* TreeView
==========================================================================*/
protected void buildSampleTree(String searchText) {
// rebuild the whole tree (it isn't memory intensive - we only scan
// classes once at startup)
root = new TreeItem<>(new EmptySample("FXyz-Sampler"));
root.setExpanded(true);
for (String projectName : projectsMap.keySet()) {
final Project project = projectsMap.get(projectName);
if (project == null) {
continue;
}
// now work through the project sample tree building the rest
TreeNode n = project.getSampleTree().getRoot();
root.getChildren().add(n.createTreeItem());
}
// with this newly built and full tree, we filter based on the search text
if (searchText != null) {
pruneSampleTree(root, searchText);
// FIXME weird bug in TreeView I think
samplesTreeView.setRoot(null);
samplesTreeView.setRoot(root);
}
// and finally we sort the display a little
sort(root, (o1, o2) -> o1.getValue().getSampleName().compareTo(o2.getValue().getSampleName()));
}
private void sort(TreeItem<FXyzSample> node, Comparator<TreeItem<FXyzSample>> comparator) {
node.getChildren().sort(comparator);
for (TreeItem<FXyzSample> child : node.getChildren()) {
sort(child, comparator);
}
}
// true == keep, false == delete
private boolean pruneSampleTree(TreeItem<FXyzSample> treeItem, String searchText) {
// we go all the way down to the leaf nodes, and check if they match
// the search text. If they do, they stay. If they don't, we remove them.
// As we pop back up we check if the branch nodes still have children,
// and if not we remove them too
if (searchText == null) {
return true;
}
if (treeItem.isLeaf()) {
// check for match. Return true if we match (to keep), and false
// to delete
return treeItem.getValue().getSampleName().toUpperCase().contains(searchText.toUpperCase());
} else {
// go down the tree...
List<TreeItem<FXyzSample>> toRemove = new ArrayList<>();
for (TreeItem<FXyzSample> child : treeItem.getChildren()) {
boolean keep = pruneSampleTree(child, searchText);
if (!keep) {
toRemove.add(child);
}
}
// remove the unrelated tree items
treeItem.getChildren().removeAll(toRemove);
// return true if there are children to this branch, false otherwise
// (by returning false we say that we should delete this now-empty branch)
return !treeItem.getChildren().isEmpty();
}
}
/*==========================================================================
* Sample Content Area
=========================================================================*/
private void changeToWelcomeTab(WelcomePage wPage) {
//change to index above 0 -> 0 will be content header overlay
centerContainer.getChildren().removeIf(index-> centerContainer.getChildren().indexOf(index) == 0 && index instanceof StackPane);
if (null == wPage) {
wPage = getDefaultWelcomePage();
}
centerContainer.getChildren().addAll(wPage.getContent());
}
private WelcomePage getDefaultWelcomePage() {
// line 1
Label welcomeLabel1 = new Label("Welcome to FXSampler!");
welcomeLabel1.setStyle("-fx-font-size: 2em; -fx-padding: 0 0 0 5;");
// line 2
Label welcomeLabel2 = new Label(
"Explore the available UI controls and other interesting projects "
+ "by clicking on the options to the left.");
welcomeLabel2.setStyle("-fx-font-size: 1.25em; -fx-padding: 0 0 0 5;");
WelcomePage wPage = new WelcomePage("Welcome!", new VBox(5, welcomeLabel1, welcomeLabel2));
return wPage;
}
protected void changeSample() {
if (selectedSample == null) {
return;
}
if (!centerContainer.getChildren().isEmpty()) {
centerContainer.getChildren().clear();
rightContainer.getChildren().clear();
}
updateTab();
}
private void updateTab() {
centerContainer.getChildren().addAll(buildSampleTabContent(selectedSample));
rightContainer.getChildren().add(selectedSample.getControlPanel());
}
/*==========================================================================
* Source Code Methods
==========================================================================*/
private String getResource(String resourceName, Class<?> baseClass) {
Class<?> clz = baseClass == null ? getClass() : baseClass;
return getResource(clz.getResourceAsStream(resourceName));
}
private String getResource(InputStream is) {
try (BufferedReader br = new BufferedReader(new InputStreamReader(is))) {
String line;
StringBuilder sb = new StringBuilder();
while ((line = br.readLine()) != null) {
sb.append(line);
sb.append("\n");
}
return sb.toString();
} catch (IOException e) {
e.printStackTrace();
return "";
}
}
private String getSourceCode(FXyzSample sample) {
String sourceURL = sample.getSampleSourceURL();
try {
// try loading via the web or local file system
URL url = new URL(sourceURL);
InputStream is = url.openStream();
return getResource(is);
} catch (IOException e) {
// no-op - the URL may not be valid, no biggy
}
return getResource(sourceURL, sample.getClass());
}
private String formatSourceCode(FXyzSample sample) {
String sourceURL = sample.getSampleSourceURL();
String src;
if (sourceURL == null) {
src = "No sample source available";
} else {
src = "Sample Source not found";
try {
src = getSourceCode(sample);
} catch (Throwable ex) {
ex.printStackTrace();
}
}
        // Escape '<' by "&lt;" to ensure correct rendering by SyntaxHighlighter
        src = src.replace("<", "&lt;");
String template = getResource("/fxsampler/util/SourceCodeTemplate.html", null);
return template.replace("<source/>", src);
}
private String formatCss(FXyzSample sample) {
String cssUrl = sample.getControlStylesheetURL();
String src;
if (cssUrl == null) {
src = "No CSS source available";
} else {
src = "Css not found";
try {
src = new String(
Files.readAllBytes(Paths.get(getClass().getResource(cssUrl).toURI()))
);
} catch (URISyntaxException | IOException ex) {
ex.printStackTrace();
}
}
        // Escape '<' by "&lt;" to ensure correct rendering by SyntaxHighlighter
        src = src.replace("<", "&lt;");
String template = getResource("/fxsampler/util/CssTemplate.html", null);
return template.replace("<source/>", src);
}
private Node buildSampleTabContent(FXyzSample sample) {
return FXyzSampleBase.buildSample(sample, stage);
}
}
| 7,064 |
10,225 |
<reponame>CraigMcDonaldCodes/quarkus
package org.jboss.resteasy.reactive.server;
import java.lang.reflect.Method;
import javax.ws.rs.container.ResourceInfo;
/**
* Type that can be injected into places where ResourceInfo can.
* The idea is that this can be used when a piece of code does not need access to the entire resource method
* (which entails a reflective lookup call), where the resource class, method name and parameter types will suffice
*/
public interface SimpleResourceInfo {
/**
* Get the resource class that is the target of a request,
*/
Class<?> getResourceClass();
/**
* Get the name of the resource method that is the target of a request
*/
String getMethodName();
/**
* Get the parameter types of the resource method that is the target of a request
*/
Class<?>[] parameterTypes();
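    // Illustrative injection point, mirroring how ResourceInfo is normally obtained
    // (assumes a JAX-RS @Context injection, e.g. inside a ContainerRequestFilter):
    //   @Context SimpleResourceInfo resourceInfo;
    //   String target = resourceInfo.getResourceClass().getName() + "#" + resourceInfo.getMethodName();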
class NullValues implements SimpleResourceInfo, ResourceInfo {
public static final NullValues INSTANCE = new NullValues();
private NullValues() {
}
@Override
public Method getResourceMethod() {
return null;
}
@Override
public Class<?> getResourceClass() {
return null;
}
@Override
public String getMethodName() {
return null;
}
@Override
public Class<?>[] parameterTypes() {
return new Class[0];
}
}
}
| 518 |
815 |
greeting = """
ParaView mini-test suite
=========================
This is a collection of verification and validation tests that
confirm that a few core capabilities are working in this build.
The suite consists of several individual tests that can be launched
on their own as follows:
pvpython -m paraview.tests.verify_eyedomelighting --output /tmp/result.png
Each test supports a '--help' option that can be used to obtain the list of
available options supported by that particular test.
This root package can be used to launch all the tests one after another.
pvpython -m paraview.tests
--output_directory /tmp/outputs
--baseline_directory /tmp/baselines
"""
import argparse, textwrap, os, os.path
from .. import print_info as log
parser = argparse.ArgumentParser(
prog="paraview.tests",
description=textwrap.dedent(greeting),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", "--interactive", help="enable interaction", action="store_true")
parser.add_argument("-o", "--output_directory", help="output directory", type=str)
parser.add_argument("-v", "--baseline_directory", help="baseline directory (for comparison)", type=str)
def single_yes_or_no_question(question, default_no=True):
choices = ' [y/N]: ' if default_no else ' [Y/n]: '
default_answer = 'n' if default_no else 'y'
reply = str(input(question + choices)).lower().strip() or default_answer
if reply[0] == 'y':
return True
if reply[0] == 'n':
return False
else:
return False if default_no else True
def main(opts):
import importlib
tests = [ "verify_eyedomelighting", "basic_rendering" ]
if opts.output_directory:
os.makedirs(opts.output_directory, exist_ok=True)
for tname in tests:
if opts.interactive and not single_yes_or_no_question("Run test '%s'" % tname):
break
targs = []
if opts.interactive:
targs.append("-i")
if opts.output_directory:
targs.append("-o")
targs.append(os.path.join(opts.output_directory, tname + ".png"))
if opts.baseline_directory:
targs.append("-v")
targs.append(os.path.join(opts.baseline_directory, tname + ".png"))
log("start '%s'" % tname)
tmodule = importlib.import_module(".%s" % tname, __package__)
log(textwrap.wrap(tmodule.__doc__, width=30))
tmodule.main(targs)
log("done '%s'" % tname)
args = parser.parse_args()
main(args)
| 976 |
319 |
<filename>target_firmware/magpie_fw_dev/target/inc/adf_os_lock.h
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Qualcomm Atheros nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @ingroup adf_os_public
* @file adf_os_lock.h
* This file abstracts locking operations.
*/
#ifndef _ADF_OS_LOCK_H
#define _ADF_OS_LOCK_H
#include <adf_os_types.h>
#include <adf_os_lock_pvt.h>
/**
* @brief Platform spinlock object
*/
typedef __adf_os_spinlock_t adf_os_spinlock_t;
/**
* @brief Platform mutex object
*/
typedef __adf_os_mutex_t adf_os_mutex_t;
/**
* @brief Initialize a mutex
*
* @param[in] m mutex to initialize
*/
static inline void adf_os_init_mutex(adf_os_mutex_t *m)
{
__adf_os_init_mutex(m);
}
/**
* @brief Take the mutex
*
* @param[in] m mutex to take
*/
static inline int adf_os_mutex_acquire(adf_os_mutex_t *m)
{
return (__adf_os_mutex_acquire(m));
}
/**
* @brief Give the mutex
*
* @param[in] m mutex to give
*/
static inline void adf_os_mutex_release(adf_os_mutex_t *m)
{
__adf_os_mutex_release(m);
}
/**
* @brief Initialize a spinlock
*
* @param[in] lock spinlock object pointer
*/
static inline void
adf_os_spinlock_init(adf_os_spinlock_t *lock)
{
__adf_os_spinlock_init(lock);
}
/**
* @brief Acquire a spinlock by disabling the interrupts
*
* @param[in] lock spinlock object pointer
* @param[out] flags flags used to hold interrupt state
*/
static inline void
adf_os_spin_lock_irq(adf_os_spinlock_t *lock, a_uint32_t *flags)
{
__adf_os_spin_lock_irq(lock,flags);
}
/**
* @brief Release a spinlock & restore the irq
*
* @param[in] lock spinlock object pointer
* @param[in] flags flags filled in by @ref adf_os_spin_lock_irq
*/
static inline void
adf_os_spin_unlock_irq(adf_os_spinlock_t *lock, a_uint32_t *flags)
{
__adf_os_spin_unlock_irq(lock,flags);
}
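/*
 * Illustrative critical-section pattern using the IRQ-safe pair above
 * (lock initialization and the protected data are the caller's):
 *
 *   a_uint32_t flags;
 *   adf_os_spin_lock_irq(&lock, &flags);
 *   ... touch data shared with the interrupt path ...
 *   adf_os_spin_unlock_irq(&lock, &flags);
 */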
/**
* @brief locks the spinlock mutex in soft irq context
*
* @param[in] lock spinlock object pointer
*/
static inline void
adf_os_spin_lock_bh(adf_os_spinlock_t *lock)
{
__adf_os_spin_lock_bh(lock);
}
/**
* @brief unlocks the spinlock mutex in soft irq context
*
* @param[in] lock spinlock object pointer
*/
static inline void
adf_os_spin_unlock_bh(adf_os_spinlock_t *lock)
{
__adf_os_spin_unlock_bh(lock);
}
/**
* @brief Execute the input function with spinlock held and interrupt disabled.
*
* @param[in] hdl OS handle
* @param[in] lock spinlock to be held for the critical region
 * @param[in] func critical region function to be executed
 * @param[in] arg context of the critical region function
*
* @return Boolean status returned by the critical region function
*/
static inline a_bool_t
adf_os_spinlock_irq_exec(adf_os_handle_t hdl,
adf_os_spinlock_t *lock,
adf_os_irqlocked_func_t func,
void *arg)
{
return __adf_os_spinlock_irq_exec(hdl, lock, func, arg);
}
#endif
| 1,753 |
1,223 |
<reponame>wiltonlazary/Nidium<gh_stars>1000+
# Copyright 2016 Nidium Inc. All rights reserved.
# Use of this source code is governed by a MIT license
# that can be found in the LICENSE file.
{
'targets': [{
'target_name': 'libnidiumcore-includes',
'type': 'none',
'direct_dependent_settings': {
'include_dirs': [
'<(third_party_path)/mozilla-central/js/src/obj/dist/include/',
'<(third_party_path)/mozilla-central/js/src/',
'<(third_party_path)/mozilla-central/nsprpub/dist/include/nspr/',
'<(third_party_path)/http-parser/',
'<(third_party_path)/leveldb/include/',
'<(third_party_path)/jsoncpp/dist',
'<(third_party_path)/rapidxml',
'../src/',
],
'defines': [
#'_FILE_OFFSET_BITS=64',
'__STDC_LIMIT_MACROS',
'JSGC_USE_EXACT_ROOTING'
],
'cflags': [
'-fno-rtti',
#'-fno-exceptions', # rapidxml use exception :/
'-Wno-c++0x-extensions',
'-ffunction-sections',
'-fdata-sections',
# Flags needed to silent some SM warning
'-Wno-invalid-offsetof',
'-Wno-mismatched-tags',
# Include our own js-config.h so it is automatically
# versioned for our build flavour
'-include <(nidium_output_third_party_path)/js-config.h'
],
'cflags_cc': [
'-include ../src/Macros.h'
],
'xcode_settings': {
'OTHER_CFLAGS': [
'-fno-rtti',
#'-fno-exceptions', # rapidxml use exception :/
'-Wno-c++0x-extensions',
'-Wno-invalid-offsetof',
'-Wno-mismatched-tags',
'-include <(nidium_output_third_party_path)/js-config.h',
],
'OTHER_CPLUSPLUSFLAGS': [
'$inherited',
'-include ../src/Macros.h'
],
},
'conditions': [
['nidium_product_define=="NIDIUM_PRODUCT_FRONTEND"', {
'include_dirs': [
'<(third_party_path)/skia/',
'<(third_party_path)/skia/include/core/',
'<(third_party_path)/skia/include/config/',
]}],
],
}
}, {
'target_name': 'libnidiumcore-link',
'type': 'none',
'direct_dependent_settings': {
'conditions': [
['target_os=="mac"', {
"link_settings": {
'libraries': [
'libhttp_parser.a',
'libjs_static.a',
'libnspr4.a',
'libmozglue.a',
'libleveldb.a',
]
}
}],
['target_os=="linux" or target_os=="android"', {
'ldflags': [
'-Wl,--gc-sections',
],
"link_settings": {
'libraries': [
'-ljs_static',
'-lmozglue',
'-lnspr4',
'-lpthread',
'-lrt',
'-ldl',
'-lhttp_parser',
'-lleveldb',
]
}
}]
],
},
}, {
'target_name': 'libnidiumcore',
'type': 'static_library',
'dependencies': [
'<(nidium_network_path)/gyp/network.gyp:*',
'libnidiumcore.gyp:libnidiumcore-includes',
],
'conditions': [
['target_os=="mac"', {
'defines': [
'DSO_EXTENSION=".dylib"'
],
}],
['target_os=="linux" or target_os=="android"', {
'defines': [
'DSO_EXTENSION=".so"'
]
}],
['nidium_js_disable_window_global==1', {
'defines':[
'NIDIUM_DISABLE_WINDOW_GLOBAL'
],
}]
],
'sources': [
'<(third_party_path)/jsoncpp/dist/jsoncpp.cpp',
'../src/Net/HTTP.cpp',
'../src/Net/HTTPParser.cpp',
'../src/Net/HTTPServer.cpp',
'../src/Net/HTTPStream.cpp',
'../src/Net/WebSocket.cpp',
'../src/Net/WebSocketClient.cpp',
'../src/Binding/ThreadLocalContext.cpp',
'../src/Binding/NidiumJS.cpp',
'../src/Binding/JSGlobal.cpp',
'../src/Binding/JSEvents.cpp',
'../src/Binding/JSFile.cpp',
'../src/Binding/JSHTTP.cpp',
'../src/Binding/JSHTTPServer.cpp',
'../src/Binding/JSModules.cpp',
'../src/Binding/JSSocket.cpp',
'../src/Binding/JSThread.cpp',
'../src/Binding/JSDebug.cpp',
'../src/Binding/JSDebugger.cpp',
'../src/Binding/JSConsole.cpp',
'../src/Binding/JSFS.cpp',
'../src/Binding/JSNFS.cpp',
'../src/Binding/JSProcess.cpp',
'../src/Binding/JSUtils.cpp',
'../src/Binding/JSStream.cpp',
'../src/Binding/JSWebSocket.cpp',
'../src/Binding/JSWebSocketClient.cpp',
'../src/Binding/JSDB.cpp',
'../src/Binding/JSOS.cpp',
'../src/Binding/JSVM.cpp',
'../src/Core/SharedMessages.cpp',
'../src/Core/Utils.cpp',
'../src/Core/Messages.cpp',
'../src/Core/DB.cpp',
'../src/Core/TaskManager.cpp',
'../src/Core/Path.cpp',
'../src/Core/Context.cpp',
'../src/IO/File.cpp',
'../src/IO/Stream.cpp',
'../src/IO/FileStream.cpp',
'../src/IO/NFSStream.cpp',
'../src/IO/NFS.cpp',
],
}],
}
| 3,992 |
302 |
#include "iscriptmultiworkerreportdata.h"
#include "every_cpp.h"
namespace BrowserAutomationStudioFramework
{
IScriptMultiWorkerReportData::IScriptMultiWorkerReportData(QObject *parent) :
QObject(parent)
{
}
}
| 82 |
372 |
<gh_stars>100-1000
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.docs.v1.model;
/**
* Deletes content from the document.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Google Docs API. For a detailed explanation see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class DeleteContentRangeRequest extends com.google.api.client.json.GenericJson {
/**
* The range of content to delete. Deleting text that crosses a paragraph boundary may result in
* changes to paragraph styles, lists, positioned objects and bookmarks as the two paragraphs are
* merged. Attempting to delete certain ranges can result in an invalid document structure in
* which case a 400 bad request error is returned. Some examples of invalid delete requests
* include: * Deleting one code unit of a surrogate pair. * Deleting the last newline character of
* a Body, Header, Footer, Footnote, TableCell or TableOfContents. * Deleting the start or end of
* a Table, TableOfContents or Equation without deleting the entire element. * Deleting the
* newline character before a Table, TableOfContents or SectionBreak without deleting the element.
* * Deleting individual rows or cells of a table. Deleting the content within a table cell is
* allowed.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private Range range;
/**
* The range of content to delete. Deleting text that crosses a paragraph boundary may result in
* changes to paragraph styles, lists, positioned objects and bookmarks as the two paragraphs are
* merged. Attempting to delete certain ranges can result in an invalid document structure in
* which case a 400 bad request error is returned. Some examples of invalid delete requests
* include: * Deleting one code unit of a surrogate pair. * Deleting the last newline character of
* a Body, Header, Footer, Footnote, TableCell or TableOfContents. * Deleting the start or end of
* a Table, TableOfContents or Equation without deleting the entire element. * Deleting the
* newline character before a Table, TableOfContents or SectionBreak without deleting the element.
* * Deleting individual rows or cells of a table. Deleting the content within a table cell is
* allowed.
* @return value or {@code null} for none
*/
public Range getRange() {
return range;
}
/**
* The range of content to delete. Deleting text that crosses a paragraph boundary may result in
* changes to paragraph styles, lists, positioned objects and bookmarks as the two paragraphs are
* merged. Attempting to delete certain ranges can result in an invalid document structure in
* which case a 400 bad request error is returned. Some examples of invalid delete requests
* include: * Deleting one code unit of a surrogate pair. * Deleting the last newline character of
* a Body, Header, Footer, Footnote, TableCell or TableOfContents. * Deleting the start or end of
* a Table, TableOfContents or Equation without deleting the entire element. * Deleting the
* newline character before a Table, TableOfContents or SectionBreak without deleting the element.
* * Deleting individual rows or cells of a table. Deleting the content within a table cell is
* allowed.
* @param range range or {@code null} for none
*/
public DeleteContentRangeRequest setRange(Range range) {
this.range = range;
return this;
}
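  // Illustrative construction (assumes the companion Range model exposes
  // setStartIndex/setEndIndex setters; index values are examples):
  //   DeleteContentRangeRequest request = new DeleteContentRangeRequest()
  //       .setRange(new Range().setStartIndex(1).setEndIndex(10));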
@Override
public DeleteContentRangeRequest set(String fieldName, Object value) {
return (DeleteContentRangeRequest) super.set(fieldName, value);
}
@Override
public DeleteContentRangeRequest clone() {
return (DeleteContentRangeRequest) super.clone();
}
}
| 1,235 |
648 |
<gh_stars>100-1000
/*
* Copyright 2016-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.amqp.core;
import java.util.Arrays;
import java.util.Map;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
/**
* Builder providing a fluent API for building {@link Exchange}s.
*
* @author <NAME>
* @author <NAME>
*
* @since 1.6
*
*/
public final class ExchangeBuilder extends AbstractBuilder {
private final String name;
private final String type;
private boolean durable = true;
private boolean autoDelete;
private boolean internal;
private boolean delayed;
private boolean ignoreDeclarationExceptions;
private boolean declare = true;
private Object[] declaringAdmins;
/**
* Construct an instance of the appropriate type.
* @param name the exchange name
* @param type the type name
* @since 1.6.7
* @see ExchangeTypes
*/
public ExchangeBuilder(String name, String type) {
this.name = name;
this.type = type;
}
/**
* Return a {@link DirectExchange} builder.
* @param name the name.
* @return the builder.
*/
public static ExchangeBuilder directExchange(String name) {
return new ExchangeBuilder(name, ExchangeTypes.DIRECT);
}
/**
* Return a {@link TopicExchange} builder.
* @param name the name.
* @return the builder.
*/
public static ExchangeBuilder topicExchange(String name) {
return new ExchangeBuilder(name, ExchangeTypes.TOPIC);
}
/**
* Return a {@link FanoutExchange} builder.
* @param name the name.
* @return the builder.
*/
public static ExchangeBuilder fanoutExchange(String name) {
return new ExchangeBuilder(name, ExchangeTypes.FANOUT);
}
/**
* Return a {@link HeadersExchange} builder.
* @param name the name.
* @return the builder.
*/
public static ExchangeBuilder headersExchange(String name) {
return new ExchangeBuilder(name, ExchangeTypes.HEADERS);
}
/**
* Set the auto delete flag.
* @return the builder.
*/
public ExchangeBuilder autoDelete() {
this.autoDelete = true;
return this;
}
/**
* Set the durable flag.
* @param isDurable the durable flag (default true).
* @return the builder.
*/
public ExchangeBuilder durable(boolean isDurable) {
this.durable = isDurable;
return this;
}
/**
* Add an argument.
* @param key the argument key.
* @param value the argument value.
* @return the builder.
*/
public ExchangeBuilder withArgument(String key, Object value) {
getOrCreateArguments().put(key, value);
return this;
}
/**
* Add the arguments.
* @param arguments the arguments map.
* @return the builder.
*/
public ExchangeBuilder withArguments(Map<String, Object> arguments) {
this.getOrCreateArguments().putAll(arguments);
return this;
}
public ExchangeBuilder alternate(String exchange) {
return withArgument("alternate-exchange", exchange);
}
/**
* Set the internal flag.
* @return the builder.
*/
public ExchangeBuilder internal() {
this.internal = true;
return this;
}
/**
* Set the delayed flag.
* @return the builder.
*/
public ExchangeBuilder delayed() {
this.delayed = true;
return this;
}
/**
* Switch on ignoring exceptions, such as mismatched properties, when declaring.
* @return the builder.
* @since 2.0
*/
public ExchangeBuilder ignoreDeclarationExceptions() {
this.ignoreDeclarationExceptions = true;
return this;
}
/**
* Switch to disable declaration of the exchange by any admin.
* @return the builder.
* @since 2.1
*/
public ExchangeBuilder suppressDeclaration() {
this.declare = false;
return this;
}
/**
* Admin instances, or admin bean names that should declare this exchange.
* @param admins the admins.
* @return the builder.
* @since 2.1
*/
public ExchangeBuilder admins(Object... admins) {
Assert.notNull(admins, "'admins' cannot be null");
Assert.noNullElements(admins, "'admins' can't have null elements");
this.declaringAdmins = Arrays.copyOf(admins, admins.length);
return this;
}
@SuppressWarnings("unchecked")
public <T extends Exchange> T build() {
AbstractExchange exchange;
if (ExchangeTypes.DIRECT.equals(this.type)) {
exchange = new DirectExchange(this.name, this.durable, this.autoDelete, getArguments());
}
else if (ExchangeTypes.TOPIC.equals(this.type)) {
exchange = new TopicExchange(this.name, this.durable, this.autoDelete, getArguments());
}
else if (ExchangeTypes.FANOUT.equals(this.type)) {
exchange = new FanoutExchange(this.name, this.durable, this.autoDelete, getArguments());
}
else if (ExchangeTypes.HEADERS.equals(this.type)) {
exchange = new HeadersExchange(this.name, this.durable, this.autoDelete, getArguments());
}
else {
exchange = new CustomExchange(this.name, this.type, this.durable, this.autoDelete, getArguments());
}
exchange.setInternal(this.internal);
exchange.setDelayed(this.delayed);
exchange.setIgnoreDeclarationExceptions(this.ignoreDeclarationExceptions);
exchange.setShouldDeclare(this.declare);
if (!ObjectUtils.isEmpty(this.declaringAdmins)) {
exchange.setAdminsThatShouldDeclare(this.declaringAdmins);
}
return (T) exchange;
}
}
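// Illustrative usage sketch (added; not part of the original source). Declares a durable
// topic exchange with an alternate exchange for unroutable messages, using only methods
// defined above:
//
//   Exchange exchange = ExchangeBuilder.topicExchange("events")
//       .durable(true)
//       .alternate("unrouted.events")
//       .withArgument("x-custom", "value")
//       .build();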
| 1,823 |
1,442 |
<filename>apps/sequence/values/interval_parameter_controller.h
#ifndef SEQUENCE_INTERVAL_PARAM_CONTROLLER_H
#define SEQUENCE_INTERVAL_PARAM_CONTROLLER_H
#include "../../shared/interval_parameter_controller.h"
namespace Sequence {
class IntervalParameterController : public Shared::IntervalParameterController {
public:
using Shared::IntervalParameterController::IntervalParameterController;
bool setParameterAtIndex(int parameterIndex, double f) override;
};
}
#endif
| 146 |
30,023 |
"""Support for fetching Vulcan data."""
async def get_lessons(client, date_from=None, date_to=None):
"""Support for fetching Vulcan lessons."""
changes = {}
list_ans = []
async for lesson in await client.data.get_changed_lessons(
date_from=date_from, date_to=date_to
):
temp_dict = {}
_id = str(lesson.id)
temp_dict["id"] = lesson.id
temp_dict["number"] = lesson.time.position if lesson.time is not None else None
temp_dict["lesson"] = (
lesson.subject.name if lesson.subject is not None else None
)
temp_dict["room"] = lesson.room.code if lesson.room is not None else None
temp_dict["changes"] = lesson.changes
temp_dict["note"] = lesson.note
temp_dict["reason"] = lesson.reason
temp_dict["event"] = lesson.event
temp_dict["group"] = lesson.group
temp_dict["teacher"] = (
lesson.teacher.display_name if lesson.teacher is not None else None
)
temp_dict["from_to"] = (
lesson.time.displayed_time if lesson.time is not None else None
)
changes[str(_id)] = temp_dict
async for lesson in await client.data.get_lessons(
date_from=date_from, date_to=date_to
):
temp_dict = {}
temp_dict["id"] = lesson.id
temp_dict["number"] = lesson.time.position
temp_dict["time"] = lesson.time
temp_dict["date"] = lesson.date.date
temp_dict["lesson"] = (
lesson.subject.name if lesson.subject is not None else None
)
if lesson.room is not None:
temp_dict["room"] = lesson.room.code
else:
temp_dict["room"] = "-"
temp_dict["visible"] = lesson.visible
temp_dict["changes"] = lesson.changes
temp_dict["group"] = lesson.group
temp_dict["reason"] = None
temp_dict["teacher"] = (
lesson.teacher.display_name if lesson.teacher is not None else None
)
temp_dict["from_to"] = (
lesson.time.displayed_time if lesson.time is not None else None
)
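        # Added note (inferred from the branches below, not in the original source):
        # change types 1 and 4 mark a cancelled lesson ("Lekcja odwołana" = "lesson
        # cancelled"), type 2 a substitution ("Zastępstwo"); reason/teacher details are
        # looked up in the previously collected `changes` map.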
if temp_dict["changes"] is None:
temp_dict["changes"] = ""
elif temp_dict["changes"].type == 1:
temp_dict["lesson"] = f"Lekcja odwołana ({temp_dict['lesson']})"
temp_dict["changes_info"] = f"Lekcja odwołana ({temp_dict['lesson']})"
if str(temp_dict["changes"].id) in changes:
temp_dict["reason"] = changes[str(temp_dict["changes"].id)]["reason"]
elif temp_dict["changes"].type == 2:
temp_dict["lesson"] = f"{temp_dict['lesson']} (Zastępstwo)"
temp_dict["teacher"] = changes[str(temp_dict["changes"].id)]["teacher"]
if str(temp_dict["changes"].id) in changes:
temp_dict["teacher"] = changes[str(temp_dict["changes"].id)]["teacher"]
temp_dict["reason"] = changes[str(temp_dict["changes"].id)]["reason"]
elif temp_dict["changes"].type == 4:
temp_dict["lesson"] = f"Lekcja odwołana ({temp_dict['lesson']})"
if str(temp_dict["changes"].id) in changes:
temp_dict["reason"] = changes[str(temp_dict["changes"].id)]["reason"]
if temp_dict["visible"]:
list_ans.append(temp_dict)
return list_ans
async def get_student_info(client, student_id):
"""Support for fetching Student info by student id."""
student_info = {}
for student in await client.get_students():
if str(student.pupil.id) == str(student_id):
student_info["first_name"] = student.pupil.first_name
if student.pupil.second_name:
student_info["second_name"] = student.pupil.second_name
student_info["last_name"] = student.pupil.last_name
student_info[
"full_name"
] = f"{student.pupil.first_name} {student.pupil.last_name}"
student_info["id"] = student.pupil.id
student_info["class"] = student.class_
student_info["school"] = student.school.name
student_info["symbol"] = student.symbol
break
return student_info
| 1,912 |
1,088 |
<reponame>amznero/graph-learn
/* Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef GRAPHLEARN_SERVICE_DIST_GRPC_SERVICE_H_
#define GRAPHLEARN_SERVICE_DIST_GRPC_SERVICE_H_
#include "graphlearn/proto/service.grpc.pb.h"
#include "graphlearn/proto/service.pb.h"
#include "grpcpp/grpcpp.h"
namespace graphlearn {
class Env;
class Executor;
class Coordinator;
class RequestFactory;
class GrpcServiceImpl : public GraphLearn::Service {
public:
GrpcServiceImpl(Env* env, Executor* executor, Coordinator* coord);
virtual ~GrpcServiceImpl();
::grpc::Status HandleOp(
::grpc::ServerContext* context,
const OpRequestPb* request,
OpResponsePb* response) override;
::grpc::Status HandleStop(
::grpc::ServerContext* context,
const StopRequestPb* request,
StatusResponsePb* response) override;
::grpc::Status HandleReport(
::grpc::ServerContext* context,
const StateRequestPb* request,
StatusResponsePb* response) override;
::grpc::Status HandleDag(
::grpc::ServerContext* context,
const DagDef* request,
StatusResponsePb* response) override;
::grpc::Status HandleDagValues(
::grpc::ServerContext* context,
const DagValuesRequestPb* request,
DagValuesResponsePb* response) override;
private:
Env* env_;
Executor* executor_;
Coordinator* coord_;
RequestFactory* factory_;
};
} // namespace graphlearn
#endif // GRAPHLEARN_SERVICE_DIST_GRPC_SERVICE_H_
| 693 |
1,338 |
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2007 <NAME> <<EMAIL>>
This program can be distributed under the terms of the GNU LGPLv2.
See the file COPYING.LIB
*/
#ifndef USERLAND_FS_FUSE_CONFIG_H
#define USERLAND_FS_FUSE_CONFIG_H
#include "fuse_api.h"
struct fuse_config {
unsigned int uid;
unsigned int gid;
unsigned int umask;
double entry_timeout;
double negative_timeout;
double attr_timeout;
double ac_attr_timeout;
int ac_attr_timeout_set;
int debug;
int hard_remove;
int use_ino;
int readdir_ino;
int set_mode;
int set_uid;
int set_gid;
int direct_io;
int kernel_cache;
int auto_cache;
int intr;
int intr_signal;
int help;
char *modules;
};
#ifdef __cplusplus
extern "C" {
#endif
int fuse_parse_lib_config_args(struct fuse_args* args,
struct fuse_config* config);
int fuse_parse_mount_config_args(struct fuse_args* args);
#ifdef __cplusplus
}
#endif
#endif // USERLAND_FS_FUSE_CONFIG_H
| 372 |
679 |
<reponame>Grosskopf/openoffice
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _CODEMAKER_DEPENDENCY_HXX_
#define _CODEMAKER_DEPENDENCY_HXX_
#include <hash_map>
#include <registry/registry.hxx>
#ifndef __REGISTRY_REFLREAD_HXX__
#include <registry/reflread.hxx>
#endif
#include <codemaker/typemanager.hxx>
#include <codemaker/global.hxx>
#include <osl/diagnose.h>
#define TYPEUSE_NORMAL 0x0001
#define TYPEUSE_SUPER 0x0002
#define TYPEUSE_MEMBER 0x0004
#define TYPEUSE_INPARAM 0x0008
#define TYPEUSE_OUTPARAM 0x0010
#define TYPEUSE_INOUTPARAM 0x0020
#define TYPEUSE_RETURN 0x0040
#define TYPEUSE_EXCEPTION 0x0080
#define TYPEUSE_SCOPE 0x0100
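/* Added note (inferred, not in the original source): the TYPEUSE_* values are powers of
   two and appear intended to be OR-ed together, e.g. (TYPEUSE_MEMBER | TYPEUSE_INPARAM)
   for a type used both as a member and as an in parameter. */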
/**
 * Flag showing the state of code generation. If the flag is set,
 * the code for this type has been generated.
 */
#define CODEGEN_DEFAULT 0x0001
struct TypeUsing
{
TypeUsing(const ::rtl::OString& type, sal_uInt16 use)
: m_type(type)
, m_use(use)
{}
::rtl::OString m_type;
sal_uInt16 m_use;
sal_Bool operator == (const TypeUsing & typeUsing) const
{
OSL_ASSERT(0);
return m_type == typeUsing.m_type && m_use == typeUsing.m_use;
}
};
struct LessTypeUsing
{
sal_Bool operator()(const TypeUsing& tuse1, const TypeUsing& tuse2) const
{
return (tuse1.m_type < tuse2.m_type);
}
};
typedef ::std::set< TypeUsing, LessTypeUsing > TypeUsingSet;
#if (defined( _MSC_VER ) && ( _MSC_VER < 1200 ))
typedef ::std::__hash_map__
<
::rtl::OString,
TypeUsingSet,
HashString,
EqualString,
NewAlloc
> DependencyMap;
typedef ::std::__hash_map__
<
::rtl::OString,
sal_uInt16,
HashString,
EqualString,
NewAlloc
> GenerationMap;
#else
typedef ::std::hash_map
<
::rtl::OString,
TypeUsingSet,
HashString,
EqualString
> DependencyMap;
typedef ::std::hash_map
<
::rtl::OString,
sal_uInt16,
HashString,
EqualString
> GenerationMap;
#endif
struct TypeDependencyImpl
{
TypeDependencyImpl()
: m_refCount(0)
{}
sal_Int32 m_refCount;
DependencyMap m_dependencies;
GenerationMap m_generatedTypes;
};
class TypeDependency
{
public:
TypeDependency();
~TypeDependency();
TypeDependency( const TypeDependency& value )
: m_pImpl( value.m_pImpl )
{
acquire();
}
TypeDependency& operator = ( const TypeDependency& value )
{
release();
m_pImpl = value.m_pImpl;
acquire();
return *this;
}
sal_Bool insert(const ::rtl::OString& type, const ::rtl::OString& depend, sal_uInt16);
TypeUsingSet getDependencies(const ::rtl::OString& type);
sal_Bool hasDependencies(const ::rtl::OString& type);
void setGenerated(const ::rtl::OString& type, sal_uInt16 genFlag=CODEGEN_DEFAULT);
sal_Bool isGenerated(const ::rtl::OString& type, sal_uInt16 genFlag=CODEGEN_DEFAULT);
sal_Int32 getSize() { return m_pImpl->m_generatedTypes.size(); }
protected:
void acquire();
void release();
protected:
TypeDependencyImpl* m_pImpl;
};
sal_Bool checkTypeDependencies(TypeManager& typeMgr, TypeDependency& dependencies, const ::rtl::OString& type, sal_Bool bDepend = sal_False);
#endif // _CODEMAKER_DEPENDENCY_HXX_
| 1,504 |
14,668 |
<gh_stars>1000+
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/constants/ash_features.h"
#include "ash/constants/ash_pref_names.h"
#include "ash/public/cpp/shelf_prefs.h"
#include "base/notreached.h"
#include "base/strings/string_util.h"
#include "base/values.h"
#include "chrome/browser/sync/test/integration/preferences_helper.h"
#include "chrome/browser/sync/test/integration/sync_consent_optional_sync_test.h"
#include "chrome/browser/sync/test/integration/sync_settings_categorization_sync_test.h"
#include "chrome/browser/sync/test/integration/sync_test.h"
#include "chrome/browser/sync/test/integration/updated_progress_marker_checker.h"
#include "chrome/common/pref_names.h"
#include "components/prefs/pref_service.h"
#include "components/sync/base/model_type.h"
#include "components/sync/driver/sync_service.h"
#include "components/sync/driver/sync_user_settings.h"
#include "components/sync/protocol/entity_specifics.pb.h"
#include "content/public/test/browser_test.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
using preferences_helper::ChangeStringPref;
using preferences_helper::GetPrefs;
using testing::Eq;
class SingleClientOsPreferencesSyncTest
: public SyncSettingsCategorizationSyncTest {
public:
SingleClientOsPreferencesSyncTest()
: SyncSettingsCategorizationSyncTest(SINGLE_CLIENT) {}
~SingleClientOsPreferencesSyncTest() override = default;
};
IN_PROC_BROWSER_TEST_F(SingleClientOsPreferencesSyncTest, Sanity) {
ASSERT_TRUE(chromeos::features::IsSyncSettingsCategorizationEnabled());
ASSERT_TRUE(SetupSync()) << "SetupSync() failed.";
// Shelf alignment is a Chrome OS only preference.
ChangeStringPref(/*profile_index=*/0, ash::prefs::kShelfAlignment,
ash::kShelfAlignmentRight);
EXPECT_TRUE(UpdatedProgressMarkerChecker(GetSyncService(0)).Wait());
EXPECT_THAT(GetPrefs(/*index=*/0)->GetString(ash::prefs::kShelfAlignment),
Eq(ash::kShelfAlignmentRight));
}
class SingleClientOsPreferencesOptionalConsentSyncTest
: public SyncConsentOptionalSyncTest {
public:
SingleClientOsPreferencesOptionalConsentSyncTest()
: SyncConsentOptionalSyncTest(SINGLE_CLIENT) {}
~SingleClientOsPreferencesOptionalConsentSyncTest() override = default;
};
IN_PROC_BROWSER_TEST_F(SingleClientOsPreferencesOptionalConsentSyncTest,
DisablingOsSyncFeatureDisablesDataType) {
ASSERT_TRUE(chromeos::features::IsSyncConsentOptionalEnabled());
ASSERT_TRUE(SetupSync());
syncer::SyncService* service = GetSyncService(0);
syncer::SyncUserSettings* settings = service->GetUserSettings();
EXPECT_TRUE(settings->IsOsSyncFeatureEnabled());
EXPECT_TRUE(service->GetActiveDataTypes().Has(syncer::OS_PREFERENCES));
settings->SetOsSyncFeatureEnabled(false);
EXPECT_FALSE(settings->IsOsSyncFeatureEnabled());
EXPECT_FALSE(service->GetActiveDataTypes().Has(syncer::OS_PREFERENCES));
}
class SyncCategorizationBaseTest : public SyncTest {
public:
SyncCategorizationBaseTest() : SyncTest(SyncTest::SINGLE_CLIENT) {}
protected:
static std::string ConvertToSyncedPrefValue(const base::Value& value) {
const std::string res = value.DebugString();
const base::StringPiece trimmed =
base::TrimWhitespaceASCII(res, base::TrimPositions::TRIM_ALL);
return std::string(trimmed);
}
sync_pb::PreferenceSpecifics* GetPreferenceSpecifics(
syncer::ModelType model_type,
sync_pb::EntitySpecifics& specifics) {
switch (model_type) {
case syncer::ModelType::PREFERENCES:
return specifics.mutable_preference();
case syncer::ModelType::PRIORITY_PREFERENCES:
return specifics.mutable_priority_preference()->mutable_preference();
case syncer::ModelType::OS_PREFERENCES:
return specifics.mutable_os_preference()->mutable_preference();
case syncer::ModelType::OS_PRIORITY_PREFERENCES:
return specifics.mutable_os_priority_preference()->mutable_preference();
default:
NOTREACHED();
return specifics.mutable_preference();
}
}
void InjectPreferenceToFakeServer(syncer::ModelType model_type,
const char* name,
const base::Value& value) {
sync_pb::EntitySpecifics specifics;
sync_pb::PreferenceSpecifics* preference_specifics =
GetPreferenceSpecifics(model_type, specifics);
preference_specifics->set_name(name);
preference_specifics->set_value(ConvertToSyncedPrefValue(value));
GetFakeServer()->InjectEntity(
syncer::PersistentUniqueClientEntity::CreateFromSpecificsForTesting(
/*non_unique_name=*/name,
/*client_tag=*/name, specifics,
/*creation_time=*/0, /*last_modified_time=*/0));
}
const char* const kOsPreferenceKey = ash::prefs::kShelfAutoHideBehavior;
const base::Value kOsPreferenceNewValue =
base::Value(ash::kShelfAutoHideBehaviorAlways);
const char* const kOsPriorityPreferenceKey = ::prefs::kTapToClickEnabled;
const base::Value kOsPriorityPreferenceNewValue = base::Value(false);
base::test::ScopedFeatureList features_;
};
class SyncCategorizationEnabledTest : public SyncCategorizationBaseTest {
public:
SyncCategorizationEnabledTest() {
features_.InitAndEnableFeature(ash::features::kSyncSettingsCategorization);
}
};
class SyncCategorizationDisabledTest : public SyncCategorizationBaseTest {
public:
SyncCategorizationDisabledTest() {
features_.InitAndDisableFeature(ash::features::kSyncSettingsCategorization);
}
};
// OS preferences should sync from the new clients as both preferences and OS
// preferences.
IN_PROC_BROWSER_TEST_F(SyncCategorizationEnabledTest,
OSPreferencesSyncAsBothTypes) {
ASSERT_TRUE(SetupClients()) << "SetupClients() failed.";
ASSERT_NE(*GetPrefs(0)->Get(kOsPreferenceKey), kOsPreferenceNewValue);
ASSERT_NE(*GetPrefs(0)->Get(kOsPriorityPreferenceKey),
kOsPriorityPreferenceNewValue);
ASSERT_TRUE(SetupSync());
GetPrefs(/*index=*/0)->Set(kOsPreferenceKey, kOsPreferenceNewValue);
GetPrefs(/*index=*/0)
->Set(kOsPriorityPreferenceKey, kOsPriorityPreferenceNewValue);
// OS preferences are syncing both as OS_PREFERENCES and PREFERENCES to
// support sync to the old clients.
EXPECT_TRUE(FakeServerPrefMatchesValueChecker(
syncer::ModelType::OS_PREFERENCES, kOsPreferenceKey,
ConvertToSyncedPrefValue(kOsPreferenceNewValue))
.Wait());
EXPECT_TRUE(FakeServerPrefMatchesValueChecker(
syncer::ModelType::PREFERENCES, kOsPreferenceKey,
ConvertToSyncedPrefValue(kOsPreferenceNewValue))
.Wait());
// Same with OS priority preferences.
EXPECT_TRUE(FakeServerPrefMatchesValueChecker(
syncer::ModelType::OS_PRIORITY_PREFERENCES,
kOsPriorityPreferenceKey,
ConvertToSyncedPrefValue(kOsPriorityPreferenceNewValue))
.Wait());
EXPECT_TRUE(FakeServerPrefMatchesValueChecker(
syncer::ModelType::PRIORITY_PREFERENCES,
kOsPriorityPreferenceKey,
ConvertToSyncedPrefValue(kOsPriorityPreferenceNewValue))
.Wait());
}
// Old clients should get synced prefs.
IN_PROC_BROWSER_TEST_F(SyncCategorizationDisabledTest, ReceiveSyncedOSPrefs) {
InjectPreferenceToFakeServer(syncer::PREFERENCES, kOsPreferenceKey,
kOsPreferenceNewValue);
InjectPreferenceToFakeServer(syncer::OS_PREFERENCES, kOsPreferenceKey,
kOsPreferenceNewValue);
InjectPreferenceToFakeServer(syncer::PRIORITY_PREFERENCES,
kOsPriorityPreferenceKey,
kOsPriorityPreferenceNewValue);
InjectPreferenceToFakeServer(syncer::OS_PRIORITY_PREFERENCES,
kOsPriorityPreferenceKey,
kOsPriorityPreferenceNewValue);
ASSERT_TRUE(SetupSync()) << "SetupSync() failed.";
EXPECT_EQ(*GetPrefs(/*index=*/0)->Get(kOsPreferenceKey),
kOsPreferenceNewValue);
EXPECT_EQ(*GetPrefs(/*index=*/0)->Get(kOsPriorityPreferenceKey),
kOsPriorityPreferenceNewValue);
}
// OS preferences are syncing only as browser prefs on the old clients.
IN_PROC_BROWSER_TEST_F(SyncCategorizationDisabledTest,
OSPreferencesSyncOnlyAsBrowserPrefs) {
ASSERT_TRUE(SetupClients()) << "SetupClients() failed.";
ASSERT_NE(*GetPrefs(0)->Get(kOsPreferenceKey), kOsPreferenceNewValue);
ASSERT_NE(*GetPrefs(0)->Get(kOsPriorityPreferenceKey),
kOsPriorityPreferenceNewValue);
ASSERT_TRUE(SetupSync());
GetPrefs(/*index=*/0)->Set(kOsPreferenceKey, kOsPreferenceNewValue);
GetPrefs(/*index=*/0)
->Set(kOsPriorityPreferenceKey, kOsPriorityPreferenceNewValue);
EXPECT_TRUE(FakeServerPrefMatchesValueChecker(
syncer::ModelType::PREFERENCES, kOsPreferenceKey,
ConvertToSyncedPrefValue(kOsPreferenceNewValue))
.Wait());
EXPECT_FALSE(
preferences_helper::GetPreferenceInFakeServer(
syncer::ModelType::OS_PREFERENCES, kOsPreferenceKey, GetFakeServer())
.has_value());
EXPECT_TRUE(FakeServerPrefMatchesValueChecker(
syncer::ModelType::PRIORITY_PREFERENCES,
kOsPriorityPreferenceKey,
ConvertToSyncedPrefValue(kOsPriorityPreferenceNewValue))
.Wait());
EXPECT_FALSE(preferences_helper::GetPreferenceInFakeServer(
syncer::ModelType::OS_PRIORITY_PREFERENCES, kOsPreferenceKey,
GetFakeServer())
.has_value());
}
// OS preferences are not getting synced from the browser prefs on the new
// clients.
IN_PROC_BROWSER_TEST_F(SyncCategorizationEnabledTest,
DontReceiveSyncedOSPrefsFromOldClients) {
InjectPreferenceToFakeServer(syncer::PREFERENCES, kOsPreferenceKey,
kOsPreferenceNewValue);
InjectPreferenceToFakeServer(syncer::PRIORITY_PREFERENCES,
kOsPriorityPreferenceKey,
kOsPriorityPreferenceNewValue);
ASSERT_TRUE(SetupSync()) << "SetupSync() failed.";
EXPECT_TRUE(GetPrefs(/*index=*/0)
->FindPreference(kOsPreferenceKey)
->IsDefaultValue());
EXPECT_TRUE(GetPrefs(/*index=*/0)
->FindPreference(kOsPriorityPreferenceKey)
->IsDefaultValue());
}
} // namespace
| 4,379 |
1,402 |
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: <NAME>, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Similarity3.h
* @brief Implementation of Similarity3 transform
* @author <NAME>
* @author <NAME>
*/
#pragma once
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/Lie.h>
#include <gtsam/base/Manifold.h>
#include <gtsam/dllexport.h>
namespace gtsam {
// Forward declarations
class Pose3;
/**
* 3D similarity transform
*/
class Similarity3: public LieGroup<Similarity3, 7> {
/// @name Pose Concept
/// @{
typedef Rot3 Rotation;
typedef Point3 Translation;
/// @}
private:
Rot3 R_;
Point3 t_;
double s_;
public:
/// @name Constructors
/// @{
/// Default constructor
GTSAM_EXPORT Similarity3();
/// Construct pure scaling
GTSAM_EXPORT Similarity3(double s);
/// Construct from GTSAM types
GTSAM_EXPORT Similarity3(const Rot3& R, const Point3& t, double s);
/// Construct from Eigen types
GTSAM_EXPORT Similarity3(const Matrix3& R, const Vector3& t, double s);
/// Construct from matrix [R t; 0 s^-1]
GTSAM_EXPORT Similarity3(const Matrix4& T);
/// @}
/// @name Testable
/// @{
/// Compare with tolerance
GTSAM_EXPORT bool equals(const Similarity3& sim, double tol) const;
/// Exact equality
GTSAM_EXPORT bool operator==(const Similarity3& other) const;
/// Print with optional string
GTSAM_EXPORT void print(const std::string& s) const;
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Similarity3& p);
/// @}
/// @name Group
/// @{
/// Return an identity transform
GTSAM_EXPORT static Similarity3 identity();
/// Composition
GTSAM_EXPORT Similarity3 operator*(const Similarity3& S) const;
/// Return the inverse
GTSAM_EXPORT Similarity3 inverse() const;
/// @}
/// @name Group action on Point3
/// @{
/// Action on a point p is s*(R*p+t)
GTSAM_EXPORT Point3 transformFrom(const Point3& p, //
OptionalJacobian<3, 7> H1 = boost::none, //
OptionalJacobian<3, 3> H2 = boost::none) const;
/**
* Action on a pose T.
* |Rs ts| |R t| |Rs*R Rs*t+ts|
* |0 1/s| * |0 1| = | 0 1/s |, the result is still a Sim3 object.
   * To retrieve a Pose3, we normalize the scale value to 1.
* |Rs*R Rs*t+ts| |Rs*R s(Rs*t+ts)|
* | 0 1/s | = | 0 1 |
*
* This group action satisfies the compatibility condition.
* For more details, refer to: https://en.wikipedia.org/wiki/Group_action
*/
GTSAM_EXPORT Pose3 transformFrom(const Pose3& T) const;
/** syntactic sugar for transformFrom */
GTSAM_EXPORT Point3 operator*(const Point3& p) const;
/**
* Create Similarity3 by aligning at least three point pairs
*/
GTSAM_EXPORT static Similarity3 Align(const std::vector<Point3Pair>& abPointPairs);
/**
* Create the Similarity3 object that aligns at least two pose pairs.
* Each pair is of the form (aTi, bTi).
* Given a list of pairs in frame a, and a list of pairs in frame b, Align()
* will compute the best-fit Similarity3 aSb transformation to align them.
* First, the rotation aRb will be computed as the average (Karcher mean) of
* many estimates aRb (from each pair). Afterwards, the scale factor will be computed
* using the algorithm described here:
* http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
*/
GTSAM_EXPORT static Similarity3 Align(const std::vector<Pose3Pair>& abPosePairs);
/// @}
/// @name Lie Group
/// @{
/** Log map at the identity
* \f$ [R_x,R_y,R_z, t_x, t_y, t_z, \lambda] \f$
*/
GTSAM_EXPORT static Vector7 Logmap(const Similarity3& s, //
OptionalJacobian<7, 7> Hm = boost::none);
/** Exponential map at the identity
*/
GTSAM_EXPORT static Similarity3 Expmap(const Vector7& v, //
OptionalJacobian<7, 7> Hm = boost::none);
/// Chart at the origin
struct ChartAtOrigin {
static Similarity3 Retract(const Vector7& v, ChartJacobian H = boost::none) {
return Similarity3::Expmap(v, H);
}
static Vector7 Local(const Similarity3& other, ChartJacobian H = boost::none) {
return Similarity3::Logmap(other, H);
}
};
using LieGroup<Similarity3, 7>::inverse;
/**
* wedge for Similarity3:
* @param xi 7-dim twist (w,u,lambda) where
* @return 4*4 element of Lie algebra that can be exponentiated
* TODO(frank): rename to Hat, make part of traits
*/
GTSAM_EXPORT static Matrix4 wedge(const Vector7& xi);
/// Project from one tangent space to another
GTSAM_EXPORT Matrix7 AdjointMap() const;
/// @}
/// @name Standard interface
/// @{
/// Calculate 4*4 matrix group equivalent
GTSAM_EXPORT const Matrix4 matrix() const;
/// Return a GTSAM rotation
const Rot3& rotation() const {
return R_;
}
/// Return a GTSAM translation
const Point3& translation() const {
return t_;
}
/// Return the scale
double scale() const {
return s_;
}
/// Convert to a rigid body pose (R, s*t)
/// TODO(frank): why is this here? Red flag! Definitely don't have it as a cast.
GTSAM_EXPORT operator Pose3() const;
/// Dimensionality of tangent space = 7 DOF - used to autodetect sizes
inline static size_t Dim() {
return 7;
}
/// Dimensionality of tangent space = 7 DOF
inline size_t dim() const {
return 7;
}
/// @}
/// @name Helper functions
/// @{
private:
/// Calculate expmap and logmap coefficients.
static Matrix3 GetV(Vector3 w, double lambda);
/// @}
};
template<>
inline Matrix wedge<Similarity3>(const Vector& xi) {
return Similarity3::wedge(xi);
}
template<>
struct traits<Similarity3> : public internal::LieGroup<Similarity3> {};
template<>
struct traits<const Similarity3> : public internal::LieGroup<Similarity3> {};
} // namespace gtsam
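// Illustrative usage sketch (added; assumed against the interface declared above, not
// taken from the original header):
//
//   gtsam::Similarity3 aSb(gtsam::Rot3::Yaw(0.1), gtsam::Point3(1.0, 2.0, 3.0), 2.0);
//   gtsam::Point3 p_b(0.5, 0.0, 0.0);
//   gtsam::Point3 p_a = aSb.transformFrom(p_b); // computes s * (R * p + t)
//   gtsam::Pose3 T_a = aSb.transformFrom(gtsam::Pose3()); // group action on a pose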
| 2,156 |
402 |
<gh_stars>100-1000
/*
* Copyright 2000-2021 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.flow.dom;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
public class StyleUtilTest {
    private static final Map<String, String> stylePropertyToAttribute = new java.util.HashMap<>();
    static {
        stylePropertyToAttribute.put("width", "width");
        stylePropertyToAttribute.put("borderRadius", "border-radius");
        stylePropertyToAttribute.put("webkitBorderRadius",
                "-webkit-border-radius");
        stylePropertyToAttribute.put("mozBorderRadius", "-moz-border-radius");
        stylePropertyToAttribute.put("msUserSelect", "-ms-user-select");
        stylePropertyToAttribute.put("oUserSelect", "-o-user-select");
    }
@Test
public void attributeToProperty() {
        stylePropertyToAttribute.entrySet().forEach((entry) -> {
String property = entry.getKey();
String attribute = entry.getValue();
Assert.assertEquals(property,
StyleUtil.styleAttributeToProperty(attribute));
});
}
@Test
public void propertyToAttribute() {
        stylePropertyToAttribute.entrySet().forEach((entry) -> {
String property = entry.getKey();
String attribute = entry.getValue();
Assert.assertEquals(attribute,
StyleUtil.stylePropertyToAttribute(property));
});
}
}
| 721 |
488 |
// Example code from Xen.
#define ADD_BRACE 0
int foobar()
{
if (1)
{
switch (42)
{
default:
#if ADD_BRACE
{
#endif
return ( {
int x;
x = 0;
} );
#if ADD_BRACE
}
#endif
}
}
#if 1
else
{
}
#endif
return 0;
}
| 338 |
1,031 |
<reponame>kyletanyag/LL-Smartcard
#include "ccache_swig_config.h"
#define CCACHE_VERSION SWIG_VERSION
#ifndef _WIN32
#include "config.h"
#else
#include <sys/locking.h>
#include "config_win32.h"
#endif
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifndef _WIN32
#include <sys/wait.h>
#include <sys/mman.h>
#else
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0500
#endif
#include <windows.h>
#include <shlobj.h>
#endif
#include <sys/file.h>
#include <fcntl.h>
#include <time.h>
#include <string.h>
#include <ctype.h>
#include <utime.h>
#include <stdarg.h>
#include <dirent.h>
#include <limits.h>
#ifdef HAVE_PWD_H
#include <pwd.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef ENABLE_ZLIB
#include <zlib.h>
#endif
#define STATUS_NOTFOUND 3
#define STATUS_FATAL 4
#define STATUS_NOCACHE 5
#define MYNAME PROGRAM_NAME
#define LIMIT_MULTIPLE 0.8
/* default maximum cache size */
#ifndef DEFAULT_MAXSIZE
#define DEFAULT_MAXSIZE (1000*1000)
#endif
/* file copy mode */
#ifdef ENABLE_ZLIB
#define COPY_UNCOMPRESSED 0
#define COPY_FROM_CACHE 1
#define COPY_TO_CACHE 2
#endif
enum stats {
STATS_NONE=0,
STATS_STDOUT,
STATS_STATUS,
STATS_ERROR,
STATS_TOCACHE,
STATS_PREPROCESSOR,
STATS_COMPILER,
STATS_MISSING,
STATS_CACHED,
STATS_ARGS,
STATS_LINK,
STATS_NUMFILES,
STATS_TOTALSIZE,
STATS_MAXFILES,
STATS_MAXSIZE,
STATS_NOTC,
STATS_DEVICE,
STATS_NOINPUT,
STATS_ENVIRONMMENT,
STATS_MULTIPLE,
STATS_CONFTEST,
STATS_UNSUPPORTED,
STATS_OUTSTDOUT,
STATS_END
};
typedef unsigned uint32;
#include "mdfour.h"
void hash_start(void);
void hash_string(const char *s);
void hash_int(int x);
void hash_file(const char *fname);
char *hash_result(void);
void hash_buffer(const char *s, int len);
void cc_log(const char *format, ...);
void fatal(const char *msg);
void copy_fd(int fd_in, int fd_out);
int safe_rename(const char* oldpath, const char* newpath);
int move_file(const char *src, const char *dest);
int test_if_compressed(const char *filename);
int commit_to_cache(const char *src, const char *dest, int hardlink);
int retrieve_from_cache(const char *src, const char *dest, int hardlink);
int create_dir(const char *dir);
int create_cachedirtag(const char *dir);
void x_asprintf(char **ptr, const char *format, ...);
char *x_strdup(const char *s);
void *x_realloc(void *ptr, size_t size);
void *x_malloc(size_t size);
void traverse(const char *dir, void (*fn)(const char *, struct stat *));
char *str_basename(const char *s);
char *dirname(char *s);
int lock_fd(int fd);
size_t file_size(struct stat *st);
int safe_open(const char *fname);
char *x_realpath(const char *path);
char *gnu_getcwd(void);
int create_empty_file(const char *fname);
const char *get_home_directory(void);
int x_utimes(const char *filename);
#ifdef _WIN32
void perror_win32(LPTSTR pszFunction);
#endif
void stats_update(enum stats stat);
void stats_zero(void);
void stats_summary(void);
void stats_tocache(size_t size, size_t numfiles);
void stats_read(const char *stats_file, unsigned counters[STATS_END]);
int stats_set_limits(long maxfiles, long maxsize);
size_t value_units(const char *s);
void display_size(unsigned v);
void stats_set_sizes(const char *dir, size_t num_files, size_t total_size);
int unify_hash(const char *fname);
#ifndef HAVE_VASPRINTF
int vasprintf(char **, const char *, va_list );
#endif
#ifndef HAVE_ASPRINTF
int asprintf(char **ptr, const char *format, ...);
#endif
#ifndef HAVE_SNPRINTF
int snprintf(char *,size_t ,const char *, ...);
#endif
void cleanup_dir(const char *dir, size_t maxfiles, size_t maxsize, size_t minfiles);
void cleanup_all(const char *dir);
void wipe_all(const char *dir);
#ifdef _WIN32
char *argvtos(char **argv);
#endif
int execute(char **argv,
const char *path_stdout,
const char *path_stderr);
char *find_executable(const char *name, const char *exclude_name);
void display_execute_args(char **argv);
typedef struct {
char **argv;
int argc;
} ARGS;
ARGS *args_init(int , char **);
void args_add(ARGS *args, const char *s);
void args_add_prefix(ARGS *args, const char *s);
void args_pop(ARGS *args, int n);
void args_strip(ARGS *args, const char *prefix);
void args_remove_first(ARGS *args);
extern int ccache_verbose;
#if HAVE_COMPAR_FN_T
#define COMPAR_FN_T __compar_fn_t
#else
typedef int (*COMPAR_FN_T)(const void *, const void *);
#endif
/* work with silly DOS binary open */
#ifndef O_BINARY
#define O_BINARY 0
#endif
/* mkstemp() on some versions of cygwin doesn't handle binary files, so
override */
/* Seems okay in Cygwin 1.7.0
#ifdef __CYGWIN__
#undef HAVE_MKSTEMP
#endif
*/
| 1,879 |
489 |
<reponame>TheBoringBakery/Riot-Watcher
from unittest.mock import MagicMock
import pytest
from riotwatcher._apis.team_fight_tactics import SummonerApi
@pytest.mark.tft
@pytest.mark.unit
class TestSummonerApi:
def test_by_account(self):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
summoner = SummonerApi(mock_base_api)
region = "afas"
encrypted_account_id = "15asf2-54321"
ret = summoner.by_account(region, encrypted_account_id)
mock_base_api.raw_request.assert_called_once_with(
SummonerApi.__name__,
summoner.by_account.__name__,
region,
f"https://{region}.api.riotgames.com/tft/summoner/v1/summoners/by-account/{encrypted_account_id}",
{},
)
assert ret is expected_return
def test_by_name(self):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
summoner = SummonerApi(mock_base_api)
region = "afas"
summoner_name = "pseudonym117"
ret = summoner.by_name(region, summoner_name)
mock_base_api.raw_request.assert_called_once_with(
SummonerApi.__name__,
summoner.by_name.__name__,
region,
f"https://{region}.api.riotgames.com/tft/summoner/v1/summoners/by-name/{summoner_name}",
{},
)
assert ret is expected_return
def test_by_puuid(self):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
summoner = SummonerApi(mock_base_api)
region = "afas"
puuid = "15462gsfg321"
ret = summoner.by_puuid(region, puuid)
mock_base_api.raw_request.assert_called_once_with(
SummonerApi.__name__,
summoner.by_puuid.__name__,
region,
f"https://{region}.api.riotgames.com/tft/summoner/v1/summoners/by-puuid/{puuid}",
{},
)
assert ret is expected_return
def test_by_id(self):
mock_base_api = MagicMock()
expected_return = object()
mock_base_api.raw_request.return_value = expected_return
summoner = SummonerApi(mock_base_api)
region = "afas"
encrypted_summoner_id = "sdfgasg222"
ret = summoner.by_id(region, encrypted_summoner_id)
mock_base_api.raw_request.assert_called_once_with(
SummonerApi.__name__,
summoner.by_id.__name__,
region,
f"https://{region}.api.riotgames.com/tft/summoner/v1/summoners/{encrypted_summoner_id}",
{},
)
assert ret is expected_return
| 1,382 |
1,418 |
package aima.core.logic.fol.inference.proof;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import aima.core.logic.fol.kb.data.Chain;
import aima.core.logic.fol.parsing.ast.Term;
import aima.core.logic.fol.parsing.ast.Variable;
/**
* @author <NAME>
*
*/
public class ProofStepChainCancellation extends AbstractProofStep {
private List<ProofStep> predecessors = new ArrayList<ProofStep>();
private Chain cancellation = null;
private Chain cancellationOf = null;
private Map<Variable, Term> subst = null;
public ProofStepChainCancellation(Chain cancellation, Chain cancellationOf,
Map<Variable, Term> subst) {
this.cancellation = cancellation;
this.cancellationOf = cancellationOf;
this.subst = subst;
this.predecessors.add(cancellationOf.getProofStep());
}
//
// START-ProofStep
@Override
public List<ProofStep> getPredecessorSteps() {
return Collections.unmodifiableList(predecessors);
}
@Override
public String getProof() {
return cancellation.toString();
}
@Override
public String getJustification() {
return "Cancellation: " + cancellationOf.getProofStep().getStepNumber()
+ " " + subst;
}
// END-ProofStep
//
}
| 467 |
348 |
<filename>docs/data/t2/076/76694.json<gh_stars>100-1000
{"nom":"Tocqueville-en-Caux","dpt":"Seine-Maritime","inscrits":113,"abs":18,"votants":95,"blancs":7,"nuls":3,"exp":85,"res":[{"panneau":"2","voix":52},{"panneau":"1","voix":33}]}
| 104 |
348 |
{"nom":"Saint-Roch","circ":"5ème circonscription","dpt":"Indre-et-Loire","inscrits":953,"abs":500,"votants":453,"blancs":48,"nuls":7,"exp":398,"res":[{"nuance":"REM","nom":"<NAME>","voix":248},{"nuance":"LR","nom":"<NAME>","voix":150}]}
| 94 |
5,964 |
#ifndef V8_TORQUE_ARGUMENTS_FROM_DSL_BASE_H__
#define V8_TORQUE_ARGUMENTS_FROM_DSL_BASE_H__
#include "src/compiler/code-assembler.h"
#include "src/code-stub-assembler.h"
#include "src/utils.h"
#include "torque-generated/class-definitions-from-dsl.h"
namespace v8 {
namespace internal {
class ArgumentsBuiltinsFromDSLAssembler {
public:
explicit ArgumentsBuiltinsFromDSLAssembler(compiler::CodeAssemblerState* state) : state_(state), ca_(state) { USE(state_, ca_); }
struct ArgumentsInfo {
compiler::TNode<RawPtrT> frame;
compiler::TNode<BInt> argument_count;
compiler::TNode<BInt> formal_parameter_count;
std::tuple<compiler::TNode<RawPtrT>, compiler::TNode<BInt>, compiler::TNode<BInt>> Flatten() const {
return std::tuple_cat(std::make_tuple(frame), std::make_tuple(argument_count), std::make_tuple(formal_parameter_count));
}
};
ArgumentsBuiltinsFromDSLAssembler::ArgumentsInfo GetArgumentsFrameAndCount(compiler::TNode<Context> p_context, compiler::TNode<JSFunction> p_f);
private:
compiler::CodeAssemblerState* const state_;
compiler::CodeAssembler ca_;
};
} // namespace internal
} // namespace v8
#endif // V8_TORQUE_ARGUMENTS_FROM_DSL_BASE_H__
| 465 |
678 |
<gh_stars>100-1000
/**
* This header is generated by class-dump-z 0.2b.
*
* Source: /System/Library/PrivateFrameworks/iAdCore.framework/iAdCore
*/
#import <iAdCore/XXUnknownSuperclass.h>
@class NSMutableArray, NSString, ADLogMetaData;
@interface ADLogAdContentErrorRequest : XXUnknownSuperclass {
ADLogMetaData *_metaData; // 4 = 0x4
BOOL _hasErrorType; // 8 = 0x8
int _errorType; // 12 = 0xc
NSString *_errorMessage; // 16 = 0x10
BOOL _hasLineNumber; // 20 = 0x14
int _lineNumber; // 24 = 0x18
NSString *_uRL; // 28 = 0x1c
NSMutableArray *_lines; // 32 = 0x20
NSMutableArray *_functionNames; // 36 = 0x24
}
@property(retain, nonatomic) NSMutableArray *functionNames; // G=0x4ef79; S=0x4ef89; @synthesize=_functionNames
@property(retain, nonatomic) NSMutableArray *lines; // G=0x4ef45; S=0x4ef55; @synthesize=_lines
@property(retain, nonatomic) NSString *uRL; // G=0x4ef11; S=0x4ef21; @synthesize=_uRL
@property(readonly, assign, nonatomic) BOOL hasURL; // G=0x4e5b9;
@property(assign, nonatomic) int lineNumber; // G=0x4ef01; S=0x4e595; @synthesize=_lineNumber
@property(assign, nonatomic) BOOL hasLineNumber; // G=0x4eee1; S=0x4eef1; @synthesize=_hasLineNumber
@property(retain, nonatomic) NSString *errorMessage; // G=0x4eead; S=0x4eebd; @synthesize=_errorMessage
@property(readonly, assign, nonatomic) BOOL hasErrorMessage; // G=0x4e57d;
@property(assign, nonatomic) int errorType; // G=0x4ee9d; S=0x4e559; @synthesize=_errorType
@property(assign, nonatomic) BOOL hasErrorType; // G=0x4ee7d; S=0x4ee8d; @synthesize=_hasErrorType
@property(retain, nonatomic) ADLogMetaData *metaData; // G=0x4ee49; S=0x4ee59; @synthesize=_metaData
@property(readonly, assign, nonatomic) BOOL hasMetaData; // G=0x4e541;
// declared property setter: - (void)setFunctionNames:(id)names; // 0x4ef89
// declared property getter: - (id)functionNames; // 0x4ef79
// declared property setter: - (void)setLines:(id)lines; // 0x4ef55
// declared property getter: - (id)lines; // 0x4ef45
// declared property setter: - (void)setURL:(id)url; // 0x4ef21
// declared property getter: - (id)uRL; // 0x4ef11
// declared property getter: - (int)lineNumber; // 0x4ef01
// declared property setter: - (void)setHasLineNumber:(BOOL)number; // 0x4eef1
// declared property getter: - (BOOL)hasLineNumber; // 0x4eee1
// declared property setter: - (void)setErrorMessage:(id)message; // 0x4eebd
// declared property getter: - (id)errorMessage; // 0x4eead
// declared property getter: - (int)errorType; // 0x4ee9d
// declared property setter: - (void)setHasErrorType:(BOOL)type; // 0x4ee8d
// declared property getter: - (BOOL)hasErrorType; // 0x4ee7d
// declared property setter: - (void)setMetaData:(id)data; // 0x4ee59
// declared property getter: - (id)metaData; // 0x4ee49
- (Class)responseClass; // 0x4ee2d
- (unsigned)requestTypeCode; // 0x4ee29
- (void)writeTo:(id)to; // 0x4ebad
- (BOOL)readFrom:(id)from; // 0x4e951
- (id)dictionaryRepresentation; // 0x4e789
- (id)description; // 0x4e719
- (id)functionNamesAtIndex:(unsigned)index; // 0x4e6f9
- (unsigned)functionNamesCount; // 0x4e6d9
- (void)addFunctionNames:(id)names; // 0x4e675
- (id)linesAtIndex:(unsigned)index; // 0x4e655
- (unsigned)linesCount; // 0x4e635
- (void)addLines:(id)lines; // 0x4e5d1
// declared property getter: - (BOOL)hasURL; // 0x4e5b9
// declared property setter: - (void)setLineNumber:(int)number; // 0x4e595
// declared property getter: - (BOOL)hasErrorMessage; // 0x4e57d
// declared property setter: - (void)setErrorType:(int)type; // 0x4e559
// declared property getter: - (BOOL)hasMetaData; // 0x4e541
- (void)dealloc; // 0x4e4ad
@end
| 1,479 |
1,309 |
<gh_stars>1000+
#include "perftest.h"
// This file measures the performance characteristics of the platform (compiler/OS/CPU).
#if TEST_PLATFORM
#include <cmath>
#include <fcntl.h>
// Windows
#ifdef _WIN32
#include <windows.h>
#endif
// UNIX
#if defined(unix) || defined(__unix__) || defined(__unix)
#include <unistd.h>
#ifdef _POSIX_MAPPED_FILES
#include <sys/mman.h>
#endif
#endif
class Platform : public PerfTest {
public:
virtual void SetUp() {
PerfTest::SetUp();
// temp buffer for testing
temp_ = (char *)malloc(length_ + 1);
memcpy(temp_, json_, length_);
checkSum_ = CheckSum();
}
char CheckSum() {
char c = 0;
for (size_t i = 0; i < length_; ++i)
c += temp_[i];
return c;
}
virtual void TearDown() {
PerfTest::TearDown();
free(temp_);
}
protected:
char *temp_;
char checkSum_;
};
TEST_F(Platform, CheckSum) {
for (int i = 0; i < kTrialCount; i++)
EXPECT_EQ(checkSum_, CheckSum());
}
TEST_F(Platform, strlen) {
for (int i = 0; i < kTrialCount; i++) {
size_t l = strlen(json_);
EXPECT_EQ(length_, l);
}
}
TEST_F(Platform, memcmp) {
for (int i = 0; i < kTrialCount; i++) {
EXPECT_EQ(0, memcmp(temp_, json_, length_));
}
}
TEST_F(Platform, pow) {
double sum = 0;
for (int i = 0; i < kTrialCount * kTrialCount; i++)
sum += pow(10.0, i & 255);
EXPECT_GT(sum, 0.0);
}
TEST_F(Platform, Whitespace_strlen) {
for (int i = 0; i < kTrialCount; i++) {
size_t l = strlen(whitespace_);
EXPECT_GT(l, whitespace_length_);
}
}
TEST_F(Platform, Whitespace_strspn) {
for (int i = 0; i < kTrialCount; i++) {
size_t l = strspn(whitespace_, " \n\r\t");
EXPECT_EQ(whitespace_length_, l);
}
}
TEST_F(Platform, fread) {
for (int i = 0; i < kTrialCount; i++) {
FILE *fp = fopen(filename_, "rb");
ASSERT_EQ(length_, fread(temp_, 1, length_, fp));
EXPECT_EQ(checkSum_, CheckSum());
fclose(fp);
}
}
#ifdef _MSC_VER
TEST_F(Platform, read) {
for (int i = 0; i < kTrialCount; i++) {
int fd = _open(filename_, _O_BINARY | _O_RDONLY);
ASSERT_NE(-1, fd);
ASSERT_EQ(length_, _read(fd, temp_, length_));
EXPECT_EQ(checkSum_, CheckSum());
_close(fd);
}
}
#else
TEST_F(Platform, read) {
for (int i = 0; i < kTrialCount; i++) {
int fd = open(filename_, O_RDONLY);
ASSERT_NE(-1, fd);
ASSERT_EQ(length_, read(fd, temp_, length_));
EXPECT_EQ(checkSum_, CheckSum());
close(fd);
}
}
#endif
#ifdef _WIN32
TEST_F(Platform, MapViewOfFile) {
for (int i = 0; i < kTrialCount; i++) {
HANDLE file = CreateFile(filename_, GENERIC_READ, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
ASSERT_NE(INVALID_HANDLE_VALUE, file);
HANDLE mapObject = CreateFileMapping(file, NULL, PAGE_READONLY, 0, length_, NULL);
ASSERT_NE(INVALID_HANDLE_VALUE, mapObject);
void *p = MapViewOfFile(mapObject, FILE_MAP_READ, 0, 0, length_);
ASSERT_TRUE(p != NULL);
EXPECT_EQ(checkSum_, CheckSum());
ASSERT_TRUE(UnmapViewOfFile(p) == TRUE);
ASSERT_TRUE(CloseHandle(mapObject) == TRUE);
ASSERT_TRUE(CloseHandle(file) == TRUE);
}
}
#endif
#ifdef _POSIX_MAPPED_FILES
TEST_F(Platform, mmap) {
for (int i = 0; i < kTrialCount; i++) {
int fd = open(filename_, O_RDONLY);
ASSERT_NE(-1, fd);
void *p = mmap(NULL, length_, PROT_READ, MAP_PRIVATE, fd, 0);
ASSERT_TRUE(p != NULL);
EXPECT_EQ(checkSum_, CheckSum());
munmap(p, length_);
close(fd);
}
}
#endif
#endif // TEST_PLATFORM
| 1,709 |
3,170 |
//
// DynamicStruct.h
//
// $Id: //poco/Main/Foundation/include/Poco/DynamicStruct.h#9 $
//
// Library: Foundation
// Package: Dynamic
// Module: Struct
//
// Forward header for Struct class to maintain backward compatibility.
//
// Copyright (c) 2007, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#ifndef Foundation_DynamicStruct_INCLUDED
#define Foundation_DynamicStruct_INCLUDED
//@ deprecated
#include "Poco/Dynamic/Struct.h"
#endif // Foundation_DynamicStruct_INCLUDED
| 171 |
1,738 |
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.
// Purpose:
// - Manage and cache glyphs, retrieving them from the renderer as needed
#ifndef CRYINCLUDE_CRYFONT_GLYPHCACHE_H
#define CRYINCLUDE_CRYFONT_GLYPHCACHE_H
#pragma once
#if !defined(USE_NULLFONT_ALWAYS)
#include <vector>
#include "GlyphBitmap.h"
#include "FontRenderer.h"
#include "CryFont.h"
#include <StlUtils.h>
//! Glyph cache slots store the bitmap buffer and glyph metadata from FreeType.
//!
//! This bitmap buffer is eventually copied to a CFontTexture texture buffer.
//! A glyph cache slot bitmap buffer only holds a single glyph, whereas the
//! CFontTexture stores multiple glyphs in a grid (row/col) format.
typedef struct CCacheSlot
{
Vec2i glyphSize = CCryFont::defaultGlyphSize; //!< The render resolution of the glyph in the glyph bitmap
unsigned int dwUsage;
int iCacheSlot;
int iHoriAdvance; //!< Advance width. See FT_Glyph_Metrics::horiAdvance.
uint32 cCurrentChar;
uint8 iCharWidth; //!< Glyph width (in pixel)
uint8 iCharHeight; //!< Glyph height (in pixel)
AZ::s32 iCharOffsetX; //!< Glyph's left-side bearing (in pixels). See FT_GlyphSlotRec::bitmap_left.
AZ::s32 iCharOffsetY; //!< Glyph's top bearing (in pixels). See FT_GlyphSlotRec::bitmap_top.
CGlyphBitmap pGlyphBitmap; //!< Contains a buffer storing a copy of the glyph from FreeType
void Reset()
{
dwUsage = 0;
cCurrentChar = ~0;
iCharWidth = 0;
iCharHeight = 0;
iCharOffsetX = 0;
iCharOffsetY = 0;
pGlyphBitmap.Clear();
}
void GetMemoryUsage(ICrySizer* pSizer) const
{
pSizer->AddObject(this, sizeof(*this));
pSizer->AddObject(pGlyphBitmap);
}
} CCacheSlot;
namespace CryFont
{
namespace GlyphCache
{
//! Height and width pair for glyph size mapping
typedef Vec2i CCacheTableGlyphSizeType;
//! Pair for mapping a height and width size to a UTF32 character/glyph
typedef AZStd::pair<CCacheTableGlyphSizeType, uint32> CCacheTableKey;
//! Hasher for glyph cache table keys (glyphsize-char code pair)
//!
//! Instead of creating our own custom hash, the types are broken down to their
//! native types (ints) and passed to existing hashes that handle those types.
struct HashGlyphCacheTableKey
{
typedef CCacheTableKey ArgumentType;
typedef AZStd::size_t ResultType;
typedef AZStd::pair<int32, int32> Int32Pair;
typedef AZStd::pair<Int32Pair, uint32> Int32PairU32Pair;
ResultType operator()(const ArgumentType& value) const
{
AZStd::hash<Int32PairU32Pair> pairHash;
return pairHash(Int32PairU32Pair(Int32Pair(value.first.x, value.first.y), value.second));
}
};
}
}
//! Maps size-specific UTF32 glyphs to their corresponding cache slots
typedef AZStd::unordered_map<CryFont::GlyphCache::CCacheTableKey, CCacheSlot*, CryFont::GlyphCache::HashGlyphCacheTableKey> CCacheTable;
typedef std::vector<CCacheSlot*> CCacheSlotList;
typedef std::vector<CCacheSlot*>::iterator CCacheSlotListItor;
#ifdef WIN64
#undef GetCharWidth
#undef GetCharHeight
#endif
//! The glyph cache maps UTF32 codepoints to their corresponding FreeType data.
//!
//! This cache is used to associate font glyph info (read from FreeType) with
//! UTF32 codepoints. Ultimately the glyph info will be read into a font texture
//! (CFontTexture) to avoid future FreeType lookups.
//!
//! If a CFontTexture is missing a glyph that is currently stored in the glyph
//! cache, the cached data can be returned instead of having to be rendered from
//! FreeType again.
//!
//! \sa CFontTexture
class CGlyphCache
{
public:
CGlyphCache();
~CGlyphCache();
int Create(int iCacheSize, int iGlyphBitmapWidth, int iGlyphBitmapHeight, int iSmoothMethod, int iSmoothAmount, float sizeRatio);
int Release();
int LoadFontFromFile(const string& szFileName);
int LoadFontFromMemory(unsigned char* pFileBuffer, int iDataSize);
int ReleaseFont();
int SetEncoding(FT_Encoding pEncoding) { return m_pFontRenderer.SetEncoding(pEncoding); };
FT_Encoding GetEncoding() { return m_pFontRenderer.GetEncoding(); };
int GetGlyphBitmapSize(int* pWidth, int* pHeight);
void SetGlyphBitmapSize(int width, int height, float sizeRatio);
int PreCacheGlyph(uint32 cChar, const Vec2i& glyphSize = CCryFont::defaultGlyphSize, const CFFont::FontHintParams& glyphFlags = CFFont::FontHintParams());
int UnCacheGlyph(uint32 cChar, const Vec2i& glyphSize = CCryFont::defaultGlyphSize);
int GlyphCached(uint32 cChar, const Vec2i& glyphSize = CCryFont::defaultGlyphSize);
CCacheSlot* GetLRUSlot();
CCacheSlot* GetMRUSlot();
//! Obtains glyph information for the given UTF32 codepoint.
//! This information is obtained from a CCacheSlot that corresponds to
//! the given codepoint. If the codepoint doesn't exist within the cache
//! table (m_pCacheTable), then the information is obtain from FreeType
//! directly via CFontRenderer.
//!
//! Ultimately the glyph bitmap is copied into a font texture
//! (CFontTexture). Once the glyph is copied into the font texture then
//! the font texture is referenced directly rather than relying on the
//! glyph cache or FreeType.
//!
//! \sa CFontRenderer::GetGlyph, CFontTexture::UpdateSlot
int GetGlyph(CGlyphBitmap** pGlyph, int* piHoriAdvance, int* piWidth, int* piHeight, AZ::s32& iCharOffsetX, AZ::s32& iCharOffsetY, uint32 cChar, const Vec2i& glyphSize = CCryFont::defaultGlyphSize, const CFFont::FontHintParams& glyphFlags = CFFont::FontHintParams());
void GetMemoryUsage(ICrySizer* pSizer) const
{
pSizer->AddObject(m_pSlotList);
//pSizer->AddContainer(m_pCacheTable);
pSizer->AddObject(m_pScaleBitmap);
pSizer->AddObject(m_pFontRenderer);
}
bool GetMonospaced() const { return m_pFontRenderer.GetMonospaced(); }
Vec2 GetKerning(uint32_t leftGlyph, uint32_t rightGlyph);
float GetAscenderToHeightRatio();
private:
//! Returns a key for the cache table where the given char is mapped at the given size.
CryFont::GlyphCache::CCacheTableKey GetCacheSlotKey(uint32 cChar, const Vec2i& glyphSize = CCryFont::defaultGlyphSize) const;
int CreateSlotList(int iListSize);
int ReleaseSlotList();
CCacheSlotList m_pSlotList;
CCacheTable m_pCacheTable;
int m_iGlyphBitmapWidth;
int m_iGlyphBitmapHeight;
int m_iSmoothMethod;
int m_iSmoothAmount;
CGlyphBitmap* m_pScaleBitmap;
CFontRenderer m_pFontRenderer;
unsigned int m_dwUsage;
};
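// Illustrative call sketch (added; mirrors the GetGlyph declaration above, not taken from
// the original engine code):
//
//   CGlyphBitmap* bitmap = nullptr;
//   int advance = 0, width = 0, height = 0;
//   AZ::s32 offsetX = 0, offsetY = 0;
//   cache.GetGlyph(&bitmap, &advance, &width, &height, offsetX, offsetY, 'A');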
#endif // #if !defined(USE_NULLFONT_ALWAYS)
#endif // CRYINCLUDE_CRYFONT_GLYPHCACHE_H
| 3,196 |
432 |
/* mpc_acos -- arccosine of a complex number.
Copyright (C) 2009, 2010, 2011, 2012 INRIA
This file is part of GNU MPC.
GNU MPC is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
GNU MPC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see http://www.gnu.org/licenses/ .
*/
#include <stdio.h> /* for MPC_ASSERT */
#include "mpc-impl.h"
int
mpc_acos (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd)
{
int inex_re, inex_im, inex;
mpfr_prec_t p_re, p_im, p;
mpc_t z1;
mpfr_t pi_over_2;
mpfr_exp_t e1, e2;
mpfr_rnd_t rnd_im;
mpc_rnd_t rnd1;
inex_re = 0;
inex_im = 0;
/* special values */
if (mpfr_nan_p (mpc_realref (op)) || mpfr_nan_p (mpc_imagref (op)))
{
if (mpfr_inf_p (mpc_realref (op)) || mpfr_inf_p (mpc_imagref (op)))
{
mpfr_set_inf (mpc_imagref (rop), mpfr_signbit (mpc_imagref (op)) ? +1 : -1);
mpfr_set_nan (mpc_realref (rop));
}
else if (mpfr_zero_p (mpc_realref (op)))
{
inex_re = set_pi_over_2 (mpc_realref (rop), +1, MPC_RND_RE (rnd));
mpfr_set_nan (mpc_imagref (rop));
}
else
{
mpfr_set_nan (mpc_realref (rop));
mpfr_set_nan (mpc_imagref (rop));
}
return MPC_INEX (inex_re, 0);
}
if (mpfr_inf_p (mpc_realref (op)) || mpfr_inf_p (mpc_imagref (op)))
{
if (mpfr_inf_p (mpc_realref (op)))
{
if (mpfr_inf_p (mpc_imagref (op)))
{
if (mpfr_sgn (mpc_realref (op)) > 0)
{
inex_re =
set_pi_over_2 (mpc_realref (rop), +1, MPC_RND_RE (rnd));
mpfr_div_2ui (mpc_realref (rop), mpc_realref (rop), 1, GMP_RNDN);
}
else
{
/* the real part of the result is 3*pi/4
a = o(pi) error(a) < 1 ulp(a)
b = o(3*a) error(b) < 2 ulp(b)
c = b/4 exact
thus 1 bit is lost */
mpfr_t x;
mpfr_prec_t prec;
int ok;
mpfr_init (x);
prec = mpfr_get_prec (mpc_realref (rop));
p = prec;
do
{
p += mpc_ceil_log2 (p);
mpfr_set_prec (x, p);
mpfr_const_pi (x, GMP_RNDD);
mpfr_mul_ui (x, x, 3, GMP_RNDD);
ok =
mpfr_can_round (x, p - 1, GMP_RNDD, MPC_RND_RE (rnd),
prec+(MPC_RND_RE (rnd) == GMP_RNDN));
} while (ok == 0);
inex_re =
mpfr_div_2ui (mpc_realref (rop), x, 2, MPC_RND_RE (rnd));
mpfr_clear (x);
}
}
else
{
if (mpfr_sgn (mpc_realref (op)) > 0)
mpfr_set_ui (mpc_realref (rop), 0, GMP_RNDN);
else
inex_re = mpfr_const_pi (mpc_realref (rop), MPC_RND_RE (rnd));
}
}
else
inex_re = set_pi_over_2 (mpc_realref (rop), +1, MPC_RND_RE (rnd));
mpfr_set_inf (mpc_imagref (rop), mpfr_signbit (mpc_imagref (op)) ? +1 : -1);
return MPC_INEX (inex_re, 0);
}
/* pure real argument */
if (mpfr_zero_p (mpc_imagref (op)))
{
int s_im;
s_im = mpfr_signbit (mpc_imagref (op));
if (mpfr_cmp_ui (mpc_realref (op), 1) > 0)
{
if (s_im)
inex_im = mpfr_acosh (mpc_imagref (rop), mpc_realref (op),
MPC_RND_IM (rnd));
else
inex_im = -mpfr_acosh (mpc_imagref (rop), mpc_realref (op),
INV_RND (MPC_RND_IM (rnd)));
mpfr_set_ui (mpc_realref (rop), 0, GMP_RNDN);
}
else if (mpfr_cmp_si (mpc_realref (op), -1) < 0)
{
mpfr_t minus_op_re;
minus_op_re[0] = mpc_realref (op)[0];
MPFR_CHANGE_SIGN (minus_op_re);
if (s_im)
inex_im = mpfr_acosh (mpc_imagref (rop), minus_op_re,
MPC_RND_IM (rnd));
else
inex_im = -mpfr_acosh (mpc_imagref (rop), minus_op_re,
INV_RND (MPC_RND_IM (rnd)));
inex_re = mpfr_const_pi (mpc_realref (rop), MPC_RND_RE (rnd));
}
else
{
inex_re = mpfr_acos (mpc_realref (rop), mpc_realref (op), MPC_RND_RE (rnd));
mpfr_set_ui (mpc_imagref (rop), 0, MPC_RND_IM (rnd));
}
if (!s_im)
mpc_conj (rop, rop, MPC_RNDNN);
return MPC_INEX (inex_re, inex_im);
}
/* pure imaginary argument */
if (mpfr_zero_p (mpc_realref (op)))
{
inex_re = set_pi_over_2 (mpc_realref (rop), +1, MPC_RND_RE (rnd));
inex_im = -mpfr_asinh (mpc_imagref (rop), mpc_imagref (op),
INV_RND (MPC_RND_IM (rnd)));
mpc_conj (rop,rop, MPC_RNDNN);
return MPC_INEX (inex_re, inex_im);
}
/* regular complex argument: acos(z) = Pi/2 - asin(z) */
p_re = mpfr_get_prec (mpc_realref(rop));
p_im = mpfr_get_prec (mpc_imagref(rop));
p = p_re;
mpc_init3 (z1, p, p_im); /* we round directly the imaginary part to p_im,
with rounding mode opposite to rnd_im */
rnd_im = MPC_RND_IM(rnd);
/* the imaginary part of asin(z) has the same sign as Im(z), thus if
Im(z) > 0 and rnd_im = RNDZ, we want to round the Im(asin(z)) to -Inf
so that -Im(asin(z)) is rounded to zero */
if (rnd_im == GMP_RNDZ)
rnd_im = mpfr_sgn (mpc_imagref(op)) > 0 ? GMP_RNDD : GMP_RNDU;
else
rnd_im = rnd_im == GMP_RNDU ? GMP_RNDD
: rnd_im == GMP_RNDD ? GMP_RNDU
: rnd_im; /* both RNDZ and RNDA map to themselves for -asin(z) */
rnd1 = MPC_RND (GMP_RNDN, rnd_im);
mpfr_init2 (pi_over_2, p);
for (;;)
{
p += mpc_ceil_log2 (p) + 3;
mpfr_set_prec (mpc_realref(z1), p);
mpfr_set_prec (pi_over_2, p);
set_pi_over_2 (pi_over_2, +1, GMP_RNDN);
e1 = 1; /* Exp(pi_over_2) */
inex = mpc_asin (z1, op, rnd1); /* asin(z) */
MPC_ASSERT (mpfr_sgn (mpc_imagref(z1)) * mpfr_sgn (mpc_imagref(op)) > 0);
inex_im = MPC_INEX_IM(inex); /* inex_im is in {-1, 0, 1} */
e2 = mpfr_get_exp (mpc_realref(z1));
mpfr_sub (mpc_realref(z1), pi_over_2, mpc_realref(z1), GMP_RNDN);
if (!mpfr_zero_p (mpc_realref(z1)))
{
/* the error on x=Re(z1) is bounded by 1/2 ulp(x) + 2^(e1-p-1) +
2^(e2-p-1) */
e1 = e1 >= e2 ? e1 + 1 : e2 + 1;
/* the error on x is bounded by 1/2 ulp(x) + 2^(e1-p-1) */
e1 -= mpfr_get_exp (mpc_realref(z1));
/* the error on x is bounded by 1/2 ulp(x) [1 + 2^e1] */
e1 = e1 <= 0 ? 0 : e1;
/* the error on x is bounded by 2^e1 * ulp(x) */
mpfr_neg (mpc_imagref(z1), mpc_imagref(z1), GMP_RNDN); /* exact */
inex_im = -inex_im;
if (mpfr_can_round (mpc_realref(z1), p - e1, GMP_RNDN, GMP_RNDZ,
p_re + (MPC_RND_RE(rnd) == GMP_RNDN)))
break;
}
}
inex = mpc_set (rop, z1, rnd);
inex_re = MPC_INEX_RE(inex);
mpc_clear (z1);
mpfr_clear (pi_over_2);
return MPC_INEX(inex_re, inex_im);
}
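/* Hypothetical usage sketch (illustration only, not part of the original file;
   assumes the standard GNU MPC API):
       mpc_t z;
       mpc_init2 (z, 53);
       mpc_set_d_d (z, 0.5, 0.5, MPC_RNDNN);
       mpc_acos (z, z, MPC_RNDNN);   (* z now holds acos(0.5 + 0.5i) *)
       mpc_clear (z);
*/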
| 4,565 |
1,556 |
def warn(msg, cat=None, stacklevel=1):
print("%s: %s" % ("Warning" if cat is None else cat.__name__, msg))
| 44 |
672 |
<filename>velox/experimental/codegen/Codegen.cpp<gh_stars>100-1000
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "velox/experimental/codegen/Codegen.h"
#include <glog/logging.h>
#include <memory>
#include "velox/core/PlanNode.h"
#include "velox/experimental/codegen/CodegenCompiledExpressionTransform.h"
#include "velox/experimental/codegen/CodegenExceptions.h"
#include "velox/experimental/codegen/external_process/Command.h"
#include "velox/experimental/codegen/external_process/subprocess.h"
#include "velox/experimental/codegen/proto/ProtoUtils.h"
#include "velox/experimental/codegen/udf_manager/UDFManager.h"
#include "velox/experimental/codegen/utils/timer/NestedScopedTimer.h"
namespace facebook {
namespace velox {
namespace codegen {
bool Codegen::initialize(
const std::string_view& codegenOptionsJson,
bool lazyLoading) {
try {
codegenLogger_->onInitialize(lazyLoading);
auto codegenOptionsProto = proto::proto_utils::ProtoUtils<
proto::CodegenOptionsProto>::loadProtoFromJson(codegenOptionsJson);
useSymbolsForArithmetic_ = codegenOptionsProto.usesymbolsforarithmetic();
initializeCodeManager(codegenOptionsProto.compileroptions());
initializeUDFManager();
initializeTransform();
if (!lazyLoading) {
runInitializationTests();
}
} catch (std::exception& e) {
throw CodegenInitializationException(e.what());
}
return true;
}
bool Codegen::initializeFromFile(
const std::filesystem::path& codegenOptionsJsonFile,
bool lazyLoading) {
codegenLogger_->onInitializeFromFile(codegenOptionsJsonFile, lazyLoading);
return initialize(
proto::proto_utils::readFromFile(codegenOptionsJsonFile).str(),
lazyLoading);
}
std::shared_ptr<const core::PlanNode> Codegen::compile(
const core::PlanNode& planNode) {
codegenLogger_->onCompileStart(planNode);
auto transformedPlanNode = transform_->transform(planNode);
codegenLogger_->onCompileEnd(
*std::static_pointer_cast<DefaultScopedTimer::EventSequence>(
eventSequence_),
planNode);
return transformedPlanNode;
}
bool Codegen::initializeCodeManager(
const proto::CompilerOptionsProto& compilerOptionsProto) {
LOG(INFO) << "Codegen: initializing CodeManager";
eventSequence_ = std::make_shared<DefaultScopedTimer::EventSequence>();
codeManager_ = std::make_shared<CodeManager>(
CompilerOptions::fromProto(compilerOptionsProto),
*std::static_pointer_cast<DefaultEventSequence>(eventSequence_));
return true;
}
bool Codegen::initializeUDFManager() {
LOG(INFO) << "Codegen: initializing UDFManager";
// TODO: we want to register UDFs via config files rather than in code
udfManager_ = std::make_shared<UDFManager>();
registerVeloxArithmeticUDFs(*udfManager_);
return true;
}
bool Codegen::initializeTransform() {
LOG(INFO) << "Codegen: initializing Transform";
transform_ = std::make_shared<CodegenCompiledExpressionTransform>(
CodegenCompiledExpressionTransform(
codeManager_->compiler().compilerOptions(),
*udfManager_,
useSymbolsForArithmetic_,
*std::static_pointer_cast<DefaultEventSequence>(eventSequence_)));
return true;
}
bool Codegen::runInitializationTests() {
LOG(INFO) << "Codegen: running initialization tests";
auto sourceCode1 = R"a(
extern "C" {
int f() {
return 24;
};
}
)a";
auto sourceCode2 = R"a(
extern "C" {
int g() {
return 32;
};
}
)a";
Compiler compiler = codeManager_->compiler();
LOG(INFO) << "Codegen: attempting to compile test binary1";
auto binary = compiler.compileString({}, sourceCode1);
VELOX_CHECK(std::filesystem::exists(binary));
VELOX_CHECK_GT(std::filesystem::file_size(binary), 0);
LOG(INFO) << "Codegen: attempting to compile test binary2";
auto binary2 = compiler.compileString({}, sourceCode2);
VELOX_CHECK(std::filesystem::exists(binary2));
VELOX_CHECK_GT(std::filesystem::file_size(binary2), 0);
LOG(INFO) << "Codegen: attempting to link test sharedObject";
auto sharedObject = compiler.link({}, {binary, binary2});
VELOX_CHECK(std::filesystem::exists(sharedObject));
VELOX_CHECK_GT(std::filesystem::file_size(sharedObject), 0);
// From dlopen man page, one of RTLD_NOW or RTLD_LAZY must be set
// in linux machines. Note that mac has no such requirements.
// https://man7.org/linux/man-pages/man3/dlopen.3.html
auto libraryPtr =
dlopen(sharedObject.string().c_str(), RTLD_LOCAL | RTLD_LAZY);
VELOX_CHECK_EQ(dlerror(), nullptr);
auto sym_g = (int (*)())dlsym(libraryPtr, "g");
VELOX_CHECK_EQ(dlerror(), nullptr);
VELOX_CHECK_EQ(sym_g(), 32);
auto sym_f = (int (*)())dlsym(libraryPtr, "f");
VELOX_CHECK_EQ(dlerror(), nullptr);
VELOX_CHECK_EQ(sym_f(), 24);
LOG(INFO) << "Codegen: initialization tests done";
return true;
}
} // namespace codegen
} // namespace velox
} // namespace facebook
| 1,900 |
5,079 |
from __future__ import absolute_import, unicode_literals
from case import skip
from celery.utils.sysinfo import df, load_average
@skip.unless_symbol('os.getloadavg')
def test_load_average(patching):
getloadavg = patching('os.getloadavg')
getloadavg.return_value = 0.54736328125, 0.6357421875, 0.69921875
l = load_average()
assert l
assert l == (0.55, 0.64, 0.7)
@skip.unless_symbol('posix.statvfs_result')
def test_df():
x = df('/')
assert x.total_blocks
assert x.available
assert x.capacity
assert x.stat
| 223 |
3,651 |
package com.orientechnologies.orient.client.remote.message;
import com.orientechnologies.orient.client.remote.OBinaryResponse;
import com.orientechnologies.orient.client.remote.OStorageRemoteSession;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.serialization.serializer.record.ORecordSerializer;
import com.orientechnologies.orient.core.serialization.serializer.record.binary.ORecordSerializerNetworkFactory;
import com.orientechnologies.orient.enterprise.channel.binary.OChannelDataInput;
import com.orientechnologies.orient.enterprise.channel.binary.OChannelDataOutput;
import java.io.IOException;
import java.util.Map;
public class OListDatabasesResponse implements OBinaryResponse {
private Map<String, String> databases;
public OListDatabasesResponse(Map<String, String> databases) {
this.databases = databases;
}
public OListDatabasesResponse() {}
@Override
public void write(OChannelDataOutput channel, int protocolVersion, ORecordSerializer serializer)
throws IOException {
final ODocument result = new ODocument();
result.field("databases", databases);
byte[] toSend = serializer.toStream(result);
channel.writeBytes(toSend);
}
@Override
public void read(OChannelDataInput network, OStorageRemoteSession session) throws IOException {
ORecordSerializer serializer = ORecordSerializerNetworkFactory.INSTANCE.current();
final ODocument result = new ODocument();
serializer.fromStream(network.readBytes(), result, null);
databases = result.field("databases");
}
public Map<String, String> getDatabases() {
return databases;
}
}
| 486 |
378 |
package com.github.kayvannj.permission_utils;
public abstract class Func2{
protected abstract void call(int requestCode, String permissions[], int[] grantResults);
}
| 49 |
2,637 |
<reponame>nateglims/amazon-freertos<filename>vendors/st/stm32l475_discovery/BSP/Components/lis3mdl/lis3mdl.c<gh_stars>1000+
/**
******************************************************************************
* @file lis3mdl.c
* @author MCD Application Team
* @version V1.0.0
* @date 14-February-2017
* @brief This file provides a set of functions needed to manage the LIS3MDL
* magnetometer devices
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT(c) 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "lis3mdl.h"
/** @addtogroup BSP
* @{
*/
/** @addtogroup Component
* @{
*/
/** @defgroup LIS3MDL LIS3MDL
* @{
*/
/** @defgroup LIS3MDL_Mag_Private_Variables LIS3MDL Mag Private Variables
* @{
*/
MAGNETO_DrvTypeDef Lis3mdlMagDrv =
{
LIS3MDL_MagInit,
LIS3MDL_MagDeInit,
LIS3MDL_MagReadID,
0,
LIS3MDL_MagLowPower,
0,
0,
0,
0,
0,
0,
0,
LIS3MDL_MagReadXYZ
};
/**
* @}
*/
/** @defgroup LIS3MDL_Mag_Private_Functions LIS3MDL Mag Private Functions
* @{
*/
/**
* @brief Set LIS3MDL Magnetometer Initialization.
* @param LIS3MDL_InitStruct: pointer to a LIS3MDL_MagInitTypeDef structure
* that contains the configuration setting for the LIS3MDL.
*/
void LIS3MDL_MagInit(MAGNETO_InitTypeDef LIS3MDL_InitStruct)
{
SENSOR_IO_Write(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG1, LIS3MDL_InitStruct.Register1);
SENSOR_IO_Write(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG2, LIS3MDL_InitStruct.Register2);
SENSOR_IO_Write(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG3, LIS3MDL_InitStruct.Register3);
SENSOR_IO_Write(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG4, LIS3MDL_InitStruct.Register4);
SENSOR_IO_Write(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG5, LIS3MDL_InitStruct.Register5);
}
/**
* @brief LIS3MDL Magnetometer De-initialization.
*/
void LIS3MDL_MagDeInit(void)
{
uint8_t ctrl = 0x00;
/* Read control register 1 value */
ctrl = SENSOR_IO_Read(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG3);
/* Clear Selection Mode bits */
ctrl &= ~(LIS3MDL_MAG_SELECTION_MODE);
/* Set Power down */
ctrl |= LIS3MDL_MAG_POWERDOWN2_MODE;
/* write back control register */
SENSOR_IO_Write(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG3, ctrl);
}
/**
* @brief Read LIS3MDL ID.
* @retval ID
*/
uint8_t LIS3MDL_MagReadID(void)
{
/* IO interface initialization */
SENSOR_IO_Init();
/* Read value at Who am I register address */
return (SENSOR_IO_Read(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_WHO_AM_I_REG));
}
/**
* @brief Set/Unset Magnetometer in low power mode.
* @param status 0 means disable Low Power Mode, otherwise Low Power Mode is enabled
*/
void LIS3MDL_MagLowPower(uint16_t status)
{
uint8_t ctrl = 0;
/* Read control register 1 value */
ctrl = SENSOR_IO_Read(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG3);
/* Clear Low Power Mode bit */
ctrl &= ~(0x20);
/* Set Low Power Mode */
if(status)
{
ctrl |= LIS3MDL_MAG_CONFIG_LOWPOWER_MODE;
}else
{
ctrl |= LIS3MDL_MAG_CONFIG_NORMAL_MODE;
}
/* write back control register */
SENSOR_IO_Write(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG3, ctrl);
}
/**
* @brief Read X, Y & Z Magnetometer values
* @param pData: Data out pointer
*/
void LIS3MDL_MagReadXYZ(int16_t* pData)
{
int16_t pnRawData[3];
uint8_t ctrlm= 0;
uint8_t buffer[6];
uint8_t i = 0;
float sensitivity = 0;
/* Read the magnetometer control register content */
ctrlm = SENSOR_IO_Read(LIS3MDL_MAG_I2C_ADDRESS_HIGH, LIS3MDL_MAG_CTRL_REG2);
/* Read output registers X, Y & Z of the magnetometer */
SENSOR_IO_ReadMultiple(LIS3MDL_MAG_I2C_ADDRESS_HIGH, (LIS3MDL_MAG_OUTX_L | 0x80), buffer, 6);
for(i=0; i<3; i++)
{
pnRawData[i]=((((uint16_t)buffer[2*i+1]) << 8) + (uint16_t)buffer[2*i]);
}
/* Normal mode */
/* Switch the sensitivity value set in the CTRL_REG2 */
switch(ctrlm & 0x60)
{
case LIS3MDL_MAG_FS_4_GA:
sensitivity = LIS3MDL_MAG_SENSITIVITY_FOR_FS_4GA;
break;
case LIS3MDL_MAG_FS_8_GA:
sensitivity = LIS3MDL_MAG_SENSITIVITY_FOR_FS_8GA;
break;
case LIS3MDL_MAG_FS_12_GA:
sensitivity = LIS3MDL_MAG_SENSITIVITY_FOR_FS_12GA;
break;
case LIS3MDL_MAG_FS_16_GA:
sensitivity = LIS3MDL_MAG_SENSITIVITY_FOR_FS_16GA;
break;
}
/* Obtain the mGauss value for the three axes */
for(i=0; i<3; i++)
{
pData[i]=( int16_t )(pnRawData[i] * sensitivity);
}
}
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| 2,621 |
2,446 |
/*
Copyright (C) 2016 Apple Inc. All Rights Reserved.
See LICENSE.txt for this sample’s licensing information
Abstract:
We define some custom atomics to be used by the network so separate threads at the end of command buffers can safely increment a shared counter.
*/
#ifndef atomics_h
#define atomics_h
#import <stdatomic.h>
static atomic_int cnt = ATOMIC_VAR_INIT(0);
void __atomic_increment();
void __atomic_reset();
int __get_atomic_count();
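/* Hypothetical implementation sketch (illustration only; the corresponding .c/.m
   file is not part of this snippet), using the C11 stdatomic API:
       void __atomic_increment() { atomic_fetch_add_explicit(&cnt, 1, memory_order_relaxed); }
       void __atomic_reset()     { atomic_store_explicit(&cnt, 0, memory_order_relaxed); }
       int  __get_atomic_count() { return atomic_load_explicit(&cnt, memory_order_relaxed); }
*/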
#endif /* atomics_h */
| 159 |
1,178 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the network_util module."""
import os
import unittest
import makani
from makani.avionics.network import network_config
from makani.avionics.network import network_util
class NetworkUtilTest(unittest.TestCase):
def setUp(self):
filename = os.path.join(makani.HOME, 'avionics/network/test.yaml')
self._network_config = network_config.NetworkConfig(filename)
def _GetSegmentStats(self, message_name):
switches = self._network_config.GetSwitches()
message_types = self._network_config.all_messages
path_finder = network_util.PathFinder(switches, message_types)
message = self._network_config.GetAioMessageType(message_name)
graph = network_util.MessageGraph(path_finder, message)
return network_util.GetSegmentStats(graph, message.all_senders)
def _GetPeak(self, stats, in_tag, out_tag):
segment = (in_tag, out_tag)
return sum([s['peak'] for s in stats[segment].values()])
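# Note (inferred from the helpers above, not from network_util itself):
# GetSegmentStats appears to return a dict keyed by (in_tag, out_tag) segments;
# each value maps senders to per-sender statistics that include a 'peak' packet
# rate, which _GetPeak sums over all senders of a segment.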
def testFlightComputerSensor(self):
stats = self._GetSegmentStats('FlightComputerSensor')
# Each Controller may receive a peak of 5 FlightComputerSensor messages.
# - FcA -> ControllerA on local switch (1 copy).
# - FcB -> ControllerA over A and B networks (2 copies).
# - FcC -> ControllerA over A and B networks (2 copies).
self.assertEqual(5, self._GetPeak(stats, 'switches.fc_a.4',
'aio_nodes.controller_a'))
# RecorderWing may receive a peak of 6 FlightComputerSensor messages.
# - FcA -> RecorderWing over A and B networks (2 copies).
# - FcB -> RecorderWing over A and B networks (2 copies).
# - FcC -> RecorderWing over A and B networks (2 copies).
self.assertEqual(6, self._GetPeak(stats, 'switches.recorder_wing.4',
'aio_nodes.recorder_wing'))
# RecorderGs may receive a peak of 12 FlightComputerSensor messages.
# - FcA -> RecorderGs over A and B networks times 2 trunks (4 copies).
# - FcB -> RecorderGs over A and B networks times 2 trunks (4 copies).
# - FcC -> RecorderGs over A and B networks times 2 trunks (4 copies).
self.assertEqual(12, self._GetPeak(stats, 'switches.recorder_gs.4',
'aio_nodes.recorder_gs'))
def testControllerCommand(self):
stats = self._GetSegmentStats('ControllerCommand')
# Each Motor may receive a peak of 10 ControllerCommand messages.
# - Host -> MotorPbi over A and B networks times 2 trunks (4 copies).
# - ControllerA -> MotorPbi over A and B networks (2 copies).
# - ControllerB -> MotorPbi over A and B networks (2 copies).
# - ControllerC -> MotorPbi over A and B networks (2 copies).
self.assertEqual(10, self._GetPeak(stats, 'switches.motor_pbi.5',
'aio_nodes.motor_pbi'))
# Host may receive a peak of 12 ControllerCommand messages.
# - ControllerA -> Host over A and B networks times 2 trunks (4 copies).
# - ControllerB -> Host over A and B networks times 2 trunks (4 copies).
# - ControllerC -> Host over A and B networks times 2 trunks (4 copies).
self.assertEqual(12, self._GetPeak(stats, 'switches.host.5',
'aio_nodes.host'))
def testMotorStacking(self):
stats = self._GetSegmentStats('MotorStacking')
# Each Motor may receive a peak of 6 MotorStacking messages.
# - MotorPbo -> MotorPbi over A and B networks (2 copies).
# - MotorSbi -> MotorPbi over A and B networks (2 copies).
# - MotorSbo -> MotorPbi over A and B networks (2 copies).
self.assertEqual(6, self._GetPeak(stats, 'switches.motor_pbi.5',
'aio_nodes.motor_pbi'))
def testGetNodeBandwidthStatistics(self):
switches = self._network_config.GetSwitches()
message_types = self._network_config.all_messages
path_finder = network_util.PathFinder(switches, message_types)
node_stats = network_util.GetNodeBandwidthStatistics(path_finder,
message_types)
for aio_node in ['aio_nodes.controller_a', 'aio_nodes.controller_b',
'aio_nodes.controller_c']:
self.assertEqual(node_stats[aio_node].send, {'ControllerCommand': 100})
self.assertEqual(node_stats[aio_node].receive, {
'FlightComputerSensor': 50, # 2 networks for all but the local Fc.
'MotorStatus': 800, # 4 motors * 2 networks.
})
self.assertEqual(node_stats[aio_node].multicast_packet_rate['tx'], 100)
self.assertEqual(node_stats[aio_node].multicast_packet_rate['rx'], 850)
sensor_messages_sent = 0
for aio_node in ['aio_nodes.fc_a', 'aio_nodes.fc_b', 'aio_nodes.fc_c']:
self.assertEqual(node_stats[aio_node].send, {'FlightComputerSensor': 10})
self.assertEqual(node_stats[aio_node].receive, {})
self.assertEqual(node_stats[aio_node].multicast_packet_rate['tx'], 10)
self.assertEqual(node_stats[aio_node].multicast_packet_rate['rx'], 0)
sensor_messages_sent += node_stats[aio_node].send['FlightComputerSensor']
for aio_node in ['aio_nodes.motor_pbo', 'aio_nodes.motor_sbi',
'aio_nodes.motor_pbi', 'aio_nodes.motor_sbo']:
self.assertEqual(node_stats[aio_node].send,
{'MotorStatus': 100, 'MotorStacking': 1000})
# 2 networks * each other motor, no loopback packets.
self.assertEqual(node_stats[aio_node].receive, {
'ControllerCommand': 1000,
'MotorStacking': 6000})
self.assertEqual(node_stats[aio_node].multicast_packet_rate['tx'], 1100)
self.assertEqual(node_stats[aio_node].multicast_packet_rate['rx'], 7000)
# Multiply by two for the two networks, then by two for the two paths from the
# wing to the ground.
self.assertEqual(node_stats['aio_nodes.host'].receive[
'FlightComputerSensor'], sensor_messages_sent * 2 * 2)
if __name__ == '__main__':
unittest.main()
| 2,584 |
639 |
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
// Microsoft (c) 2019, NNFusion Team
#include "pad.hpp"
#include "nnfusion/common/util.hpp"
#include "nnfusion/core/graph/gnode.hpp"
using namespace std;
using namespace nnfusion::op;
Pad::Pad(const nnfusion::Shape& padding_below,
const nnfusion::Shape& padding_above,
const nnfusion::Shape& padding_interior)
: Op("Pad")
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_padding_interior(padding_interior)
{
}
void Pad::validate_and_infer_types(std::shared_ptr<graph::GNode> gnode)
{
nnfusion::element::Type result_et;
OP_VALIDATION(this,
nnfusion::element::Type::merge(result_et,
gnode->get_input_element_type(0),
gnode->get_input_element_type(1)))
<< "Argument element types do not match (arg0 element type: "
<< gnode->get_input_element_type(0)
<< ", arg1 element type: " << gnode->get_input_element_type(1) << ").";
OP_VALIDATION(this, gnode->get_input_partial_shape(1).compatible(nnfusion::PartialShape{}))
<< "Argument for padding value is not a scalar (shape: "
<< gnode->get_input_partial_shape(1) << ").";
auto arg_shape = gnode->get_input_partial_shape(0);
OP_VALIDATION(this,
m_padding_below.size() == m_padding_above.size() &&
m_padding_below.size() == m_padding_interior.size())
<< "Ranks for padding below (" << m_padding_below << "), padding above (" << m_padding_above
<< ") and interior padding (" << m_padding_interior << ") "
<< "do not match.";
size_t implied_rank = m_padding_below.size();
OP_VALIDATION(this, arg_shape.rank().compatible(implied_rank))
<< "Rank for padding below/padding above/interior padding does not match the rank of the "
<< "data argument (padding below: " << m_padding_below << ", "
<< ", padding above: " << m_padding_above << ", interior padding: " << m_padding_interior
<< ").";
std::vector<nnfusion::Dimension> result_dims(implied_rank, nnfusion::Dimension::dynamic());
if (arg_shape.rank().is_static())
{
for (size_t i = 0; i < implied_rank; i++)
{
if (arg_shape[i].is_static())
{
result_dims[i] =
m_padding_below[i] +
subtract_or_zero(size_t(arg_shape[i]) * (m_padding_interior[i] + 1),
m_padding_interior[i]) +
m_padding_above[i];
}
}
}
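// Worked example (illustration only): for an axis of size 3 with padding_below = 1,
// padding_above = 2 and interior padding = 1, the padded extent is
// 1 + (3 * (1 + 1) - 1) + 2 = 8, i.e. one leading pad, the three values interleaved
// with one interior pad each (5 entries), and two trailing pads.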
gnode->set_output_type_and_shape(0, result_et, PartialShape(result_dims));
}
| 1,434 |
308 |
package org.getopentest.selenium;
import org.getopentest.selenium.core.SeleniumTestAction;
import org.openqa.selenium.By;
import org.openqa.selenium.WebElement;
public class AssertCssProperty extends SeleniumTestAction {
@Override
public void run() {
super.run();
By locator = this.readLocatorArgument("locator");
String property = this.readStringArgument("property", null);
String expectedValue = this.readStringArgument("value", null);
this.waitForAsyncCallsToFinish();
WebElement element = this.getElement(locator);
String actualValue = element.getCssValue(property);
if (!actualValue.equals(expectedValue)) {
throw new RuntimeException(String.format(
"Assertion failed for CSS property %s of element %s. Expected value: %s. Actual value: %s.",
property,
locator,
expectedValue,
actualValue));
}
}
}
| 422 |
575 |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for webgpu command buffers."""
import filecmp
import os
import os.path
import sys
from optparse import OptionParser
import build_cmd_buffer_lib
# Named type info object represents a named type that is used in API call
# arguments. The named types are used in 'webgpu_cmd_buffer_functions.txt'.
#
# Options are documented in build_gles2_cmd_buffer.py/build_raster_cmd_buffer.py
_NAMED_TYPE_INFO = {
'PowerPreference': {
'type': 'PowerPreference',
'valid': [
'PowerPreference::kDefault',
'PowerPreference::kHighPerformance',
'PowerPreference::kLowPower',
],
'invalid': [
'PowerPreference::kNumPowerPreferences',
],
}
}
# A function info object specifies the type and other special data for the
# command that will be generated. A base function info object is generated by
# parsing the "webgpu_cmd_buffer_functions.txt", one for each function in the
# file. These function info objects can be augmented and their values can be
# overridden by adding an object to the table below.
#
# Must match function names specified in "webgpu_cmd_buffer_functions.txt".
#
# Options are documented in build_gles2_cmd_buffer.py/build_raster_cmd_buffer.py
# (Note: some options (like decoder_func and unit_test) currently have no
# effect, because WriteServiceImplementation and WriteServiceUnitTests are not
# used below.)
_FUNCTION_INFO = {
'DawnCommands': {
'impl_func': False,
'internal': True,
'data_transfer_methods': ['shm'],
'cmd_args': 'uint32_t commands_shm_id, '
'uint32_t commands_shm_offset, uint32_t size',
'size_args': {
'commands': 'size * sizeof(char)',
},
},
'AssociateMailbox': {
'impl_func': False,
'client_test': False,
'type': 'PUT',
'count': 16, # GL_MAILBOX_SIZE_CHROMIUM
},
'DissociateMailbox': {
'impl_func': False,
'client_test': False,
},
'RequestAdapter': {
'impl_func': False,
'internal': True,
'cmd_args': 'uint64_t request_adapter_serial, uint32_t power_preference'
},
'RequestDevice': {
'impl_func': False,
'internal': True,
'data_transfer_methods': ['shm'],
'cmd_args': 'uint64_t request_device_serial, '
'uint32_t adapter_service_id, '
'uint32_t device_id, '
'uint32_t device_generation, '
'uint32_t request_device_properties_shm_id, '
'uint32_t request_device_properties_shm_offset, '
'uint32_t request_device_properties_size',
'size_args': {
'request_device_properties':
'request_device_properties_size * sizeof(char)',
},
},
}
def main(argv):
"""This is the main function."""
parser = OptionParser()
parser.add_option(
"--output-dir",
help="Output directory for generated files. Defaults to chromium root "
"directory.")
parser.add_option(
"-v", "--verbose", action="store_true", help="Verbose logging output.")
parser.add_option(
"-c", "--check", action="store_true",
help="Check if output files match generated files in chromium root "
"directory. Use this in PRESUBMIT scripts with --output-dir.")
(options, _) = parser.parse_args(args=argv)
# This script lives under src/gpu/command_buffer.
script_dir = os.path.dirname(os.path.abspath(__file__))
assert script_dir.endswith(os.path.normpath("src/gpu/command_buffer"))
# os.path.join doesn't do the right thing with relative paths.
chromium_root_dir = os.path.abspath(script_dir + "/../..")
# Support generating files under gen/ and for PRESUBMIT.
if options.output_dir:
output_dir = options.output_dir
else:
output_dir = chromium_root_dir
os.chdir(output_dir)
# This script lives under gpu/command_buffer, cd to base directory.
build_cmd_buffer_lib.InitializePrefix("WebGPU")
gen = build_cmd_buffer_lib.GLGenerator(
options.verbose, "2018", _FUNCTION_INFO, _NAMED_TYPE_INFO,
chromium_root_dir)
gen.ParseGLH("gpu/command_buffer/webgpu_cmd_buffer_functions.txt")
gen.WriteCommandIds("gpu/command_buffer/common/webgpu_cmd_ids_autogen.h")
gen.WriteFormat("gpu/command_buffer/common/webgpu_cmd_format_autogen.h")
gen.WriteFormatTest(
"gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h")
gen.WriteGLES2InterfaceHeader(
"gpu/command_buffer/client/webgpu_interface_autogen.h")
gen.WriteGLES2ImplementationHeader(
"gpu/command_buffer/client/webgpu_implementation_autogen.h")
gen.WriteGLES2InterfaceStub(
"gpu/command_buffer/client/webgpu_interface_stub_autogen.h")
gen.WriteGLES2InterfaceStubImpl(
"gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h")
gen.WriteGLES2Implementation(
"gpu/command_buffer/client/webgpu_implementation_impl_autogen.h")
gen.WriteGLES2ImplementationUnitTests(
"gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h")
gen.WriteCmdHelperHeader(
"gpu/command_buffer/client/webgpu_cmd_helper_autogen.h")
# Note: No gen.WriteServiceImplementation
# Note: No gen.WriteServiceUnitTests
gen.WriteServiceUtilsHeader(
"gpu/command_buffer/service/webgpu_cmd_validation_autogen.h")
gen.WriteServiceUtilsImplementation(
"gpu/command_buffer/service/"
"webgpu_cmd_validation_implementation_autogen.h")
build_cmd_buffer_lib.Format(gen.generated_cpp_filenames, output_dir,
chromium_root_dir)
if gen.errors > 0:
print "build_webgpu_cmd_buffer.py: Failed with %d errors" % gen.errors
return 1
check_failed_filenames = []
if options.check:
for filename in gen.generated_cpp_filenames:
if not filecmp.cmp(os.path.join(output_dir, filename),
os.path.join(chromium_root_dir, filename)):
check_failed_filenames.append(filename)
if len(check_failed_filenames) > 0:
print 'Please run gpu/command_buffer/build_webgpu_cmd_buffer.py'
print 'Failed check on autogenerated command buffer files:'
for filename in check_failed_filenames:
print filename
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 2,379 |
2,061 |
# -*- coding: utf-8 -*-
from macropy.core.macros import Macros
from macropy.core.hquotes import macros, hq, u, ast_literal
macros = Macros() # noqa: F811
@macros.expr
def log(tree, exact_src, **kw):
new_tree = hq[wrap(u[exact_src(tree)], ast_literal[tree])]
return new_tree
def wrap(txt, x):
print(txt + " -> " + repr(x))
return x
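# Hypothetical usage sketch (illustration only; requires MacroPy's import hooks to be
# active and this module to be imported as a macro module -- module name is a placeholder):
#
#     from <this_module> import macros, log
#     y = log[1 + 2]        # prints "1 + 2 -> 3" and binds y = 3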
| 154 |
6,098 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.transforms.preprocessing import H2OScaler
from h2o.estimators.random_forest import H2ORandomForestEstimator
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RandomizedSearchCV
from h2o.cross_validation import H2OKFold
from h2o.model.regression import h2o_r2_score
from sklearn.metrics.scorer import make_scorer
from scipy.stats import randint
def scale_svd_rf_pipe():
from h2o.transforms.decomposition import H2OSVD
print("Importing USArrests.csv data...")
arrests = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
# build transformation pipeline using sklearn's Pipeline and H2OSVD
pipe = Pipeline([
("standardize", H2OScaler()),
("svd", H2OSVD()),
("rf", H2ORandomForestEstimator())
])
params = {"standardize__center": [True, False],
"standardize__scale": [True, False],
"svd__nv": [2, 3],
"rf__ntrees": randint(50,60),
"rf__max_depth": randint(4,8),
"rf__min_rows": randint(5,10),
"svd__transform": ["none", "standardize"],
}
custom_cv = H2OKFold(arrests, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe,
params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(arrests[1:],arrests[0])
print(random_search.best_estimator_)
def scale_svd_rf_pipe_new_import():
from h2o.estimators.svd import H2OSingularValueDecompositionEstimator
print("Importing USArrests.csv data...")
arrests = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
print("Compare with SVD")
# build transformation pipeline using sklearn's Pipeline and H2OSingularValueDecompositionEstimator
pipe = Pipeline([
("standardize", H2OScaler()),
# a bare H2OSingularValueDecompositionEstimator() call will fail here; you have to call the init_for_pipeline method
("svd", H2OSingularValueDecompositionEstimator().init_for_pipeline()),
("rf", H2ORandomForestEstimator())
])
params = {"standardize__center": [True, False],
"standardize__scale": [True, False],
"svd__nv": [2, 3],
"rf__ntrees": randint(50,60),
"rf__max_depth": randint(4,8),
"rf__min_rows": randint(5,10),
"svd__transform": ["none", "standardize"],
}
custom_cv = H2OKFold(arrests, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe,
params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(arrests[1:], arrests[0])
print(random_search.best_estimator_)
if __name__ == "__main__":
pyunit_utils.standalone_test(scale_svd_rf_pipe)
pyunit_utils.standalone_test(scale_svd_rf_pipe_new_import)
else:
scale_svd_rf_pipe()
scale_svd_rf_pipe_new_import()
| 1,932 |
577 |
<gh_stars>100-1000
"""generate code for exceptions and for the types module"""
template1 = ' %(name)s = new PyString("%(name)s");'
template2 = ' dict.__setitem__("%(name)s", Py.%(name)s);'
template3 = '''\
%(name)s = new PyTuple(new PyObject[]
{%(values)s});
'''
template4 = '''\
tmp = exceptions.__findattr__("%(name)s");
if (tmp != null) %(name)s = tmp;'''
import exceptions, types, string
excs = {}
for name in dir(exceptions):
c = getattr(exceptions, name)
try:
if issubclass(c, exceptions.Exception):
excs[c] = {}
except:
pass
for key, value in excs.items():
for base in key.__bases__:
excs[base][key] = 1
import sys
fp = open('c:\\jpython\\JavaCode\\org\\python\\core\\excs.txt', 'w')
sys.stdout = fp
for exc in excs.keys():
print template4 % {'name': exc.__name__}
print
print
for exc in excs.keys():
print template2 % {'name': exc.__name__}
print
print
for exc, values in excs.items():
if len(values) == 0:
print template1 % {'name': exc.__name__}
for exc, values in excs.items():
if len(values) != 0:
vl = []
for key in values.keys():
vl.append('Py.'+key.__name__)
print template3 % {'name': exc.__name__, 'values':string.join(vl, ', ')}
print
print
sys.exit()
temp = """\
public static PyObject %(name)s;
public static PyException %(name)s(String message) {
return new PyException(Py.%(name)s, message);
}
"""
for exc, values in excs.items():
if len(values) == 0:
print temp % {'name': exc.__name__}
print
print
types = ['ArrayType', 'BuiltinFunctionType', 'BuiltinMethodType', 'ClassType', 'CodeType', 'ComplexType',
'DictType', 'DictionaryType', 'EllipsisType', 'FileType', 'FloatType', 'FrameType',
'FunctionType', 'InstanceType', 'IntType', 'LambdaType', 'ListType', 'LongType',
'MethodType', 'ModuleType', 'NoneType', 'SliceType', 'StringType',
'TracebackType', 'TupleType', 'TypeType', 'UnboundMethodType', 'XRangeType']
line = '\t\tdict.__setitem__("%(name)sType", PyJavaClass.lookup(Py%(name)s.class));'
for name in types:
name = name[:-4]
print line % {'name':name}
fp.close()
| 870 |
794 |
<filename>android/modules/module_ops/src/main/java/github/tornaco/thanos/android/ops/model/OpGroup.java<gh_stars>100-1000
package github.tornaco.thanos.android.ops.model;
import androidx.annotation.NonNull;
import java.util.List;
public class OpGroup implements Comparable<OpGroup> {
private OpsTemplate opsTemplate;
private List<Op> opList;
public OpGroup(OpsTemplate opsTemplate, List<Op> opList) {
this.opsTemplate = opsTemplate;
this.opList = opList;
}
public boolean isEmpty() {
return opList == null || opList.isEmpty();
}
@Override
public int compareTo(@NonNull OpGroup opGroup) {
return Integer.compare(opsTemplate.sort, opGroup.opsTemplate.sort);
}
public OpsTemplate getOpsTemplate() {
return this.opsTemplate;
}
public List<Op> getOpList() {
return this.opList;
}
public String toString() {
return "OpGroup(opsTemplate=" + this.getOpsTemplate() + ", opList=" + this.getOpList() + ")";
}
}
| 386 |
4,879 |
#include "generator/mwm_diff/diff.hpp"
#include "coding/buffered_file_writer.hpp"
#include "coding/file_reader.hpp"
#include "coding/file_writer.hpp"
#include "coding/reader.hpp"
#include "coding/write_to_sink.hpp"
#include "coding/writer.hpp"
#include "coding/zlib.hpp"
#include "base/assert.hpp"
#include "base/cancellable.hpp"
#include "base/checked_cast.hpp"
#include "base/logging.hpp"
#include <cstdint>
#include <iterator>
#include <vector>
#include "3party/bsdiff-courgette/bsdiff/bsdiff.h"
using namespace std;
namespace
{
enum Version
{
// Format Version 0: bsdiff+gzip.
VERSION_V0 = 0,
VERSION_LATEST = VERSION_V0
};
bool MakeDiffVersion0(FileReader & oldReader, FileReader & newReader, FileWriter & diffFileWriter)
{
vector<uint8_t> diffBuf;
MemWriter<vector<uint8_t>> diffMemWriter(diffBuf);
auto const status = bsdiff::CreateBinaryPatch(oldReader, newReader, diffMemWriter);
if (status != bsdiff::BSDiffStatus::OK)
{
LOG(LERROR, ("Could not create patch with bsdiff:", status));
return false;
}
using Deflate = coding::ZLib::Deflate;
Deflate deflate(Deflate::Format::ZLib, Deflate::Level::BestCompression);
vector<uint8_t> deflatedDiffBuf;
deflate(diffBuf.data(), diffBuf.size(), back_inserter(deflatedDiffBuf));
// A basic header that holds only version.
WriteToSink(diffFileWriter, static_cast<uint32_t>(VERSION_V0));
diffFileWriter.Write(deflatedDiffBuf.data(), deflatedDiffBuf.size());
return true;
}
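// Note (summarizing the function above): a version-0 diff file consists of a uint32
// format version followed by the zlib-deflated bsdiff patch; ApplyDiffVersion0 below
// reverses the same two steps.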
generator::mwm_diff::DiffApplicationResult ApplyDiffVersion0(
FileReader & oldReader, FileWriter & newWriter, ReaderSource<FileReader> & diffFileSource,
base::Cancellable const & cancellable)
{
using generator::mwm_diff::DiffApplicationResult;
vector<uint8_t> deflatedDiff(base::checked_cast<size_t>(diffFileSource.Size()));
diffFileSource.Read(deflatedDiff.data(), deflatedDiff.size());
using Inflate = coding::ZLib::Inflate;
Inflate inflate(Inflate::Format::ZLib);
vector<uint8_t> diffBuf;
inflate(deflatedDiff.data(), deflatedDiff.size(), back_inserter(diffBuf));
// Our bsdiff assumes that both the old mwm and the diff files are correct and
// does no checks when using its readers.
// Yet sometimes we observe corrupted files in the logs, and to avoid
// crashes from such files the exception-throwing version of MemReader is used here.
// |oldReader| is a FileReader so it throws exceptions too but we
// are more confident in the uncorrupted status of the old file because
// its checksum is compared to the one stored in the diff file.
MemReaderWithExceptions diffMemReader(diffBuf.data(), diffBuf.size());
auto const status = bsdiff::ApplyBinaryPatch(oldReader, newWriter, diffMemReader, cancellable);
if (status == bsdiff::BSDiffStatus::CANCELLED)
{
LOG(LDEBUG, ("Diff application has been cancelled"));
return DiffApplicationResult::Cancelled;
}
if (status == bsdiff::BSDiffStatus::OK)
return DiffApplicationResult::Ok;
LOG(LERROR, ("Could not apply patch with bsdiff:", status));
return DiffApplicationResult::Failed;
}
} // namespace
namespace generator
{
namespace mwm_diff
{
bool MakeDiff(string const & oldMwmPath, string const & newMwmPath, string const & diffPath)
{
try
{
FileReader oldReader(oldMwmPath);
FileReader newReader(newMwmPath);
FileWriter diffFileWriter(diffPath);
switch (VERSION_LATEST)
{
case VERSION_V0: return MakeDiffVersion0(oldReader, newReader, diffFileWriter);
default:
LOG(LERROR,
("Making mwm diffs with diff format version", VERSION_LATEST, "is not implemented"));
}
}
catch (Reader::Exception const & e)
{
LOG(LERROR, ("Could not open file when creating a patch:", e.Msg()));
return false;
}
catch (Writer::Exception const & e)
{
LOG(LERROR, ("Could not open file when creating a patch:", e.Msg()));
return false;
}
return false;
}
DiffApplicationResult ApplyDiff(string const & oldMwmPath, string const & newMwmPath,
string const & diffPath, base::Cancellable const & cancellable)
{
try
{
FileReader oldReader(oldMwmPath);
BufferedFileWriter newWriter(newMwmPath);
FileReader diffFileReader(diffPath);
ReaderSource<FileReader> diffFileSource(diffFileReader);
auto const version = ReadPrimitiveFromSource<uint32_t>(diffFileSource);
switch (version)
{
case VERSION_V0:
return ApplyDiffVersion0(oldReader, newWriter, diffFileSource, cancellable);
default:
LOG(LERROR, ("Unknown version format of mwm diff:", version));
return DiffApplicationResult::Failed;
}
}
catch (Reader::Exception const & e)
{
LOG(LERROR, ("Could not open file for reading when applying a patch:", e.Msg()));
}
catch (Writer::Exception const & e)
{
LOG(LERROR, ("Could not open file for writing when applying a patch:", e.Msg()));
}
return cancellable.IsCancelled() ? DiffApplicationResult::Cancelled
: DiffApplicationResult::Failed;
}
string DebugPrint(DiffApplicationResult const & result)
{
switch (result)
{
case DiffApplicationResult::Ok: return "Ok";
case DiffApplicationResult::Failed: return "Failed";
case DiffApplicationResult::Cancelled: return "Cancelled";
}
UNREACHABLE();
}
} // namespace mwm_diff
} // namespace generator
| 1,858 |
330 |
<filename>runners/image_editing.py
import os
import numpy as np
from tqdm import tqdm
import torch
import torchvision.utils as tvu
from models.diffusion import Model
from functions.process_data import *
def get_beta_schedule(*, beta_start, beta_end, num_diffusion_timesteps):
betas = np.linspace(beta_start, beta_end,
num_diffusion_timesteps, dtype=np.float64)
assert betas.shape == (num_diffusion_timesteps,)
return betas
def extract(a, t, x_shape):
"""Extract coefficients from a based on t and reshape to make it
broadcastable with x_shape."""
bs, = t.shape
assert x_shape[0] == bs
out = torch.gather(torch.tensor(a, dtype=torch.float, device=t.device), 0, t.long())
assert out.shape == (bs,)
out = out.reshape((bs,) + (1,) * (len(x_shape) - 1))
return out
def image_editing_denoising_step_flexible_mask(x, t, *,
model,
logvar,
betas):
"""
Sample from p(x_{t-1} | x_t)
"""
alphas = 1.0 - betas
alphas_cumprod = alphas.cumprod(dim=0)
model_output = model(x, t)
weighted_score = betas / torch.sqrt(1 - alphas_cumprod)
mean = extract(1 / torch.sqrt(alphas), t, x.shape) * (x - extract(weighted_score, t, x.shape) * model_output)
logvar = extract(logvar, t, x.shape)
noise = torch.randn_like(x)
mask = 1 - (t == 0).float()
mask = mask.reshape((x.shape[0],) + (1,) * (len(x.shape) - 1))
sample = mean + mask * torch.exp(0.5 * logvar) * noise
sample = sample.float()
return sample
class Diffusion(object):
def __init__(self, args, config, device=None):
self.args = args
self.config = config
if device is None:
device = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
self.device = device
self.model_var_type = config.model.var_type
betas = get_beta_schedule(
beta_start=config.diffusion.beta_start,
beta_end=config.diffusion.beta_end,
num_diffusion_timesteps=config.diffusion.num_diffusion_timesteps
)
self.betas = torch.from_numpy(betas).float().to(self.device)
self.num_timesteps = betas.shape[0]
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
posterior_variance = betas * \
(1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
if self.model_var_type == "fixedlarge":
self.logvar = np.log(np.append(posterior_variance[1], betas[1:]))
elif self.model_var_type == 'fixedsmall':
self.logvar = np.log(np.maximum(posterior_variance, 1e-20))
def image_editing_sample(self):
print("Loading model")
if self.config.data.dataset == "LSUN":
if self.config.data.category == "bedroom":
url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/bedroom.ckpt"
elif self.config.data.category == "church_outdoor":
url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/church_outdoor.ckpt"
elif self.config.data.dataset == "CelebA_HQ":
url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/celeba_hq.ckpt"
else:
raise ValueError
model = Model(self.config)
ckpt = torch.hub.load_state_dict_from_url(url, map_location=self.device)
model.load_state_dict(ckpt)
model.to(self.device)
model = torch.nn.DataParallel(model)
print("Model loaded")
ckpt_id = 0
download_process_data(path="colab_demo")
n = self.config.sampling.batch_size
model.eval()
print("Start sampling")
with torch.no_grad():
name = self.args.npy_name
[mask, img] = torch.load("colab_demo/{}.pth".format(name))
mask = mask.to(self.config.device)
img = img.to(self.config.device)
img = img.unsqueeze(dim=0)
img = img.repeat(n, 1, 1, 1)
x0 = img
tvu.save_image(x0, os.path.join(self.args.image_folder, f'original_input.png'))
x0 = (x0 - 0.5) * 2.
for it in range(self.args.sample_step):
e = torch.randn_like(x0)
total_noise_levels = self.args.t
a = (1 - self.betas).cumprod(dim=0)
x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
tvu.save_image((x + 1) * 0.5, os.path.join(self.args.image_folder, f'init_{ckpt_id}.png'))
with tqdm(total=total_noise_levels, desc="Iteration {}".format(it)) as progress_bar:
for i in reversed(range(total_noise_levels)):
t = (torch.ones(n) * i).to(self.device)
x_ = image_editing_denoising_step_flexible_mask(x, t=t, model=model,
logvar=self.logvar,
betas=self.betas)
x = x0 * a[i].sqrt() + e * (1.0 - a[i]).sqrt()
x[:, (mask != 1.)] = x_[:, (mask != 1.)]
# added intermediate step vis
if (i - 99) % 100 == 0:
tvu.save_image((x + 1) * 0.5, os.path.join(self.args.image_folder,
f'noise_t_{i}_{it}.png'))
progress_bar.update(1)
x0[:, (mask != 1.)] = x[:, (mask != 1.)]
torch.save(x, os.path.join(self.args.image_folder,
f'samples_{it}.pth'))
tvu.save_image((x + 1) * 0.5, os.path.join(self.args.image_folder,
f'samples_{it}.png'))
| 3,270 |
1,600 |
<filename>chapter-07/recipe-04/cxx-example/src/main.cpp
../../../../chapter-04/recipe-02/cxx-example/main.cpp
| 44 |
677 |
<gh_stars>100-1000
{
"resourceType": "ValueSet",
"id": "us-core-narrative-status",
"text": {
"status": "generated",
"div": "<div xmlns=\"http://www.w3.org/1999/xhtml\"><h2>Narrative Status</h2><div><p>This value set limits the text status for the resource narrative.</p>\n</div><p><b>Copyright Statement:</b> HL7</p><p>This value set includes codes from the following code systems:</p><ul><li>Include these codes as defined in <a href=\"http://hl7.org/fhir/STU3/codesystem-narrative-status.html\"><code>http://hl7.org/fhir/narrative-status</code></a><table class=\"none\"><tr><td style=\"white-space:nowrap\"><b>Code</b></td><td><b>Display</b></td></tr><tr><td><a href=\"http://hl7.org/fhir/STU3/codesystem-narrative-status.html#narrative-status-additional\">additional</a></td><td>additional</td><td>The contents of the narrative may contain additional information not found in the structured data. Note that there is no computable way to determine what the extra information is, other than by human inspection</td></tr><tr><td><a href=\"http://hl7.org/fhir/STU3/codesystem-narrative-status.html#narrative-status-generated\">generated</a></td><td>generated</td><td>The contents of the narrative are entirely generated from the structured data in the content.</td></tr></table></li></ul></div>"
},
"url": "http://hl7.org/fhir/us/core/ValueSet/us-core-narrative-status",
"version": "2.0.0",
"name": "Narrative Status",
"status": "draft",
"date": "2018-12-04T10:36:33+11:00",
"publisher": "HL7 US Realm Steering Committee",
"contact": [
{
"telecom": [
{
"system": "other",
"value": "http://hl7.org/fhir"
}
]
}
],
"description": "This value set limits the text status for the resource narrative.",
"jurisdiction": [
{
"coding": [
{
"system": "urn:iso:std:iso:3166",
"code": "US",
"display": "United States of America"
}
]
}
],
"copyright": "HL7",
"compose": {
"include": [
{
"system": "http://hl7.org/fhir/narrative-status",
"concept": [
{
"code": "additional",
"display": "additional"
},
{
"code": "generated",
"display": "generated"
}
]
}
]
}
}
| 1,285 |
679 |
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_fpicker.hxx"
//------------------------------------------------------------------------
// includes
//------------------------------------------------------------------------
#include <osl/diagnose.h>
#include <rtl/ustrbuf.hxx>
#include "resourceprovider.hxx"
#include <vos/mutex.hxx>
#include <vcl/svapp.hxx>
#ifndef _TOOLS_SIMPLERESMGR_HXX
#include <tools/simplerm.hxx>
#endif
#include <com/sun/star/ui/dialogs/CommonFilePickerElementIds.hpp>
#include <com/sun/star/ui/dialogs/ExtendedFilePickerElementIds.hpp>
#include <svtools/svtools.hrc>
//------------------------------------------------------------
// namespace directives
//------------------------------------------------------------
using rtl::OUString;
using namespace ::com::sun::star::ui::dialogs::ExtendedFilePickerElementIds;
using namespace ::com::sun::star::ui::dialogs::CommonFilePickerElementIds;
//------------------------------------------------------------
//
//------------------------------------------------------------
#define FOLDERPICKER_TITLE 500
#define FOLDER_PICKER_DEF_DESCRIPTION 501
//------------------------------------------------------------
// we have to translate control ids to resource ids
//------------------------------------------------------------
struct _Entry
{
sal_Int32 ctrlId;
sal_Int16 resId;
};
_Entry CtrlIdToResIdTable[] = {
{ CHECKBOX_AUTOEXTENSION, STR_SVT_FILEPICKER_AUTO_EXTENSION },
{ CHECKBOX_PASSWORD, STR_SVT_FILEPICKER_PASSWORD },
{ CHECKBOX_FILTEROPTIONS, STR_SVT_FILEPICKER_FILTER_OPTIONS },
{ CHECKBOX_READONLY, STR_SVT_FILEPICKER_READONLY },
{ CHECKBOX_LINK, STR_SVT_FILEPICKER_INSERT_AS_LINK },
{ CHECKBOX_PREVIEW, STR_SVT_FILEPICKER_SHOW_PREVIEW },
{ PUSHBUTTON_PLAY, STR_SVT_FILEPICKER_PLAY },
{ LISTBOX_VERSION_LABEL, STR_SVT_FILEPICKER_VERSION },
{ LISTBOX_TEMPLATE_LABEL, STR_SVT_FILEPICKER_TEMPLATES },
{ LISTBOX_IMAGE_TEMPLATE_LABEL, STR_SVT_FILEPICKER_IMAGE_TEMPLATE },
{ CHECKBOX_SELECTION, STR_SVT_FILEPICKER_SELECTION },
{ FOLDERPICKER_TITLE, STR_SVT_FOLDERPICKER_DEFAULT_TITLE },
{ FOLDER_PICKER_DEF_DESCRIPTION, STR_SVT_FOLDERPICKER_DEFAULT_DESCRIPTION }
};
const sal_Int32 SIZE_TABLE = sizeof( CtrlIdToResIdTable ) / sizeof( _Entry );
//------------------------------------------------------------
//
//------------------------------------------------------------
sal_Int16 CtrlIdToResId( sal_Int32 aControlId )
{
sal_Int16 aResId = -1;
for ( sal_Int32 i = 0; i < SIZE_TABLE; i++ )
{
if ( CtrlIdToResIdTable[i].ctrlId == aControlId )
{
aResId = CtrlIdToResIdTable[i].resId;
break;
}
}
return aResId;
}
//------------------------------------------------------------
//
//------------------------------------------------------------
class CResourceProvider_Impl
{
public:
//-------------------------------------
//
//-------------------------------------
CResourceProvider_Impl( )
{
const ::vos::OGuard aGuard( Application::GetSolarMutex() );
com::sun::star::lang::Locale aLoc( Application::GetSettings().GetUILocale() );
m_ResMgr = new SimpleResMgr( CREATEVERSIONRESMGR_NAME( fps_office ), aLoc );
}
//-------------------------------------
//
//-------------------------------------
~CResourceProvider_Impl( )
{
delete m_ResMgr;
}
//-------------------------------------
//
//-------------------------------------
OUString getResString( sal_Int16 aId )
{
OUString aResOUString;
try
{
OSL_ASSERT( m_ResMgr );
// translate the control id to a resource id
sal_Int16 aResId = CtrlIdToResId( aId );
if ( aResId > -1 )
aResOUString = m_ResMgr->ReadString( aResId );
}
catch(...)
{
}
return aResOUString;
}
public:
SimpleResMgr* m_ResMgr;
};
//------------------------------------------------------------
//
//------------------------------------------------------------
CResourceProvider::CResourceProvider( ) :
m_pImpl( new CResourceProvider_Impl() )
{
}
//------------------------------------------------------------
//
//------------------------------------------------------------
CResourceProvider::~CResourceProvider( )
{
delete m_pImpl;
}
//------------------------------------------------------------
//
//------------------------------------------------------------
OUString CResourceProvider::getResString( sal_Int16 aId )
{
return m_pImpl->getResString( aId );
}
| 2,126 |
778 |
<gh_stars>100-1000
#ifndef VEXCL_SPARSE_MATRIX_HPP
#define VEXCL_SPARSE_MATRIX_HPP
#include <vexcl/sparse/ell.hpp>
#include <vexcl/sparse/csr.hpp>
namespace vex {
namespace sparse {
template <typename Val, typename Col = int, typename Ptr = Col>
class matrix {
public:
typedef Val value_type;
typedef Val val_type;
typedef Col col_type;
typedef Ptr ptr_type;
template <class PtrRange, class ColRange, class ValRange>
matrix(
const std::vector<backend::command_queue> &q,
size_t nrows, size_t ncols,
const PtrRange &ptr,
const ColRange &col,
const ValRange &val,
bool fast_setup = true
) : q(q[0])
{
if (is_cpu(q[0])) {
Acpu = std::make_shared<Csr>(q, nrows, ncols, ptr, col, val);
} else {
Agpu = std::make_shared<Ell>(q, nrows, ncols, ptr, col, val, fast_setup);
}
}
// Dummy matrix
matrix() {}
// Dummy matrix; used internally to pass empty parameters to kernels.
matrix(const backend::command_queue &q) : q(q) {}
template <class Expr>
friend
typename std::enable_if<
boost::proto::matches<
typename boost::proto::result_of::as_expr<Expr>::type,
vector_expr_grammar
>::value,
matrix_vector_product<matrix, Expr>
>::type
operator*(const matrix &A, const Expr &x) {
return matrix_vector_product<matrix, Expr>(A, x);
}
template <class Vector>
static void terminal_preamble(const Vector &x, backend::source_generator &src,
const backend::command_queue &q, const std::string &prm_name,
detail::kernel_generator_state_ptr state)
{
if (is_cpu(q)) {
Csr::terminal_preamble(x, src, q, prm_name, state);
} else {
Ell::terminal_preamble(x, src, q, prm_name, state);
}
}
template <class Vector>
static void local_terminal_init(const Vector &x, backend::source_generator &src,
const backend::command_queue &q, const std::string &prm_name,
detail::kernel_generator_state_ptr state)
{
if (is_cpu(q)) {
Csr::local_terminal_init(x, src, q, prm_name, state);
} else {
Ell::local_terminal_init(x, src, q, prm_name, state);
}
}
template <class Vector>
static void kernel_param_declaration(const Vector &x, backend::source_generator &src,
const backend::command_queue &q, const std::string &prm_name,
detail::kernel_generator_state_ptr state)
{
if (is_cpu(q)) {
Csr::kernel_param_declaration(x, src, q, prm_name, state);
} else {
Ell::kernel_param_declaration(x, src, q, prm_name, state);
}
}
template <class Vector>
static void partial_vector_expr(const Vector &x, backend::source_generator &src,
const backend::command_queue &q, const std::string &prm_name,
detail::kernel_generator_state_ptr state)
{
if (is_cpu(q)) {
Csr::partial_vector_expr(x, src, q, prm_name, state);
} else {
Ell::partial_vector_expr(x, src, q, prm_name, state);
}
}
template <class Vector>
void kernel_arg_setter(const Vector &x,
backend::kernel &kernel, unsigned part, size_t index_offset,
detail::kernel_generator_state_ptr state) const
{
if (is_cpu(q)) {
if (Acpu) {
Acpu->kernel_arg_setter(x, kernel, part, index_offset, state);
} else {
Csr dummy_A(q);
dummy_A.kernel_arg_setter(x, kernel, part, index_offset, state);
}
} else {
if (Agpu) {
Agpu->kernel_arg_setter(x, kernel, part, index_offset, state);
} else {
Ell dummy_A(q);
dummy_A.kernel_arg_setter(x, kernel, part, index_offset, state);
}
}
}
template <class Vector>
void expression_properties(const Vector &x,
std::vector<backend::command_queue> &queue_list,
std::vector<size_t> &partition,
size_t &size) const
{
if (Acpu) {
Acpu->expression_properties(x, queue_list, partition, size);
} else if (Agpu) {
Agpu->expression_properties(x, queue_list, partition, size);
}
}
size_t rows() const { return Acpu ? Acpu->rows() : Agpu->rows(); }
size_t cols() const { return Acpu ? Acpu->cols() : Agpu->cols(); }
size_t nonzeros() const { return Acpu ? Acpu->nonzeros() : Agpu->nonzeros(); }
private:
typedef ell<Val, Col, Ptr> Ell;
typedef csr<Val, Col, Ptr> Csr;
backend::command_queue q;
std::shared_ptr<Ell> Agpu;
std::shared_ptr<Csr> Acpu;
};
} // namespace sparse
} // namespace vex
#endif
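// A minimal usage sketch (illustrative only, not part of this header; the context
// setup and the CSR arrays are assumptions):
//
//     vex::Context ctx(vex::Filter::Env);
//     std::vector<int>    ptr = {0, 2, 3};        // 2x2 matrix with 3 nonzeros
//     std::vector<int>    col = {0, 1, 1};
//     std::vector<double> val = {2.0, 1.0, 3.0};
//     vex::sparse::matrix<double> A(ctx.queue(), 2, 2, ptr, col, val);
//     vex::vector<double> x(ctx.queue(), 2), y(ctx.queue(), 2);
//     x = 1.0;
//     y = A * x;   // dispatches to the Csr kernel on CPU devices, Ell on GPUs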
| 2,800 |
2,360 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyopencl(PythonPackage):
"""Python wrapper for OpenCL."""
homepage = "https://documen.tician.de/pyopencl/"
pypi = "pyopencl/pyopencl-2020.2.2.tar.gz"
maintainers = ['matthiasdiener']
version('2020.2.2', sha256='31fcc79fb6862998e98d91a624c0bd4f0ab4c5d418d199912d4d312c64e437ec')
depends_on('ocl-icd', type=('build', 'link', 'run'))
depends_on('opencl', type=('build', 'link', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
@run_before('build')
def prepare(self):
cl_prefix = self.spec['ocl-icd'].prefix
python('configure.py', '--cl-inc-dir=' + cl_prefix.include,
'--cl-lib-dir=' + cl_prefix.lib)
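# A typical installation flow (illustrative; assumes this recipe lives in the
# active Spack repository):
#
#   spack install py-pyopencl
#
# During the build, prepare() runs configure.py so that pyopencl links against
# the ocl-icd loader from the concretized spec.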
| 561 |
640 |
import readchar
class Event(object):
pass
class KeyPressed(Event):
def __init__(self, value):
self.value = value
class Repaint(Event):
pass
class KeyEventGenerator(object):
def __init__(self, key_generator=None):
self._key_gen = key_generator or readchar.readkey
def next(self):
return KeyPressed(self._key_gen())
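# A minimal usage sketch (illustrative; assumes an interactive terminal so that
# readchar.readkey() can block for a keypress):
if __name__ == "__main__":
    generator = KeyEventGenerator()
    event = generator.next()  # blocks until a key is pressed
    if isinstance(event, KeyPressed):
        print(event.value)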
| 144 |
4,612 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Commands for reporting test success or failure to the manager.
@since: 12.3
"""
from twisted.protocols.amp import Boolean, Command, ListOf, Unicode
NativeString = Unicode
class AddSuccess(Command):
"""
Add a success.
"""
arguments = [(b"testName", NativeString())]
response = [(b"success", Boolean())]
class AddError(Command):
"""
Add an error.
"""
arguments = [
(b"testName", NativeString()),
(b"error", NativeString()),
(b"errorClass", NativeString()),
(b"frames", ListOf(NativeString())),
]
response = [(b"success", Boolean())]
class AddFailure(Command):
"""
Add a failure.
"""
arguments = [
(b"testName", NativeString()),
(b"fail", NativeString()),
(b"failClass", NativeString()),
(b"frames", ListOf(NativeString())),
]
response = [(b"success", Boolean())]
class AddSkip(Command):
"""
Add a skip.
"""
arguments = [(b"testName", NativeString()), (b"reason", NativeString())]
response = [(b"success", Boolean())]
class AddExpectedFailure(Command):
"""
Add an expected failure.
"""
arguments = [
(b"testName", NativeString()),
(b"error", NativeString()),
(b"todo", NativeString()),
]
response = [(b"success", Boolean())]
class AddUnexpectedSuccess(Command):
"""
Add an unexpected success.
"""
arguments = [(b"testName", NativeString()), (b"todo", NativeString())]
response = [(b"success", Boolean())]
class TestWrite(Command):
"""
Write test log.
"""
arguments = [(b"out", NativeString())]
response = [(b"success", Boolean())]
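# A minimal usage sketch (illustrative only, not part of this module): a
# hypothetical worker-side AMP protocol that reports a passing test to the
# manager with the AddSuccess command defined above.
from twisted.protocols.amp import AMP
class ExampleWorkerReporter(AMP):
    """
    Illustrative reporter; the real responders live in trial's distributed runner.
    """
    def reportSuccess(self, testName):
        # Fires with {'success': True} once the manager acknowledges the result.
        return self.callRemote(AddSuccess, testName=testName)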
| 672 |
794 |
package github.tornaco.android.thanos.widget;
import android.annotation.SuppressLint;
import android.content.Context;
import android.util.TypedValue;
import androidx.annotation.NonNull;
import androidx.annotation.StyleRes;
import androidx.appcompat.app.AlertDialog;
import androidx.appcompat.widget.AppCompatEditText;
import github.tornaco.android.thanos.module.common.R;
import util.Consumer;
public class EditTextDialog extends AlertDialog {
private AppCompatEditText editText;
public static void show(Context context, String title, Consumer<String> stringConsumer) {
new EditTextDialog(context, title, stringConsumer).show();
}
static int resolveDialogTheme(@NonNull Context context, @StyleRes int resid) {
// Check to see if this resourceId has a valid package ID.
if (((resid >>> 24) & 0x000000ff) >= 0x00000001) { // start of real resource IDs.
return resid;
} else {
TypedValue outValue = new TypedValue();
context.getTheme().resolveAttribute(R.attr.alertDialogTheme, outValue, true);
return outValue.resourceId;
}
}
@SuppressLint("InflateParams")
protected EditTextDialog(Context context, String title, Consumer<String> stringConsumer) {
super(context, resolveDialogTheme(context, 0));
setView(getLayoutInflater().inflate(R.layout.common_dialog_edittext, null, false));
setCancelable(false);
setTitle(title);
setButton(
BUTTON_POSITIVE,
context.getString(android.R.string.ok),
(dialog, which) -> {
String content = editText.getEditableText().toString();
stringConsumer.accept(content);
});
setButton(BUTTON_NEGATIVE, context.getString(android.R.string.cancel), (dialog, which) -> {});
}
@Override
protected void onStart() {
super.onStart();
editText = findViewById(R.id.editor);
}
}
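// A minimal usage sketch (illustrative; the title string and lambda body are
// assumptions, any themed Context works):
//
//     EditTextDialog.show(context, "Rename", newName -> {
//         // react to the text the user confirmed with OK
//     });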
| 636 |
521 |
/* $Id: scsiinline.h $ */
/** @file
* VirtualBox: SCSI inline helpers used by devices, drivers, etc.
*/
/*
* Copyright (C) 2006-2017 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___VBox_scsiinline_h
#define ___VBox_scsiinline_h
#include <iprt/stdint.h>
/** @defgroup grp_scsi_inline The SCSI inline helpers
* @{
*/
/**
* Converts a given 16bit value to big endian and stores it in the given buffer.
*
* @returns nothing.
* @param pbBuf The buffer to store the value into.
* @param u16Val The value to store.
*/
DECLINLINE(void) scsiH2BE_U16(uint8_t *pbBuf, uint16_t u16Val)
{
pbBuf[0] = u16Val >> 8;
pbBuf[1] = u16Val;
}
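/* Worked example (illustrative): scsiH2BE_U16(pbBuf, 0x1234) stores
 * pbBuf[0] = 0x12 and pbBuf[1] = 0x34; scsiBE2H_U16() on that buffer returns
 * 0x1234 again. The wider variants below follow the same byte layout. */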
/**
* Converts a given 24bit value to big endian and stores it in the given buffer.
*
* @returns nothing.
* @param pbBuf The buffer to store the value into.
* @param u32Val The value to store.
*/
DECLINLINE(void) scsiH2BE_U24(uint8_t *pbBuf, uint32_t u32Val)
{
pbBuf[0] = u32Val >> 16;
pbBuf[1] = u32Val >> 8;
pbBuf[2] = u32Val;
}
/**
* Converts a given 32bit value to big endian and stores it in the given buffer.
*
* @returns nothing.
* @param pbBuf The buffer to store the value into.
* @param u32Val The value to store.
*/
DECLINLINE(void) scsiH2BE_U32(uint8_t *pbBuf, uint32_t u32Val)
{
pbBuf[0] = u32Val >> 24;
pbBuf[1] = u32Val >> 16;
pbBuf[2] = u32Val >> 8;
pbBuf[3] = u32Val;
}
/**
* Converts a given 64bit value to big endian and stores it in the given buffer.
*
* @returns nothing.
* @param pbBuf The buffer to store the value into.
* @param u64Val The value to store.
*/
DECLINLINE(void) scsiH2BE_U64(uint8_t *pbBuf, uint64_t u64Val)
{
pbBuf[0] = u64Val >> 56;
pbBuf[1] = u64Val >> 48;
pbBuf[2] = u64Val >> 40;
pbBuf[3] = u64Val >> 32;
pbBuf[4] = u64Val >> 24;
pbBuf[5] = u64Val >> 16;
pbBuf[6] = u64Val >> 8;
pbBuf[7] = u64Val;
}
/**
* Returns a 16bit value read from the given buffer converted to host endianness.
*
* @returns The converted 16bit value.
* @param pbBuf The buffer to read the value from.
*/
DECLINLINE(uint16_t) scsiBE2H_U16(const uint8_t *pbBuf)
{
return (pbBuf[0] << 8) | pbBuf[1];
}
/**
* Returns a 24bit value read from the given buffer converted to host endianness.
*
* @returns The converted 24bit value as a 32bit unsigned integer.
* @param pbBuf The buffer to read the value from.
*/
DECLINLINE(uint32_t) scsiBE2H_U24(const uint8_t *pbBuf)
{
return (pbBuf[0] << 16) | (pbBuf[1] << 8) | pbBuf[2];
}
/**
* Returns a 32bit value read from the given buffer converted to host endianness.
*
* @returns The converted 32bit value.
* @param pbBuf The buffer to read the value from.
*/
DECLINLINE(uint32_t) scsiBE2H_U32(const uint8_t *pbBuf)
{
return (pbBuf[0] << 24) | (pbBuf[1] << 16) | (pbBuf[2] << 8) | pbBuf[3];
}
/**
* Returns a 64bit value read from the given buffer converted to host endianness.
*
* @returns The converted 64bit value.
* @param pbBuf The buffer to read the value from.
*/
DECLINLINE(uint64_t) scsiBE2H_U64(const uint8_t *pbBuf)
{
return ((uint64_t)pbBuf[0] << 56)
| ((uint64_t)pbBuf[1] << 48)
| ((uint64_t)pbBuf[2] << 40)
| ((uint64_t)pbBuf[3] << 32)
| ((uint64_t)pbBuf[4] << 24)
| ((uint64_t)pbBuf[5] << 16)
| ((uint64_t)pbBuf[6] << 8)
| (uint64_t)pbBuf[7];
}
/**
* Converts the given LBA number to the MSF (Minutes:Seconds:Frames) format
* and stores it in the given buffer.
*
* @returns nothing.
* @param pbBuf The buffer to store the value into.
* @param iLBA The LBA to convert.
*/
DECLINLINE(void) scsiLBA2MSF(uint8_t *pbBuf, uint32_t iLBA)
{
iLBA += 150;
pbBuf[0] = (iLBA / 75) / 60;
pbBuf[1] = (iLBA / 75) % 60;
pbBuf[2] = iLBA % 75;
}
/**
* Converts a MSF formatted address value read from the given buffer
* to an LBA number.
*
* @returns The LBA number.
* @param pbBuf The buffer to read the MSF formatted address
* from.
*/
DECLINLINE(uint32_t) scsiMSF2LBA(const uint8_t *pbBuf)
{
return (pbBuf[0] * 60 + pbBuf[1]) * 75 + pbBuf[2];
}
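/* Worked example (illustrative): scsiLBA2MSF(pbBuf, 16) adds the 150-frame
 * (2 second) lead-in offset, giving 166 frames = MSF 00:02:16; scsiMSF2LBA()
 * on that buffer returns the absolute frame address (0*60 + 2)*75 + 16 = 166. */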
/**
* Copies a given string to the given destination padding all unused space
* in the destination with spaces.
*
* @returns nothing.
* @param pbDst Where to store the string padded with spaces.
* @param pbSrc The string to copy.
* @param cbSize Size of the destination buffer.
*/
DECLINLINE(void) scsiPadStr(uint8_t *pbDst, const char *pbSrc, uint32_t cbSize)
{
uint32_t i;
for (i = 0; i < cbSize; i++)
{
if (*pbSrc)
pbDst[i] = *pbSrc++;
else
pbDst[i] = ' ';
}
}
/**
* Signed-char variant of scsiPadStr: copies a given string to the given
* destination padding all unused space in the destination with spaces.
*
* @returns nothing.
* @param pbDst Where to store the string padded with spaces.
* @param pbSrc The string to copy.
* @param cbSize Size of the destination buffer.
*/
DECLINLINE(void) scsiPadStrS(int8_t *pbDst, const char *pbSrc, uint32_t cbSize)
{
uint32_t i;
for (i = 0; i < cbSize; i++)
{
if (*pbSrc)
pbDst[i] = *pbSrc++;
else
pbDst[i] = ' ';
}
}
/** @} */
#endif
| 2,851 |