max_stars_count (int64, 301-224k) | text (string, lengths 6-1.05M) | token_count (int64, 3-727k) |
---|---|---|
544 | <gh_stars>100-1000
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.tenant.administration.site_properties import SiteProperties
from office365.sharepoint.tenant.administration.tenant import Tenant
from tests import test_admin_site_url, test_user_credentials
# Connect to the SharePoint tenant admin site with user credentials.
admin_client = ClientContext(test_admin_site_url).with_credentials(test_user_credentials)
tenant = Tenant(admin_client)
# Retrieve site collection properties (empty filter, starting at index 0).
result = tenant.get_site_properties_from_sharepoint_by_filters("", 0).execute_query()
for siteProps in result:  # type: SiteProperties
    print(siteProps.url)
| 179 |
504 | <reponame>steakknife/pcgeos
/***********************************************************************
*
* Copyright (c) Berkeley Softworks 1989 -- All Rights Reserved
*
* PROJECT: PCGEOS
* MODULE: Esp -- Symbol Handling Functions
* FILE: symbol.h
*
* AUTHOR: <NAME>: Aug 23, 1989
*
* REVISION HISTORY:
* Date Name Description
* ---- ---- -----------
* 8/23/89 ardeb Initial version
*
* DESCRIPTION:
* Interface definition for Sym module
*
*
* $Id: symbol.h,v 3.14 93/09/19 18:08:54 adam Exp $
*
***********************************************************************/
#ifndef _SYMBOL_H_
#define _SYMBOL_H_
#include <objfmt.h>
#include <localize.h>
/****************************************************************************
*
* Extra data kept for a class symbol (separated to keep Symbol small)
*
***************************************************************************/
typedef struct {
SymbolPtr methods; /* Enum type for methods */
SymbolPtr base; /* Structure type for base */
SymbolPtr vardata; /* Enum type for vardata tags */
Opaque bindings; /* BINDING symbols */
Opaque noreloc; /* List of things that should not be relocated
* for the class */
int flags; /* Flags for class: */
#define SYM_CLASS_MASTER 0x00000001 /* Top of a master group */
#define SYM_CLASS_VARIANT 0x00000002 /* Superclass unknown */
#define SYM_CLASS_CHECKING 0x00000004 /* Checking class for relativity (see
* CheckRelated in parse.y) */
#define SYM_CLASS_FORWARD 0x00000008 /* Class was declared forward by an
* ahead-of-its-time uses directive --
* it hasn't really been declared yet */
int numUsed; /* Number of classes whose methods are
* explicitly allowed to be bound to handlers
* for this class */
SymbolPtr used[LABEL_IN_STRUCT];
} ClassData;
/*****************************************************************************
*
* Extra data kept for a segment symbol
*
****************************************************************************/
typedef struct {
Opaque fixPriv; /* Data private to Fix module */
unsigned long align:8, /* Alignment (mask of bits to clear) */
comb:4; /* Combine type */
ID class; /* Segment class */
unsigned short offset; /* Offset into map block of descriptor */
SymbolPtr pair; /* Paired segment (for LMem segments) */
int inited; /* Non-zero if initialized (for LMem) */
int lastdot; /* Value of $ when segment last closed */
int checkLabel:1, /* Non-zero if should check for some sort of
* label at the current address when
* generating code */
blockStart:1; /* Non-zero if next instruction is the
* start of a basic block */
int lastLabel; /* Address of most-recently defined label for
* the segment */
word segment; /* Segment address, if absolute */
SymbolPtr first; /* Head of symbols-by-address list */
SymbolPtr last; /* Tail of symbols-by-address list */
SymbolPtr lastLine; /* Last line number entered */
VMBlockHandle lastSym; /* Last block in symbol chain (for write-out)*/
} SegData;
/*****************************************************************************
*
* Structure for an individual symbol. Each symbol has a name, a type, and
* some type-specific information.
*
****************************************************************************/
/*
* Type of a symbol
*/
typedef enum {
SYM_VAR, /* Variable */
SYM_LABEL, /* Regular label */
SYM_LOCALLABEL, /* Label local to a procedure. Uses the label data */
SYM_PROC, /* Procedure */
SYM_LINE, /* Line number mapping */
SYM_CLASS, /* Object class */
SYM_CHUNK, /* LMem chunk */
SYM_ONSTACK, /* Stack descriptor */
SYM_PROFILE_MARK, /* Basic-block profiling code */
SYM_LASTADDR, /* MARKER: Last symbol w/associated offset */
SYM_BITFIELD, /* Field in a RECORD */
SYM_FIELD, /* Field in a STRUC */
SYM_STRUCT, /* STRUC definition */
SYM_UNION, /* UNION definition */
SYM_MACRO, /* Macro definition */
SYM_NUMBER, /* = definition */
SYM_STRING, /* EQU definition */
SYM_SEGMENT, /* SEGMENT name */
SYM_GROUP, /* GROUP name */
SYM_ENUM, /* Member of enumerated type */
SYM_ETYPE, /* Enumerated type */
SYM_TYPE, /* Typedef */
SYM_METHOD, /* Method constant */
SYM_INSTVAR, /* Instance variable */
SYM_PUBLIC, /* Grossness to handle forward-referencing PUBLIC
* and GLOBAL directives */
SYM_RECORD, /* RECORD definition */
SYM_LOCAL, /* Local variable (stack-relative) */
SYM_BINDING, /* Binding of procedure to method */
SYM_VARDATA, /* VarData type */
SYM_INHERIT, /* Inherit-local-vars-from placeholder; name is source
* procedure */
SYM_PROTOMINOR /* ProtoMinor type */
} SymType;
#define SYM_ANY SYM_LASTADDR /* Type to pass to Sym_Find to tell it to
* find any type of symbol */
typedef struct _Symbol {
SymType type:16; /* Symbol's type */
short flags; /* Flags for the symbol (low 8 bits go
* into the object file): */
#define SYM_GLOBAL 0x0001 /* Symbol defined as global */
#define SYM_UNDEF 0x0002 /* Symbol not defined yet */
#define SYM_REF 0x0004 /* Symbol referenced */
#define SYM_NAMELESS 0x0020 /* Symbol's name is fake and not for
* human consumption */
#define SYM_NOWRITE 0x0100 /* Symbol is not to be written to
* the output file. Used for local
* symbols ($ and <n>$) to alert
* the fixup module it must do
* special things */
#define SYM_PERM 0x0200 /* Symbol's name is in permanent
* storage already */
ID name; /* Symbol's name */
struct _Symbol *segment; /* Segment in which it's defined */
/*
* Type-specific data. NOTE: These are all no more than 16 bytes long.
* They should be kept that way to keep memory usage "low".
*/
union {
/*
* Data left when symbol has been written to the file.
*/
struct {
int addr; /* Space left to keep the "address" part
* of the symbol data valid, allowing
* constants involving symbols to be
* written to the file correctly. */
unsigned short offset; /* Offset into block of ObjSym record */
VMBlockHandle block; /* Block in which ObjSym record resides */
} objsym;
/*
* THINGS WITH ADDRESSES (LOCALLABEL data defined later)
*/
struct addrcom { /* Common info for address-bearing
* symbols */
int offset; /* Offset of symbol */
struct _Symbol *next; /* Next in address chain */
} addrsym;
struct { /* Label info */
struct addrcom common;
int near; /* Non-zero if near label */
int unreach;/* Non-zero if unreferenced label is for
* unreachable code */
} label;
struct { /* Proc info */
struct addrcom common;
int flags; /* Flags about the procedure: */
#define SYM_NEAR 0x0001 /* Set if procedure near */
#define SYM_WEIRD 0x0002 /* Set if procedure contains an
* on_stack directive */
#define SYM_NO_JMP 0x0004 /* Set if procedure may not be jumped
* to */
#define SYM_NO_CALL 0x0008 /* Set if procedure may not be called */
#define SYM_STATIC 0x0010 /* Static method handler */
#define SYM_PRIVSTATIC 0x0020 /* Private static method handler */
#define SYM_DYNAMIC 0x0040 /* Dynamic method handler */
#define SYM_HANDLER 0x0080 /* Method handler */
#define SYM_HANDLER_MASK 0x0070 /* Mask of bits to indicate special
* method handler */
Opaque locals; /* Local symbols. Format known only
* to Sym module */
} proc;
struct { /* Variable info */
struct addrcom common;
TypePtr type; /* Type of variable */
} var;
struct { /* Line-number info */
struct addrcom common;
int line; /* Line number */
ID file; /* File name */
} line;
struct { /* Class record */
struct addrcom common;
struct _Symbol *super; /* Super class */
SymbolPtr instance;/* Structure type for instance */
ClassData *data; /* Other stuff used by Obj module */
} class;
struct { /* LMem chunk */
struct addrcom common;
TypePtr type; /* Type of data stored at chunk */
word handle; /* Handle to the chunk to be used when
* chunk referenced */
LocalizeInfo *loc;
} chunk;
struct { /* Stack descriptor */
struct addrcom common;
ID desc; /* Descriptor */
} onStack;
struct {
struct addrcom common;
word markType;
#define SYM_PROF_BBLOCK 1
#define SYM_PROF_COUNT 2
} profMark;
/*
* STRUCTURED TYPES
*/
struct typecom { /* Common info for types */
int size; /* Number of bytes in type */
TypePtr desc; /* Type descriptor describing the
* type, if any */
} typesym;
struct { /* Structure type info */
struct typecom common;
struct _Symbol *first; /* First field in type */
struct _Symbol *last; /* Last field in type */
} sType;
struct { /* Enumerated type info */
struct typecom common;
struct _Symbol *mems; /* List of members */
word firstVal;/* First value assigned to type */
word nextVal;/* Value for next member */
word incr; /* Increment */
byte flags;
#define SYM_ETYPE_PROTOMINOR 0x01 /* Set if members of the etype are
* affected by protominor symbols */
} eType;
struct { /* Typedef info */
struct typecom common;
TypePtr type; /* Description of type */
} typeDef;
struct { /* RECORD type info */
struct typecom common;
struct _Symbol *first; /* First field in type */
word mask; /* Mask for entire record */
} record;
/*
* TYPE ELEMENTS. The elements of a structured type are linked into
* a list through the u.eltsym.next field of this record. The
* final element of the type points back to the type of which
* all the elements are a part.
*/
struct eltcom { /* Type element-common data */
struct _Symbol *next; /* Next element in type */
} eltsym;
struct { /* Structure field info */
struct eltcom common;
TypePtr type; /* Field type */
Expr *value; /* Default initial value */
int offset; /* Byte offset of field in structure */
} field;
struct { /* Instance variable info */
struct eltcom common;
TypePtr type; /* Variable type */
Expr *value; /* Default initial value */
short offset; /* Byte offset of field in structure */
word flags; /* Flags for variable */
#define SYM_VAR_PUBLIC 0x0001 /* May be accessed by routines other
* than handler for owning class
* or descendant */
#define SYM_VAR_STATE 0x0002 /* Field stored in state block */
#define SYM_VAR_NORELOC 0x0004 /* Field should not be entered in the
* class's relocation table */
struct _Symbol *class; /* Class to which the variable belongs */
} instvar;
struct { /* Record field info */
struct eltcom common;
TypePtr type; /* Type if field not untyped */
Expr *value; /* Default value */
short offset; /* Bit offset of field in word */
short width; /* Width of field */
} bitField;
struct econst { /* Enum member info */
struct eltcom common;
word value; /* Value of this one */
struct _Symbol *protoMinor;
} econst;
struct { /* VarData member info */
struct econst common;
TypePtr type; /* Type of data stored with tag */
} varData;
struct { /* Method */
struct econst common;
word flags; /* Flags for method: */
#define SYM_METH_PUBLIC 0x0001 /* Publicly available */
#define SYM_METH_RANGE 0x0002 /* Method is actually the start of
* a range */
#define SYM_METH_RANGE_LENGTH 0xfffc /* Where the length of the exported
* range is stored */
#define SYM_METH_RANGE_LENGTH_OFFSET 2 /* Bit-shift to get to range length */
struct _Symbol *class; /* Class to which it belongs */
} method;
/*
* MISCELLANEOUS -- Macro/String stuff are opaque to avoid having
* the definitions for macro blocks in this file.
*/
struct { /* Numeric/expression equate */
Expr *value; /* Value of the symbol */
int rdonly; /* Non-zero if defined with EQU and hence
* should be considered read-only */
} equate;
struct { /* String equate */
void *value; /* The value, ready for interpolating */
} string;
struct { /* Macro */
void *text; /* The macro text */
int numArgs;/* Number of dummy parameters */
int numLocals; /* Number of local labels to define */
} macro;
struct { /* Procedure -> message binding */
SymbolPtr proc; /* Handler */
byte callType;/* How handler may be called */
} binding;
struct { /* Undefined public symbol */
ID file; /* File where PUBLIC directive was seen */
int line; /* Line number of same */
} public;
/*
* LOCAL SYMBOLS -- These are chained internally to the symbol module
* (i.e. the connections aren't visible to the outside world)
*/
struct { /* Local variable (arg or variable) */
int offset; /* Offset from BP */
TypePtr type; /* Type of variable */
} localVar;
struct { /* inherit-locals-from binding */
int done; /* Non-zero if inheritance resolved */
ID file; /* File containing .enter inherit */
int line; /* Line number of same */
} inherit;
/*
* SEGMENT STUFF
*/
struct { /* Segment info */
SegData *data; /* Data not used as often */
Table code; /* Code in segment */
#define CODE_BYTES_PER 256 /* Bytes per chunk in table */
} segment;
struct {
int nSegs; /* Number of segments in the group */
struct _Symbol **segs; /* Array of segments in the group */
int offset; /* Offset w/in object header for group */
} group;
} u;
} Symbol;
#define NullSymbol ((Symbol *)NULL)
#define Sym_IsNear(sym) ((((sym)->type == SYM_LABEL) || \
((sym)->type == SYM_LOCALLABEL)) ? \
(sym)->u.label.near : \
(((sym)->type == SYM_PROC) ? \
((sym)->u.proc.flags & SYM_NEAR) : 0))
#define Sym_Reference(sym) (((Symbol *)(sym))->flags |= SYM_REF)
typedef int SymForEachProc(SymbolPtr sym, Opaque data);
/*
* Initialize the symbol table, returning the symbol for the global scope
*/
extern SymbolPtr Sym_Init(void);
/*
* Enter a symbol into the current scope.
*/
extern SymbolPtr Sym_Enter(ID id, SymType type, ...);
/*
* Locate a symbol in the current scope
*/
extern SymbolPtr Sym_Find(ID id, SymType type,
int resolveInherits);
/*
* Iterate over all segments
*/
extern void Sym_ForEachSegment(SymForEachProc *func, Opaque data);
/*
* Iterate over all local symbols for a procedure
*/
extern void Sym_ForEachLocal(SymbolPtr proc,
SymForEachProc *func,
Opaque data);
/*
* Adjust the offsets of all arguments for a procedure by a set amount.
*/
extern void Sym_AdjustArgOffset(SymbolPtr proc, int adjustment);
extern void Sym_Adjust(SymbolPtr seg,
int start,
int diff);
/*
* Mark all the local variables and arguments as referenced.
*/
extern void Sym_ReferenceAllLocals(SymbolPtr proc);
/*
* Write all segments to the output file. This includes setup of the
* file header, the segment descriptors, arranging for the write-out of all
* fixups and segment data.
*/
extern int Sym_ProcessSegments(void);
/*
* Bind a procedure to a method in the context of a class.
*/
extern ID Sym_BindMethod(SymbolPtr class,
ID method,
SymbolPtr proc,
byte isStatic);
/*
* Allocate and enter localization information for a chunk. The fields of
* the thing are to be filled in later.
*/
extern LocalizeInfo *Sym_AllocLoc(SymbolPtr sym,
ChunkDataType type);
extern void Sym_SetAddress(SymbolPtr sym,
int offset);
extern void Sym_AddToGroup(SymbolPtr grp, SymbolPtr seg);
#endif /* _SYMBOL_H_ */
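/***********************************************************************
 * Illustrative sketch (not part of symbol.h): how a client .c file might
 * walk every segment using Sym_ForEachSegment and the SymForEachProc
 * callback type declared above. Two assumptions are made purely for
 * illustration: that Opaque is a generic pointer type, and that a zero
 * return from the callback means "continue iterating".
 ***********************************************************************/
#include "symbol.h"

static int
CountSegmentCallback(SymbolPtr sym, Opaque data)
{
    int *count = (int *)data;   /* assumption: Opaque is pointer-sized */

    *count += 1;
    return 0;                   /* assumption: 0 == keep iterating */
}

static int
CountAllSegments(void)
{
    int count = 0;

    Sym_ForEachSegment(CountSegmentCallback, (Opaque)&count);
    return count;
}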
| 7,560 |
1,742 | <reponame>UCD4IDS/sage<gh_stars>1000+
"""
Context Managers for LibGAP

This module implements a context manager for global variables. This is
useful since the behavior of GAP is sometimes controlled by global
variables, which you might want to switch to a different value for a
computation. Here is an example of how you are supposed to use it from
your code. First, let us set a dummy global variable for our example::

    sage: libgap.set_global('FooBar', 123)

Then, if you want to switch the value momentarily you can write::

    sage: with libgap.global_context('FooBar', 'test'):
    ....:     print(libgap.get_global('FooBar'))
    test

Afterward, the global variable reverts to the previous value::

    sage: print(libgap.get_global('FooBar'))
    123

The value is reset even if exceptions occur::

    sage: with libgap.global_context('FooBar', 'test'):
    ....:     print(libgap.get_global('FooBar'))
    ....:     raise ValueError(libgap.get_global('FooBar'))
    Traceback (most recent call last):
    ...
    ValueError: test
    sage: print(libgap.get_global('FooBar'))
    123
"""
###############################################################################
# Copyright (C) 2012, <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
###############################################################################
from sage.libs.gap.libgap import libgap
class GlobalVariableContext():
    def __init__(self, variable, value):
        """
        Context manager for GAP global variables.

        It is recommended that you use the
        :meth:`sage.libs.gap.libgap.Gap.global_context` method and not
        construct objects of this class manually.

        INPUT:

        - ``variable`` -- string. The variable name.

        - ``value`` -- anything that defines a GAP object.

        EXAMPLES::

            sage: libgap.set_global('FooBar', 1)
            sage: with libgap.global_context('FooBar', 2):
            ....:     print(libgap.get_global('FooBar'))
            2
            sage: libgap.get_global('FooBar')
            1
        """
        self._variable = variable
        self._new_value = value

    def __enter__(self):
        """
        Called when entering the with-block

        EXAMPLES::

            sage: libgap.set_global('FooBar', 1)
            sage: with libgap.global_context('FooBar', 2):
            ....:     print(libgap.get_global('FooBar'))
            2
            sage: libgap.get_global('FooBar')
            1
        """
        self._old_value = libgap.get_global(self._variable)
        libgap.set_global(self._variable, self._new_value)

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Called when exiting the with-block

        EXAMPLES::

            sage: libgap.set_global('FooBar', 1)
            sage: with libgap.global_context('FooBar', 2):
            ....:     print(libgap.get_global('FooBar'))
            2
            sage: libgap.get_global('FooBar')
            1
        """
        libgap.set_global(self._variable, self._old_value)
        return False
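# A minimal usage sketch (not from the original module): the recommended entry
# point is ``libgap.global_context``, which constructs this class; using the
# class directly looks like this and behaves the same way.
def _demo_direct_global_context():
    libgap.set_global('FooBar', 123)
    with GlobalVariableContext('FooBar', 'test'):
        # Inside the block the GAP global holds the temporary value.
        print(libgap.get_global('FooBar'))
    # On exit the previous value is restored, even if an exception was raised.
    print(libgap.get_global('FooBar'))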
| 1,341 |
970 | import pytest
import torch
from ludwig.modules import metric_modules


@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(6).float()])
def test_rmse_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.RMSEMetric()
    metric.update(preds, target)
    assert output == metric.compute()


@pytest.mark.parametrize("preds", [torch.tensor([0.2, 0.3, 0.8, 0.1])])
@pytest.mark.parametrize("target", [torch.tensor([0, 0, 1, 1])])
@pytest.mark.parametrize("output", [torch.tensor(0.5)])
def test_roc_auc_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.ROCAUCMetric()
    metric.update(preds, target)
    assert output == metric.compute()


@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(0.7527).float()])
def test_rmspe_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.RMSPEMetric()
    metric.update(preds, target)
    assert torch.isclose(output, metric.compute(), rtol=0.0001)


@pytest.mark.parametrize(
    "preds,target,num_outputs,output",
    [
        (torch.arange(3), torch.arange(3, 6), 1, torch.tensor(-12.5)),
        (torch.arange(6).reshape(3, 2), torch.arange(6, 12).reshape(3, 2), 2, torch.tensor(-12.5)),
    ],
)
def test_r2_score(preds: torch.Tensor, target: torch.Tensor, num_outputs: int, output: torch.Tensor):
    metric = metric_modules.R2Score(num_outputs=num_outputs)
    metric.update(preds, target)
    assert metric.compute() == output


@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(-21.4655).float()])
def test_bwcewl_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.BWCEWLMetric()
    metric.update(preds, target)
    assert torch.isclose(output, metric.compute(), rtol=0.0001)


@pytest.mark.parametrize("preds", [torch.tensor([[0.5, 0.5], [0.2, 0.8], [0.6, 0.4]])])
@pytest.mark.parametrize("target", [torch.tensor([1, 1, 0])])
@pytest.mark.parametrize("output", [torch.tensor(0.5763)])
def test_softmax_cross_entropy_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.SoftmaxCrossEntropyMetric()
    metric.update(preds, target)
    assert torch.isclose(output, metric.compute(), rtol=0.0001)


@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(-42.9311).float()])
def test_sigmoid_cross_entropy_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.SigmoidCrossEntropyMetric()
    metric.update(preds, target)
    assert torch.isclose(output, metric.compute(), rtol=0.0001)


@pytest.mark.parametrize(
    "preds,target,output",
    [
        (
            torch.tensor([[0, 1], [3, 2], [4, 5]]),
            torch.tensor([[0, 1], [1, 2], [4, 5]]),
            torch.tensor(0.8),
        ),
        (
            torch.tensor([[0, 1, 2], [1, 3, 4], [3, 4, 5]]),
            torch.tensor([[0, 1, 2], [1, 1, 4], [3, 4, 5]]),
            torch.tensor(0.8750),
        ),
        (
            torch.tensor([[1, 5, 1, 5, 1, 5, 12, 12, 12], [10, 1, 5, 1, 5, 12, 12, 12, 12]]),
            torch.tensor([[1, 9, 5, 7, 5, 9, 13, 6, 0], [1, 9, 7, 13, 4, 7, 7, 7, 0]]),
            torch.tensor(0.05555555),
        ),
    ],
)
def test_token_accuracy_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.TokenAccuracyMetric()
    metric.update(preds, target)
    assert torch.allclose(metric.compute(), output)


@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2)])
@pytest.mark.parametrize("target", [torch.tensor([[0, 1], [2, 1], [4, 5]]).float()])
@pytest.mark.parametrize("output", [torch.tensor(0.8333).float()])
def test_category_accuracy(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.CategoryAccuracy()
    metric.update(preds, target)
    assert torch.isclose(output, metric.compute(), rtol=0.0001)


@pytest.mark.parametrize(
    "preds,target,output,k",
    [
        (
            torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]),
            torch.tensor([0, 1, 2]),
            torch.tensor(0.6667).float(),
            2,
        )
    ],
)
def test_hits_at_k_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor, k: int):
    metric = metric_modules.HitsAtKMetric(top_k=k)
    metric.update(preds, target)
    assert torch.isclose(output, metric.compute(), rtol=0.0001)


@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(6).float()])
def test_mae_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.MAEMetric()
    metric.update(preds, target)
    assert output == metric.compute()


@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(36).float()])
def test_mse_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.MSEMetric()
    metric.update(preds, target)
    assert output == metric.compute()


@pytest.mark.parametrize("preds", [torch.tensor([[0, 1], [1, 1]])])
@pytest.mark.parametrize("target", [torch.tensor([[1, 0], [1, 1]])])
@pytest.mark.parametrize("output", [torch.tensor(0.5)])
def test_jaccard_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
    metric = metric_modules.JaccardMetric()
    metric.update(preds, target)
    assert output == metric.compute()
| 2,798 |
335 | {
"word": "Capacity",
"definitions": [
"the ability to hold or contain people or things",
"the largest amount or number that can be held or contained"
],
"parts-of-speech": "Noun"
}
| 68 |
1,168 | // Verifies that init statements in if and switch index correctly.
struct S {
//- @field defines/binding IntField
int field;
};
S g();
void f() {
//- @z defines/binding LocalVarZ
if (auto z = g();
//- @z ref LocalVarZ
//- @field ref IntField
z.field > 0) {
//- @z ref LocalVarZ
//- @field ref IntField
int i = z.field;
} else {
//- @z ref LocalVarZ
//- @field ref IntField
int j = z.field;
}
int x;
//- @zz defines/binding LocalVarZZ
switch(auto zz = g();
//- @zz ref LocalVarZZ
//- @field ref IntField
zz.field) {
case 0:
case 1:
//- @zz ref LocalVarZZ
//- @field ref IntField
x = zz.field;
}
}
| 319 |
1,444 |
package mage.abilities.effects.common.combat;
import mage.constants.Duration;
import mage.constants.Layer;
import mage.constants.Outcome;
import mage.constants.SubLayer;
import mage.abilities.Ability;
import mage.abilities.effects.ContinuousEffectImpl;
import mage.filter.FilterPermanent;
import mage.game.Game;
import mage.game.permanent.Permanent;
import mage.util.CardUtil;
/**
*
* @author Quercitron
*/
public class CantBeBlockedByMoreThanOneAllEffect extends ContinuousEffectImpl {
private FilterPermanent filter;
protected int amount;
public CantBeBlockedByMoreThanOneAllEffect(FilterPermanent filter) {
this(1, filter, Duration.WhileOnBattlefield);
}
public CantBeBlockedByMoreThanOneAllEffect(int amount, FilterPermanent filter) {
this(amount, filter, Duration.WhileOnBattlefield);
}
public CantBeBlockedByMoreThanOneAllEffect(int amount, FilterPermanent filter, Duration duration) {
super(duration, Outcome.Benefit);
this.amount = amount;
this.filter = filter;
staticText = new StringBuilder("Each ").append(filter.getMessage()).append(" can't be blocked by more than ")
.append(CardUtil.numberToText(amount)).append(" creature").append(amount > 1 ? "s" : "").toString();
}
public CantBeBlockedByMoreThanOneAllEffect(final CantBeBlockedByMoreThanOneAllEffect effect) {
super(effect);
this.amount = effect.amount;
this.filter = effect.filter;
}
@Override
public CantBeBlockedByMoreThanOneAllEffect copy() {
return new CantBeBlockedByMoreThanOneAllEffect(this);
}
@Override
public boolean apply(Layer layer, SubLayer sublayer, Ability source, Game game) {
switch (layer) {
case RulesEffects:
for (Permanent perm : game.getBattlefield().getActivePermanents(filter, source.getControllerId(), source.getSourceId(), game)) {
perm.setMaxBlockedBy(amount);
}
break;
}
return true;
}
@Override
public boolean apply(Game game, Ability source) {
return false;
}
@Override
public boolean hasLayer(Layer layer) {
return layer == Layer.RulesEffects;
}
}
| 845 |
610 | <filename>.changes/1.4.json
{
"date" : "2019-06-10",
"version" : "1.4",
"entries" : [ {
"type" : "feature",
"description" : "Usability enhancements to the CloudFormation UI \n - color coding status similar to the AWS Console\n - preventing multiple tabs opening for the same stack (#798)\n - opening from AWS Explorer with right-click instead of double click (#799)\n - adding status reason to event view"
}, {
"type" : "feature",
"description" : "Open README.md file after creating a project"
}, {
"type" : "feature",
"description" : "Auto-create run configurations when using the New Project wizard"
}, {
"type" : "feature",
"description" : "Enable toolkit in 2019.2 EAP"
}, {
"type" : "bugfix",
"description" : "Fix unable to map paths that have `.` or `..` in them"
}, {
"type" : "bugfix",
"description" : "Do not load proxy settings from Java system properties since it conflicts with IDE setting"
}, {
"type" : "bugfix",
"description" : "Make sure we commit all open documents if using a file-based event input (#910)"
}, {
"type" : "bugfix",
"description" : "Fix being unable to open an empty credentials/config file for editing"
} ]
} | 401 |
575 | <gh_stars>100-1000
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/frame/history_util.h"
#include "third_party/blink/renderer/platform/weborigin/kurl.h"
#include "third_party/blink/renderer/platform/weborigin/security_origin.h"
namespace blink {
namespace {
bool EqualIgnoringPathQueryAndFragment(const KURL& a, const KURL& b) {
return StringView(a.GetString(), 0, a.PathStart()) ==
StringView(b.GetString(), 0, b.PathStart());
}
bool EqualIgnoringQueryAndFragment(const KURL& a, const KURL& b) {
return StringView(a.GetString(), 0, a.PathEnd()) ==
StringView(b.GetString(), 0, b.PathEnd());
}
} // namespace
bool CanChangeToUrlForHistoryApi(const KURL& url,
const SecurityOrigin* document_origin,
const KURL& document_url) {
if (!url.IsValid())
return false;
if (document_origin->IsGrantedUniversalAccess())
return true;
// We allow sandboxed documents, `data:`/`file:` URLs, etc. to use
// 'pushState'/'replaceState' to modify the URL fragment: see
// https://crbug.com/528681 for the compatibility concerns.
if (document_origin->IsOpaque() || document_origin->IsLocal())
return EqualIgnoringQueryAndFragment(url, document_url);
if (!EqualIgnoringPathQueryAndFragment(url, document_url))
return false;
scoped_refptr<const SecurityOrigin> requested_origin =
SecurityOrigin::Create(url);
if (requested_origin->IsOpaque() ||
!requested_origin->IsSameOriginWith(document_origin)) {
return false;
}
return true;
}
} // namespace blink
| 639 |
1,143 | // =================================================================================================
// Copyright 2011 Twitter, Inc.
// -------------------------------------------------------------------------------------------------
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this work except in compliance with the License.
// You may obtain a copy of the License in the LICENSE file, or at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =================================================================================================
package com.twitter.common.zookeeper;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Functions;
import com.google.common.base.Preconditions;
import com.google.common.collect.ForwardingMap;
import com.google.common.collect.Sets;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import com.twitter.common.base.Command;
import com.twitter.common.base.ExceptionalSupplier;
import com.twitter.common.base.MorePreconditions;
import com.twitter.common.util.BackoffHelper;
import com.twitter.common.zookeeper.ZooKeeperClient.ZooKeeperConnectionException;
/**
* A ZooKeeper backed {@link Map}. Initialized with a node path, this map represents child nodes
* under that path as keys, with the data in those nodes as values. This map is readonly from
* clients of this class, and only can be modified via direct zookeeper operations.
*
* Note that instances of this class maintain a zookeeper watch for each zookeeper node under the
* parent, as well as on the parent itself. Instances of this class should be created via the
* {@link #create} factory method.
*
 * As of ZooKeeper Version 3.1, the maximum allowable size of a data node is 1 MB. A single
 * client should be able to maintain up to several thousand watches, but this depends on the
 * rate of data change as well.
*
* Talk to your zookeeper cluster administrator if you expect number of map entries times number
* of live clients to exceed a thousand, as a zookeeper cluster is limited by total number of
* server-side watches enabled.
*
* For an example of a set of tools to maintain one of these maps, please see
* src/scripts/HenAccess.py in the hen repository.
*
* @param <V> the type of values this map stores
*/
public class ZooKeeperMap<V> extends ForwardingMap<String, V> {
/**
* An optional listener which can be supplied and triggered when entries in a ZooKeeperMap
* are added, changed or removed. For a ZooKeeperMap of type <V>, the listener will fire a
* "nodeChanged" event with the name of the ZNode that changed, and its resulting value as
* interpreted by the provided deserializer. Removal of child nodes triggers the "nodeRemoved"
* method indicating the name of the ZNode which is no longer present in the map.
*/
public interface Listener<V> {
/**
* Fired when a node is added to the ZooKeeperMap or changed.
*
* @param nodeName indicates the name of the ZNode that was added or changed.
* @param value is the new value of the node after passing through your supplied deserializer.
*/
void nodeChanged(String nodeName, V value);
/**
* Fired when a node is removed from the ZooKeeperMap.
*
* @param nodeName indicates the name of the ZNode that was removed from the ZooKeeperMap.
*/
void nodeRemoved(String nodeName);
}
/**
* Default deserializer for the constructor if you want to simply store the zookeeper byte[] data
* in this map.
*/
public static final Function<byte[], byte[]> BYTE_ARRAY_VALUES = Functions.identity();
/**
* A listener that ignores all events.
*/
public static <T> Listener<T> noopListener() {
return new Listener<T>() {
@Override public void nodeChanged(String nodeName, T value) { }
@Override public void nodeRemoved(String nodeName) { }
};
}
private static final Logger LOG = Logger.getLogger(ZooKeeperMap.class.getName());
private final ZooKeeperClient zkClient;
private final String nodePath;
private final Function<byte[], V> deserializer;
private final ConcurrentMap<String, V> localMap;
private final Map<String, V> unmodifiableLocalMap;
private final BackoffHelper backoffHelper;
private final Listener<V> mapListener;
// Whether it's safe to re-establish watches if our zookeeper session has expired.
private final Object safeToRewatchLock;
private volatile boolean safeToRewatch;
/**
* Returns an initialized ZooKeeperMap. The given path must exist at the time of
* creation or a {@link KeeperException} will be thrown.
*
* @param zkClient a zookeeper client
* @param nodePath path to a node whose data will be watched
* @param deserializer a function that converts byte[] data from a zk node to this map's
* value type V
* @param listener is a Listener which fires when values are added, changed, or removed.
*
* @throws InterruptedException if the underlying zookeeper server transaction is interrupted
* @throws KeeperException.NoNodeException if the given nodePath doesn't exist
* @throws KeeperException if the server signals an error
* @throws ZooKeeperConnectionException if there was a problem connecting to the zookeeper
* cluster
*/
public static <V> ZooKeeperMap<V> create(
ZooKeeperClient zkClient,
String nodePath,
Function<byte[], V> deserializer,
Listener<V> listener)
throws InterruptedException, KeeperException, ZooKeeperConnectionException {
ZooKeeperMap<V> zkMap = new ZooKeeperMap<V>(zkClient, nodePath, deserializer, listener);
zkMap.init();
return zkMap;
}
/**
* Returns an initialized ZooKeeperMap. The given path must exist at the time of
* creation or a {@link KeeperException} will be thrown.
*
* @param zkClient a zookeeper client
* @param nodePath path to a node whose data will be watched
* @param deserializer a function that converts byte[] data from a zk node to this map's
* value type V
*
* @throws InterruptedException if the underlying zookeeper server transaction is interrupted
* @throws KeeperException.NoNodeException if the given nodePath doesn't exist
* @throws KeeperException if the server signals an error
* @throws ZooKeeperConnectionException if there was a problem connecting to the zookeeper
* cluster
*/
public static <V> ZooKeeperMap<V> create(
ZooKeeperClient zkClient,
String nodePath,
Function<byte[], V> deserializer)
throws InterruptedException, KeeperException, ZooKeeperConnectionException {
return ZooKeeperMap.create(zkClient, nodePath, deserializer, ZooKeeperMap.<V>noopListener());
}
/**
* Initializes a ZooKeeperMap. The given path must exist at the time of object creation or
* a {@link KeeperException} will be thrown.
*
* Please note that this object will not track any remote zookeeper data until {@link #init()}
* is successfully called. After construction and before that call, this {@link Map} will
* be empty.
*
* @param zkClient a zookeeper client
* @param nodePath top-level node path under which the map data lives
* @param deserializer a function that converts byte[] data from a zk node to this map's
* value type V
* @param mapListener is a Listener which fires when values are added, changed, or removed.
*
* @throws InterruptedException if the underlying zookeeper server transaction is interrupted
* @throws KeeperException.NoNodeException if the given nodePath doesn't exist
* @throws KeeperException if the server signals an error
* @throws ZooKeeperConnectionException if there was a problem connecting to the zookeeper
* cluster
*/
@VisibleForTesting
ZooKeeperMap(
ZooKeeperClient zkClient,
String nodePath,
Function<byte[], V> deserializer,
Listener<V> mapListener)
throws InterruptedException, KeeperException, ZooKeeperConnectionException {
super();
this.mapListener = Preconditions.checkNotNull(mapListener);
this.zkClient = Preconditions.checkNotNull(zkClient);
this.nodePath = MorePreconditions.checkNotBlank(nodePath);
this.deserializer = Preconditions.checkNotNull(deserializer);
localMap = new ConcurrentHashMap<String, V>();
unmodifiableLocalMap = Collections.unmodifiableMap(localMap);
backoffHelper = new BackoffHelper();
safeToRewatchLock = new Object();
safeToRewatch = false;
if (zkClient.get().exists(nodePath, null) == null) {
throw new KeeperException.NoNodeException();
}
}
/**
* Initialize zookeeper tracking for this {@link Map}. Once this call returns, this object
* will be tracking data in zookeeper.
*
* @throws InterruptedException if the underlying zookeeper server transaction is interrupted
* @throws KeeperException if the server signals an error
* @throws ZooKeeperConnectionException if there was a problem connecting to the zookeeper
* cluster
*/
@VisibleForTesting
void init() throws InterruptedException, KeeperException, ZooKeeperConnectionException {
Watcher watcher = zkClient.registerExpirationHandler(new Command() {
@Override public void execute() {
/*
* First rewatch all of our locally cached children. Some of them may not exist anymore,
* which will lead to caught KeeperException.NoNode whereafter we'll remove that child
* from the cached map.
*
* Next, we'll establish our top level child watch and add any new nodes that might exist.
*/
try {
synchronized (safeToRewatchLock) {
if (safeToRewatch) {
rewatchDataNodes();
tryWatchChildren();
}
}
} catch (InterruptedException e) {
LOG.log(Level.WARNING, "Interrupted while trying to re-establish watch.", e);
Thread.currentThread().interrupt();
}
}
});
try {
// Synchronize to prevent the race of watchChildren completing and then the session expiring
// before we update safeToRewatch.
synchronized (safeToRewatchLock) {
watchChildren();
safeToRewatch = true;
}
} catch (InterruptedException e) {
zkClient.unregister(watcher);
throw e;
} catch (KeeperException e) {
zkClient.unregister(watcher);
throw e;
} catch (ZooKeeperConnectionException e) {
zkClient.unregister(watcher);
throw e;
}
}
@Override
protected Map<String, V> delegate() {
return unmodifiableLocalMap;
}
private void tryWatchChildren() throws InterruptedException {
backoffHelper.doUntilSuccess(new ExceptionalSupplier<Boolean, InterruptedException>() {
@Override public Boolean get() throws InterruptedException {
try {
watchChildren();
return true;
} catch (KeeperException e) {
return false;
} catch (ZooKeeperConnectionException e) {
return false;
}
}
});
}
private synchronized void watchChildren()
throws InterruptedException, KeeperException, ZooKeeperConnectionException {
/*
* Add a watch on the parent node itself, and attempt to rewatch if it
* gets deleted
*/
zkClient.get().exists(nodePath, new Watcher() {
@Override public void process(WatchedEvent event) {
if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
// If the parent node no longer exists
localMap.clear();
try {
tryWatchChildren();
} catch (InterruptedException e) {
LOG.log(Level.WARNING, "Interrupted while trying to watch children.", e);
Thread.currentThread().interrupt();
}
}
}});
final Watcher childWatcher = new Watcher() {
@Override
public void process(WatchedEvent event) {
if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
try {
tryWatchChildren();
} catch (InterruptedException e) {
LOG.log(Level.WARNING, "Interrupted while trying to watch children.", e);
Thread.currentThread().interrupt();
}
}
}
};
List<String> children = zkClient.get().getChildren(nodePath, childWatcher);
updateChildren(Sets.newHashSet(children));
}
private void tryAddChild(final String child) throws InterruptedException {
backoffHelper.doUntilSuccess(new ExceptionalSupplier<Boolean, InterruptedException>() {
@Override public Boolean get() throws InterruptedException {
try {
addChild(child);
return true;
} catch (KeeperException e) {
return false;
} catch (ZooKeeperConnectionException e) {
return false;
}
}
});
}
// TODO(<NAME>) - Make this use the ZooKeeperNode class.
private void addChild(final String child)
throws InterruptedException, KeeperException, ZooKeeperConnectionException {
final Watcher nodeWatcher = new Watcher() {
@Override
public void process(WatchedEvent event) {
if (event.getType() == Watcher.Event.EventType.NodeDataChanged) {
try {
tryAddChild(child);
} catch (InterruptedException e) {
LOG.log(Level.WARNING, "Interrupted while trying to add a child.", e);
Thread.currentThread().interrupt();
}
} else if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
removeEntry(child);
}
}
};
try {
V value = deserializer.apply(zkClient.get().getData(makePath(child), nodeWatcher, null));
putEntry(child, value);
} catch (KeeperException.NoNodeException e) {
// This node doesn't exist anymore, remove it from the map and we're done.
removeEntry(child);
}
}
@VisibleForTesting
void removeEntry(String key) {
localMap.remove(key);
mapListener.nodeRemoved(key);
}
@VisibleForTesting
void putEntry(String key, V value) {
localMap.put(key, value);
mapListener.nodeChanged(key, value);
}
private void rewatchDataNodes() throws InterruptedException {
for (String child : keySet()) {
tryAddChild(child);
}
}
private String makePath(final String child) {
return nodePath + "/" + child;
}
private void updateChildren(Set<String> zkChildren) throws InterruptedException {
Set<String> addedChildren = Sets.difference(zkChildren, keySet());
Set<String> removedChildren = Sets.difference(keySet(), zkChildren);
for (String child : addedChildren) {
tryAddChild(child);
}
for (String child : removedChildren) {
removeEntry(child);
}
}
}
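/*
 * Illustrative usage sketch (not part of the original source): builds a
 * read-only view of the children of a hypothetical "/config/my-service" node
 * whose values are the nodes' data decoded as UTF-8 strings, using the
 * create() factory and Listener interface declared above. The ZooKeeperClient
 * is taken as a parameter because its construction is outside the scope of
 * this file.
 */
class ZooKeeperMapUsageSketch {
  private static final Logger EXAMPLE_LOG =
      Logger.getLogger(ZooKeeperMapUsageSketch.class.getName());

  static Map<String, String> watchConfig(ZooKeeperClient zkClient)
      throws InterruptedException, KeeperException, ZooKeeperConnectionException {
    // Deserializer: interpret each child node's byte[] payload as UTF-8 text.
    Function<byte[], String> utf8 = new Function<byte[], String>() {
      @Override public String apply(byte[] data) {
        return new String(data, java.nio.charset.StandardCharsets.UTF_8);
      }
    };
    // Listener: log additions/changes and removals of child nodes.
    ZooKeeperMap.Listener<String> listener = new ZooKeeperMap.Listener<String>() {
      @Override public void nodeChanged(String nodeName, String value) {
        EXAMPLE_LOG.info("config node changed: " + nodeName + " -> " + value);
      }
      @Override public void nodeRemoved(String nodeName) {
        EXAMPLE_LOG.info("config node removed: " + nodeName);
      }
    };
    // The parent node must already exist, otherwise create() throws
    // KeeperException.NoNodeException (see the factory's javadoc above).
    return ZooKeeperMap.create(zkClient, "/config/my-service", utf8, listener);
  }
}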
| 5,086 |
414 | <reponame>wisdark/C3<filename>Src/Common/FSecure/C3/Interfaces/Channels/GoogleDrive.cpp
#include "Stdafx.h"
#include "GoogleDrive.h"
#include "Common/FSecure/Crypto/Base64.h"
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
FSecure::C3::Interfaces::Channels::GoogleDrive::GoogleDrive(ByteView arguments)
: m_inboundDirectionName{ arguments.Read<std::string>() }
, m_outboundDirectionName{ arguments.Read<std::string>() }
{
auto [userAgent, ClientId, ClientSecret, RefreshToken, channelName] = arguments.Read<std::string, std::string, std::string, std::string, std::string>();
m_googledriveObj = FSecure::GoogleDrive{ userAgent, ClientId, ClientSecret, RefreshToken, channelName };
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t FSecure::C3::Interfaces::Channels::GoogleDrive::OnSendToChannel(ByteView data)
{
// There is a cap on uploads of files >5mb at which point different APIs are required.
data = data.SubString(0, 5 * 1024 * 1024);
m_googledriveObj.WriteMessageToFile(m_outboundDirectionName, data);
return data.size();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
std::vector<FSecure::ByteVector> FSecure::C3::Interfaces::Channels::GoogleDrive::OnReceiveFromChannel()
{
std::vector<ByteVector> ret;
for (auto& [ts, id] : m_googledriveObj.GetMessagesByDirection(m_inboundDirectionName))
{
ret.push_back(m_googledriveObj.ReadFile(id));
m_googledriveObj.DeleteFile(id);
}
return ret;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
FSecure::ByteVector FSecure::C3::Interfaces::Channels::GoogleDrive::OnRunCommand(ByteView command)
{
auto commandCopy = command; //each read moves ByteView. CommandCopy is needed for default.
switch (command.Read<uint16_t>())
{
case 0:
UploadFile(command);
return {};
case 1:
DeleteAllFiles();
return {};
default:
return AbstractChannel::OnRunCommand(commandCopy);
}
}
void FSecure::C3::Interfaces::Channels::GoogleDrive::UploadFile(ByteView args)
{
m_googledriveObj.UploadFile(args.Read<std::string>());
}
void FSecure::C3::Interfaces::Channels::GoogleDrive::DeleteAllFiles()
{
m_googledriveObj.DeleteAllFiles();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
const char* FSecure::C3::Interfaces::Channels::GoogleDrive::GetCapability()
{
return R"_(
{
"create":
{
"arguments":
[
[
{
"type": "string",
"name": "Input ID",
"min": 4,
"randomize": true,
"description": "Used to distinguish packets for the channel"
},
{
"type": "string",
"name": "Output ID",
"min": 4,
"randomize": true,
"description": "Used to distinguish packets from the channel"
}
],
{
"type": "string",
"name": "User-Agent Header",
"description": "The User-Agent header to set. Warning: adding user agent header of web browser, can cause site security provider to block access to api, and prevent channel from functioning."
},
{
"type": "string",
"name": "Client ID",
"min": 1,
"description": "Client ID for GoogleDrive's API"
},
{
"type": "string",
"name": "Client Secret",
"min": 1,
"description": "Client Secret for GoogleDrive's API"
},
{
"type": "string",
"name": "Refresh token",
"min": 1,
"description": "This token is used to retrieve an access token for GoogleDrive's API"
},
{
"type": "string",
"name": "Folder name",
"min": 4,
"randomize": true,
"description": "Folder to create for channel"
}
]
},
"commands":
[
{
"name": "Upload File from Relay",
"id": 0,
"description": "Upload file from host running Relay directly to GoogleDrive (150mb max.)",
"arguments":
[
{
"type" : "string",
"name": "Remote Filepath",
"description" : "Path to upload."
}
]
},
{
"name": "Remove All Files",
"id": 1,
"description": "Delete channel folder and all files within it.",
"arguments": []
}
]
}
)_";
}
| 1,508 |
4,753 | <reponame>jonasmalacofilho/haxe
package haxe.test;
public class lowerCaseClass
{
public boolean works;
public lowerCaseClass()
{
this.works = true;
}
}
| 62 |
23,901 | <reponame>DionysisChristopoulos/google-research<filename>homophonous_logography/neural/utils.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of simple utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
class DualLogger(object):
  """Log to file and terminal: https://stackoverflow.com/questions/14906764."""

  def __init__(self, filename, mode="wt"):
    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
      os.makedirs(directory)
    self.terminal = sys.stdout
    self.log = open(filename, mode, encoding="utf-8")

  def _flush(self):
    self.terminal.flush()
    self.log.flush()

  def write(self, message):
    self.terminal.write(message)
    self.log.write(message)
    self._flush()

  def flush(self):
    # This flush method is needed for python 3 compatibility.
    self._flush()
    pass
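# Illustrative usage sketch (not part of the original module): route stdout to
# both the terminal and a log file. The path is hypothetical; note that
# DualLogger creates the parent directory of the path, so the path must
# contain a directory component.
def tee_stdout(log_path="/tmp/experiment/train.log"):
  """Replaces sys.stdout with a DualLogger writing to `log_path`."""
  sys.stdout = DualLogger(log_path)
  print("Logging to terminal and to %s" % log_path)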
| 465 |
1,695 | #include <gtest/gtest.h>
#include <nnpack.h>
#include <testers/softmax.h>
/*
* Test that implementation works for a small number of channels
*/
TEST(OUT_OF_PLACE, few_channels) {
	auto tester = SoftmaxTester();
	for (size_t channels = 1; channels <= 96; channels += 1) {
		tester.channels(channels)
			.testOutput();
	}
}

TEST(IN_PLACE, few_channels) {
	auto tester = SoftmaxTester();
	for (size_t channels = 1; channels <= 96; channels += 1) {
		tester.channels(channels)
			.testOutputInplace();
	}
}

/*
 * Test that implementation works for a moderate number of channels with small batch
 */
TEST(OUT_OF_PLACE, small_batch) {
	auto tester = SoftmaxTester();
	for (size_t channels = 100; channels <= 115; channels += 1) {
		for (size_t batch = 2; batch <= 5; batch += 1) {
			tester.channels(channels)
				.batchSize(batch)
				.testOutput();
		}
	}
}

TEST(IN_PLACE, small_batch) {
	auto tester = SoftmaxTester();
	for (size_t channels = 100; channels <= 115; channels += 1) {
		for (size_t batch = 2; batch <= 5; batch += 1) {
			tester.channels(channels)
				.batchSize(batch)
				.testOutputInplace();
		}
	}
}

int main(int argc, char* argv[]) {
	const enum nnp_status init_status = nnp_initialize();
	assert(init_status == nnp_status_success);
	setenv("TERM", "xterm-256color", 0);
	::testing::InitGoogleTest(&argc, argv);
	return RUN_ALL_TESTS();
}
| 531 |
2,705 | // Copyright 2016 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "./ptrace.h"
#include "./symbol.h"
#include "./thread.h"
// This abstracts the representation of py2/py3
namespace pyflame {
// Get the threads. Each thread stack will be in reverse order (most recent
// frame first).
typedef std::vector<Thread> (*get_threads_t)(pid_t, PyAddresses, bool);
// Frobber to get python stack stuff; this encapsulates all of the Python
// interpreter logic.
class PyFrob {
public:
PyFrob(pid_t pid, bool enable_threads)
: pid_(pid), enable_threads_(enable_threads) {}
~PyFrob() { PtraceCleanup(pid_); }
// Must be called before GetThreads() to detect the Python ABI.
int DetectABI(PyABI abi);
// Get the current frame list.
std::vector<Thread> GetThreads(void) const;
// Useful when debugging.
std::string Status() const;
private:
pid_t pid_;
PyAddresses addrs_;
bool enable_threads_;
get_threads_t get_threads_;
// Fill the addrs_ member
int set_addrs_(PyABI *abi);
};
} // namespace pyflame
| 496 |
1,062 | <gh_stars>1000+
/**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.mr4c.sources;
import java.io.IOException;
import java.net.URI;
import java.util.List;
public interface FileSource {
List<String> getAllFileNames() throws IOException;
DataFileSource getFileSource(String fileName) throws IOException;
/**
* Check if the file exists
*/
boolean fileExists(String fileName) throws IOException;
/**
* Will return null if the file does not exist
*/
DataFileSource getFileSourceOnlyIfExists(String fileName) throws IOException;
DataFileSink getFileSink(String fileName) throws IOException;
/**
* Releases any resources held by this source. For some sources, this may be needed to commit writes.
*/
void close() throws IOException;
/**
* creates the necessary directories/files/etc before accessing this source
*/
void ensureExists() throws IOException;
/**
* deletes all files in this source
*/
void clear() throws IOException;
String getDescription();
}
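/*
 * Illustrative usage sketch (not part of the original interface): lists the
 * names of the files currently present in a FileSource, using only the
 * methods declared above. The concrete FileSource implementation is assumed
 * to be supplied by the caller.
 */
class FileSourceUsageSketch {
	static void printExistingFileNames(FileSource src) throws IOException {
		src.ensureExists(); // make sure backing directories/files are in place
		for ( String name : src.getAllFileNames() ) {
			if ( src.fileExists(name) ) {
				System.out.println(src.getDescription() + ": " + name);
			}
		}
		src.close(); // may be required to commit writes, per the javadoc above
	}
}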
| 458 |
743 | <filename>hermes-consumers/src/main/java/pl/allegro/tech/hermes/consumers/consumer/rate/maxrate/FlatBinaryMaxRateRegistry.java
package pl.allegro.tech.hermes.consumers.consumer.rate.maxrate;
import com.google.common.base.Preconditions;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.NodeCache;
import org.apache.curator.framework.recipes.cache.NodeCacheListener;
import org.slf4j.Logger;
import pl.allegro.tech.hermes.api.SubscriptionName;
import pl.allegro.tech.hermes.common.config.ConfigFactory;
import pl.allegro.tech.hermes.common.config.Configs;
import pl.allegro.tech.hermes.consumers.subscription.id.SubscriptionIds;
import pl.allegro.tech.hermes.consumers.supervisor.workload.ClusterAssignmentCache;
import pl.allegro.tech.hermes.consumers.supervisor.workload.ConsumerAssignmentCache;
import pl.allegro.tech.hermes.infrastructure.zookeeper.ZookeeperPaths;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import static org.slf4j.LoggerFactory.getLogger;
import static pl.allegro.tech.hermes.common.config.Configs.CONSUMER_WORKLOAD_NODE_ID;
class FlatBinaryMaxRateRegistry implements MaxRateRegistry, NodeCacheListener {
private static final Logger logger = getLogger(FlatBinaryMaxRateRegistry.class);
private final ZookeeperOperations zookeeper;
private final Map<String, ConsumerMaxRates> consumersMaxRates = new HashMap<>();
private final Map<String, ConsumerRateHistory> consumersRateHistories = new HashMap<>();
private final ConsumerRateHistory currentConsumerRateHistories;
private final ConsumerMaxRates currentConsumerMaxRates;
private final String consumerId;
private final ClusterAssignmentCache clusterAssignmentCache;
private final ConsumerAssignmentCache consumerAssignmentCache;
private final ConsumerRateHistoriesEncoder consumerRateHistoriesEncoder;
private final ConsumerRateHistoriesDecoder consumerRateHistoriesDecoder;
private final ConsumerMaxRatesDecoder consumerMaxRatesDecoder;
private final ConsumerMaxRatesEncoder consumerMaxRatesEncoder;
private final NodeCache maxRateNodeCache;
private final FlatBinaryMaxRateRegistryPaths registryPaths;
FlatBinaryMaxRateRegistry(ConfigFactory configFactory,
ClusterAssignmentCache clusterAssignmentCache,
ConsumerAssignmentCache consumerAssignmentCache,
CuratorFramework curator,
ZookeeperPaths zookeeperPaths,
SubscriptionIds subscriptionIds) {
this.consumerId = configFactory.getStringProperty(CONSUMER_WORKLOAD_NODE_ID);
this.clusterAssignmentCache = clusterAssignmentCache;
this.consumerAssignmentCache = consumerAssignmentCache;
final String clusterName = configFactory.getStringProperty(Configs.KAFKA_CLUSTER_NAME);
this.currentConsumerRateHistories = new ConsumerRateHistory();
this.currentConsumerMaxRates = new ConsumerMaxRates();
this.registryPaths = new FlatBinaryMaxRateRegistryPaths(zookeeperPaths, consumerId, clusterName);
this.zookeeper = new ZookeeperOperations(curator);
int historiesEncoderBufferSize = configFactory.getIntProperty(Configs.CONSUMER_MAXRATE_REGISTRY_BINARY_ENCODER_HISTORY_BUFFER_SIZE_BYTES);
this.consumerRateHistoriesEncoder = new ConsumerRateHistoriesEncoder(subscriptionIds, historiesEncoderBufferSize);
this.consumerRateHistoriesDecoder = new ConsumerRateHistoriesDecoder(subscriptionIds);
int maxRateEncoderBufferSize = configFactory.getIntProperty(Configs.CONSUMER_MAXRATE_REGISTRY_BINARY_ENCODER_MAX_RATE_BUFFER_SIZE_BYTES);
this.consumerMaxRatesEncoder = new ConsumerMaxRatesEncoder(subscriptionIds, maxRateEncoderBufferSize);
this.consumerMaxRatesDecoder = new ConsumerMaxRatesDecoder(subscriptionIds);
this.maxRateNodeCache = new NodeCache(curator, registryPaths.consumerMaxRatePath(consumerId));
maxRateNodeCache.getListenable().addListener(this);
}
@Override
public void start() {
try {
logger.info("Starting flat binary max rate registry at {}, watching current consumer path at {}",
registryPaths.consumersRateCurrentClusterRuntimeBinaryPath(), registryPaths.consumerMaxRatePath(consumerId));
maxRateNodeCache.start();
} catch (Exception e) {
throw new IllegalStateException("Could not start node cache for consumer max rate", e);
}
refreshConsumerMaxRates();
}
private void refreshConsumerMaxRates() {
ChildData nodeData = maxRateNodeCache.getCurrentData();
if (nodeData != null) {
byte[] data = nodeData.getData();
ConsumerMaxRates decodedMaxRates = consumerMaxRatesDecoder.decode(data);
logger.info("Decoded {} bytes of max rates for current node with {} subscription entries", data.length, decodedMaxRates.size());
currentConsumerMaxRates.setAllMaxRates(decodedMaxRates);
}
}
@Override
public void stop() {
try {
logger.info("Stopping flat binary max rate registry");
maxRateNodeCache.close();
} catch (IOException e) {
throw new RuntimeException("Could not stop node cache for consumer max rate", e);
}
}
@Override
public void onBeforeMaxRateCalculation() {
Set<String> assignedConsumers = clusterAssignmentCache.getAssignedConsumers();
clearCacheFromInactiveConsumers(assignedConsumers);
refreshRateCachesOfConsumers(assignedConsumers);
}
private void clearCacheFromInactiveConsumers(Set<String> assignedConsumers) {
consumersMaxRates.entrySet().removeIf(entry -> !assignedConsumers.contains(entry.getKey()));
consumersRateHistories.entrySet().removeIf(entry -> !assignedConsumers.contains(entry.getKey()));
}
private void refreshRateCachesOfConsumers(Set<String> assignedConsumers) {
getMaxRateConsumerNodes().forEach(consumerId -> {
if (assignedConsumers.contains(consumerId)) {
refreshConsumerRateHistory(consumerId);
refreshConsumerMaxRate(consumerId);
} else {
removeConsumerRateRootNode(consumerId);
}
});
}
private List<String> getMaxRateConsumerNodes() {
String path = registryPaths.consumersRateCurrentClusterRuntimeBinaryPath();
try {
if (zookeeper.exists(path)) {
return zookeeper.getNodeChildren(path);
}
} catch (Exception e) {
logger.warn("Could not get max rate consumer nodes list", e);
}
return Collections.emptyList();
}
private void refreshConsumerMaxRate(String consumerId) {
logger.info("Refreshing max rate of {}", consumerId);
String consumerMaxRatePath = registryPaths.consumerMaxRatePath(consumerId);
zookeeper.getNodeData(consumerMaxRatePath)
.map(consumerMaxRatesDecoder::decode)
.ifPresent(maxRates -> {
int decodedSize = maxRates.size();
maxRates.cleanup(clusterAssignmentCache.getConsumerSubscriptions(consumerId));
int cleanedSize = maxRates.size();
if (decodedSize > cleanedSize) {
logger.info("Refreshed max rates of {} with {} subscriptions ({} stale entries omitted)",
consumerId, cleanedSize, decodedSize - cleanedSize);
} else {
logger.info("Refreshed max rates of {} with {} subscriptions", consumerId, cleanedSize);
}
consumersMaxRates.put(consumerId, maxRates);
});
}
private void refreshConsumerRateHistory(String consumerId) {
logger.info("Refreshing rate history of {}", consumerId);
String consumerRateHistoryPath = registryPaths.consumerRateHistoryPath(consumerId);
zookeeper.getNodeData(consumerRateHistoryPath)
.map(consumerRateHistoriesDecoder::decode)
.ifPresent(rateHistories -> {
logger.info("Refreshed rate history of {} with {} subscriptions", consumerId, rateHistories.size());
consumersRateHistories.put(consumerId, rateHistories);
});
}
private void removeConsumerRateRootNode(String consumerId) {
logger.info("Deleting max rate node of stale consumer {}", consumerId);
String path = registryPaths.consumerRateParentRuntimePath(consumerId);
try {
zookeeper.deleteNodeRecursively(path);
} catch (Exception e) {
logger.warn("Could not delete stale consumer max rate node {}", path, e);
}
}
@Override
public void onAfterMaxRateCalculation() {
persistMaxRatesForAllConsumers();
}
private void persistMaxRatesForAllConsumers() {
consumersMaxRates.forEach((consumerId, maxRates) -> {
byte[] encoded = consumerMaxRatesEncoder.encode(maxRates);
String consumerMaxRatePath = registryPaths.consumerMaxRatePath(consumerId);
try {
zookeeper.writeOrCreatePersistent(consumerMaxRatePath, encoded);
} catch (Exception e) {
logger.warn("Could not write max rates for consumer {}", consumerId, e);
}
});
}
@Override
public Set<ConsumerRateInfo> ensureCorrectAssignments(SubscriptionName subscriptionName, Set<String> currentConsumers) {
Set<ConsumerRateInfo> rateInfos = new HashSet<>();
for (String consumerId : currentConsumers) {
Optional<MaxRate> maxRate = Optional.ofNullable(consumersMaxRates.get(consumerId))
.flatMap(rates -> rates.getMaxRate(subscriptionName));
RateHistory rateHistory = Optional.ofNullable(consumersRateHistories.get(consumerId))
.map(histories -> histories.getRateHistory(subscriptionName))
.orElse(RateHistory.empty());
rateInfos.add(new ConsumerRateInfo(consumerId, new RateInfo(maxRate, rateHistory)));
}
return rateInfos;
}
@Override
public void update(SubscriptionName subscriptionName, Map<String, MaxRate> newMaxRates) {
newMaxRates.forEach((consumerId, maxRate) -> {
consumersMaxRates.putIfAbsent(consumerId, new ConsumerMaxRates());
consumersMaxRates.get(consumerId).setMaxRate(subscriptionName, maxRate);
});
}
@Override
public Optional<MaxRate> getMaxRate(ConsumerInstance consumer) {
Preconditions.checkState(consumer.getConsumerId().equals(consumerId), "Reading max rate is allowed only for current consumer");
return currentConsumerMaxRates.getMaxRate(consumer.getSubscription());
}
@Override
public RateHistory getRateHistory(ConsumerInstance consumer) {
Preconditions.checkState(consumer.getConsumerId().equals(consumerId), "Reading rate history is allowed only for current consumer");
return currentConsumerRateHistories.getRateHistory(consumer.getSubscription());
}
@Override
public void writeRateHistory(ConsumerInstance consumer, RateHistory rateHistory) {
Preconditions.checkState(consumer.getConsumerId().equals(consumerId), "Saving rate history is allowed only for current consumer");
currentConsumerRateHistories.setRateHistory(consumer.getSubscription(), rateHistory);
}
@Override
public void onAfterWriteRateHistories() {
Set<SubscriptionName> subscriptions = consumerAssignmentCache.getConsumerSubscriptions();
currentConsumerRateHistories.cleanup(subscriptions);
byte[] encoded = consumerRateHistoriesEncoder.encode(currentConsumerRateHistories);
logger.info("Writing rate history of {} subscriptions, saving {} bytes", currentConsumerRateHistories.size(), encoded.length);
try {
zookeeper.writeOrCreatePersistent(registryPaths.currentConsumerRateHistoryPath(), encoded);
} catch (Exception e) {
logger.error("An error while saving consumers rate histories");
}
}
@Override
public void nodeChanged() {
refreshConsumerMaxRates();
}
}
| 4,746 |
1,585 | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2013 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2009 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2006 Voltaire. All rights reserved.
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2010 IBM Corporation. All rights reserved.
* Copyright (c) 2011-2016 Los Alamos National Security, LLC. All rights
* reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#define OPAL_DISABLE_ENABLE_MEM_DEBUG 1
#include "opal_config.h"
#include "opal/align.h"
#include "rcache_udreg.h"
#include <errno.h>
#include <string.h>
#ifdef HAVE_MALLOC_H
# include <malloc.h>
#endif
#include "opal/include/opal_stdint.h"
#include "opal/mca/rcache/base/base.h"
#include "opal/runtime/opal_params.h"
#include "opal/util/string_copy.h"
#include "opal/util/sys_limits.h"
#include <fcntl.h>
#include <udreg_pub.h>
#include <sys/mman.h>
static int mca_rcache_udreg_register(mca_rcache_base_module_t *rcache, void *addr, size_t size,
uint32_t flags, int32_t access_flags,
mca_rcache_base_registration_t **reg);
static int mca_rcache_udreg_deregister(mca_rcache_base_module_t *rcache,
mca_rcache_base_registration_t *reg);
static int mca_rcache_udreg_find(mca_rcache_base_module_t *rcache, void *addr, size_t size,
mca_rcache_base_registration_t **reg);
static void mca_rcache_udreg_finalize(mca_rcache_base_module_t *rcache);
static bool mca_rcache_udreg_evict(mca_rcache_base_module_t *rcache);
static void *mca_rcache_udreg_reg_func(void *addr, uint64_t len, void *reg_context);
static uint32_t mca_rcache_udreg_dereg_func(void *device_data, void *dreg_context);
/*
* Initializes the rcache module.
*/
int mca_rcache_udreg_module_init(mca_rcache_udreg_module_t *rcache)
{
struct udreg_cache_attr cache_attr;
int urc;
rcache->super.rcache_component = &mca_rcache_udreg_component.super;
rcache->super.rcache_register = mca_rcache_udreg_register;
rcache->super.rcache_find = mca_rcache_udreg_find;
rcache->super.rcache_deregister = mca_rcache_udreg_deregister;
/* This module relies on udreg for notification of memory release */
rcache->super.rcache_invalidate_range = NULL;
rcache->super.rcache_finalize = mca_rcache_udreg_finalize;
cache_attr.modes = 0;
/* Create udreg cache */
if (rcache->resources.use_kernel_cache) {
cache_attr.modes |= UDREG_CC_MODE_USE_KERNEL_CACHE;
}
if (rcache->resources.use_evict_w_unreg) {
cache_attr.modes |= UDREG_CC_MODE_USE_EVICT_W_UNREG;
}
if (mca_rcache_udreg_component.leave_pinned) {
cache_attr.modes |= UDREG_CC_MODE_USE_LAZY_DEREG;
}
OBJ_CONSTRUCT(&rcache->lock, opal_mutex_t);
opal_string_copy(cache_attr.cache_name, rcache->resources.base.cache_name,
UDREG_MAX_CACHENAME_LEN);
cache_attr.max_entries = rcache->resources.max_entries;
cache_attr.debug_mode = 0;
cache_attr.debug_rank = 0;
cache_attr.reg_context = rcache;
cache_attr.dreg_context = rcache;
cache_attr.destructor_context = rcache;
cache_attr.device_reg_func = mca_rcache_udreg_reg_func;
cache_attr.device_dereg_func = mca_rcache_udreg_dereg_func;
cache_attr.destructor_callback = NULL;
opal_output_verbose(MCA_BASE_VERBOSE_INFO, opal_rcache_base_framework.framework_output,
"rcache/udreg: creating udreg cache with name %s", cache_attr.cache_name);
/* attempt to create the udreg cache. this will fail if one already exists */
(void) UDREG_CacheCreate(&cache_attr);
urc = UDREG_CacheAccess(rcache->resources.base.cache_name,
(udreg_cache_handle_t *) &rcache->udreg_handle);
if (UDREG_RC_SUCCESS != urc) {
opal_output_verbose(MCA_BASE_VERBOSE_WARN, opal_rcache_base_framework.framework_output,
"rcache/udreg: call to UDREG_CacheAccess failed with rc: %d", urc);
return OPAL_ERROR;
}
OBJ_CONSTRUCT(&rcache->reg_list, opal_free_list_t);
opal_free_list_init(&rcache->reg_list, rcache->resources.base.sizeof_reg, opal_cache_line_size,
OBJ_CLASS(mca_rcache_base_registration_t), 0, opal_cache_line_size, 0, -1,
32, NULL, 0, NULL, NULL, NULL);
return OPAL_SUCCESS;
}
/* udreg callback functions */
static void *mca_rcache_udreg_reg_func(void *addr, uint64_t size, void *reg_context)
{
mca_rcache_udreg_module_t *rcache_udreg = (mca_rcache_udreg_module_t *) reg_context;
unsigned int page_size = opal_getpagesize();
mca_rcache_base_registration_t *udreg_reg;
opal_free_list_item_t *item;
int rc;
item = opal_free_list_get(&rcache_udreg->reg_list);
if (NULL == item) {
return NULL;
}
udreg_reg = (mca_rcache_base_registration_t *) item;
udreg_reg->rcache = reg_context;
udreg_reg->base = OPAL_DOWN_ALIGN_PTR(addr, page_size, unsigned char *);
udreg_reg->bound = OPAL_ALIGN_PTR((intptr_t) addr + size, page_size, unsigned char *) - 1;
udreg_reg->ref_count = 0;
addr = (void *) udreg_reg->base;
size = (uint64_t)(udreg_reg->bound - udreg_reg->base + 1);
/* pull the flags and access flags out of the rcache module */
udreg_reg->access_flags = rcache_udreg->requested_access_flags;
udreg_reg->flags = rcache_udreg->requested_flags;
opal_output_verbose(
MCA_BASE_VERBOSE_INFO, opal_rcache_base_framework.framework_output,
"rcache/udreg: calling underlying register function for address range {%p, %p}", addr,
(void *) ((intptr_t) addr + size));
rc = rcache_udreg->resources.base.register_mem(rcache_udreg->resources.base.reg_data,
udreg_reg->base, size, udreg_reg);
if (OPAL_SUCCESS != rc) {
opal_output_verbose(MCA_BASE_VERBOSE_WARN, opal_rcache_base_framework.framework_output,
"rcache/udreg: could not register memory. rc: %d", rc);
opal_free_list_return(&rcache_udreg->reg_list, item);
/* NTH: this is the only way to get UDReg_Register to recognize a failure */
udreg_reg = UDREG_DEVICE_REG_FAILED;
}
return udreg_reg;
}
static uint32_t mca_rcache_udreg_dereg_func(void *device_data, void *dreg_context)
{
mca_rcache_udreg_module_t *rcache_udreg = (mca_rcache_udreg_module_t *) dreg_context;
mca_rcache_base_registration_t *udreg_reg = (mca_rcache_base_registration_t *) device_data;
int rc;
assert(udreg_reg->ref_count == 0);
rc = rcache_udreg->resources.base.deregister_mem(rcache_udreg->resources.base.reg_data,
udreg_reg);
if (OPAL_LIKELY(OPAL_SUCCESS == rc)) {
opal_free_list_return(&rcache_udreg->reg_list, (opal_free_list_item_t *) udreg_reg);
}
/* might be worth printing out a warning if an error occurs here */
return 0;
}
static bool mca_rcache_udreg_evict(mca_rcache_base_module_t *rcache)
{
mca_rcache_udreg_module_t *rcache_udreg = (mca_rcache_udreg_module_t *) rcache;
udreg_return_t urc;
urc = UDREG_Evict(rcache_udreg->udreg_handle);
return (UDREG_RC_SUCCESS == urc);
}
/*
* register memory
*/
static int mca_rcache_udreg_register(mca_rcache_base_module_t *rcache, void *addr, size_t size,
uint32_t flags, int32_t access_flags,
mca_rcache_base_registration_t **reg)
{
mca_rcache_udreg_module_t *rcache_udreg = (mca_rcache_udreg_module_t *) rcache;
mca_rcache_base_registration_t *udreg_reg, *old_reg;
bool bypass_cache = !!(flags & MCA_RCACHE_FLAGS_CACHE_BYPASS);
const unsigned int page_size = opal_getpagesize();
unsigned char *base, *bound;
udreg_entry_t *udreg_entry = NULL;
*reg = NULL;
OPAL_THREAD_LOCK(&rcache_udreg->lock);
/* we hold the lock so no other thread can modify these flags until the registration is complete
*/
rcache_udreg->requested_access_flags = access_flags;
rcache_udreg->requested_flags = flags;
base = OPAL_DOWN_ALIGN_PTR(addr, page_size, unsigned char *);
bound = OPAL_ALIGN_PTR((intptr_t) addr + size, page_size, unsigned char *) - 1;
addr = base;
size = (size_t)(uintptr_t)(bound - base) + 1;
if (false == bypass_cache) {
/* Get a udreg entry for this region */
do {
opal_output_verbose(MCA_BASE_VERBOSE_INFO, opal_rcache_base_framework.framework_output,
"rcache/udreg: XXX registering region {%p, %p} with udreg", addr,
(void *) ((intptr_t) addr + size));
while (UDREG_RC_SUCCESS
!= UDREG_Register(rcache_udreg->udreg_handle, addr, size, &udreg_entry)) {
/* try to remove one unused reg and retry */
opal_output_verbose(MCA_BASE_VERBOSE_INFO,
opal_rcache_base_framework.framework_output, "calling evict!");
if (!mca_rcache_udreg_evict(rcache)) {
opal_output_verbose(MCA_BASE_VERBOSE_INFO,
opal_rcache_base_framework.framework_output,
"rcache/udreg: could not register memory with udreg");
OPAL_THREAD_UNLOCK(&rcache_udreg->lock);
return OPAL_ERR_OUT_OF_RESOURCE;
}
}
udreg_reg = (mca_rcache_base_registration_t *) udreg_entry->device_data;
if (NULL != udreg_reg && (udreg_reg->access_flags & access_flags) == access_flags) {
/* sufficient access */
break;
}
old_reg = udreg_reg;
if (old_reg) {
/* to not confuse udreg make sure the new registration covers the same address
* range as the old one. */
addr = old_reg->base;
size = (size_t)((intptr_t) old_reg->bound - (intptr_t) old_reg->base);
/* make the new access flags more permissive */
access_flags |= old_reg->access_flags;
if (!old_reg->ref_count) {
/* deregister the region before attempting to re-register */
mca_rcache_udreg_dereg_func(old_reg, rcache);
udreg_entry->device_data = NULL;
old_reg = NULL;
} else {
/* ensure that mca_rcache_udreg_deregister does not call into udreg since
* we are forcefully evicting the registration here */
old_reg->flags |= MCA_RCACHE_FLAGS_CACHE_BYPASS | MCA_RCACHE_FLAGS_INVALID;
}
}
rcache_udreg->requested_access_flags = access_flags;
/* get a new registration */
while (UDREG_DEVICE_REG_FAILED
== (udreg_reg = mca_rcache_udreg_reg_func(addr, size, rcache))) {
if (!mca_rcache_udreg_evict(rcache)) {
opal_output_verbose(MCA_BASE_VERBOSE_INFO,
opal_rcache_base_framework.framework_output,
"rcache/udreg: could not register memory with udreg");
OPAL_THREAD_UNLOCK(&rcache_udreg->lock);
return OPAL_ERR_OUT_OF_RESOURCE;
}
}
/* update the device data with the new registration */
udreg_entry->device_data = udreg_reg;
} while (0);
} else {
/* if cache bypass is requested don't use the udreg cache */
while (UDREG_DEVICE_REG_FAILED
== (udreg_reg = mca_rcache_udreg_reg_func(addr, size, rcache))) {
/* try to remove one unused reg and retry */
if (!mca_rcache_udreg_evict(rcache)) {
opal_output_verbose(MCA_BASE_VERBOSE_INFO,
opal_rcache_base_framework.framework_output,
"rcache/udreg: could not register memory");
OPAL_THREAD_UNLOCK(&rcache_udreg->lock);
return OPAL_ERR_OUT_OF_RESOURCE;
}
}
}
OPAL_THREAD_UNLOCK(&rcache_udreg->lock);
*reg = udreg_reg;
(void) OPAL_THREAD_ADD_FETCH32(&udreg_reg->ref_count, 1);
udreg_reg->rcache_context = udreg_entry;
return OPAL_SUCCESS;
}
static int mca_rcache_udreg_find(mca_rcache_base_module_t *rcache, void *addr, size_t size,
mca_rcache_base_registration_t **reg)
{
*reg = NULL;
return OPAL_ERR_NOT_FOUND;
}
static int mca_rcache_udreg_deregister(mca_rcache_base_module_t *rcache,
mca_rcache_base_registration_t *reg)
{
mca_rcache_udreg_module_t *rcache_udreg = (mca_rcache_udreg_module_t *) rcache;
int32_t ref_count = OPAL_THREAD_ADD_FETCH32(®->ref_count, -1);
assert(ref_count >= 0);
if (!(reg->flags & MCA_RCACHE_FLAGS_CACHE_BYPASS)) {
OPAL_THREAD_LOCK(&rcache_udreg->lock);
UDREG_DecrRefcount(rcache_udreg->udreg_handle, reg->rcache_context);
OPAL_THREAD_UNLOCK(&rcache_udreg->lock);
} else if (!ref_count) {
mca_rcache_udreg_dereg_func(reg, rcache);
}
return OPAL_SUCCESS;
}
static void mca_rcache_udreg_finalize(mca_rcache_base_module_t *rcache)
{
mca_rcache_udreg_module_t *rcache_udreg = (mca_rcache_udreg_module_t *) rcache;
/* Statistic */
if (true == mca_rcache_udreg_component.print_stats) {
uint64_t hit = 0, miss = 0, evicted = 0;
(void) UDREG_GetStat(rcache_udreg->udreg_handle, UDREG_STAT_CACHE_HIT, &hit);
(void) UDREG_GetStat(rcache_udreg->udreg_handle, UDREG_STAT_CACHE_MISS, &miss);
(void) UDREG_GetStat(rcache_udreg->udreg_handle, UDREG_STAT_CACHE_EVICTED, &evicted);
opal_output(0, "%s udreg: stats (hit/miss/evicted): %" PRIu64 "/%" PRIu64 "/%" PRIu64 "\n",
OPAL_NAME_PRINT(OPAL_PROC_MY_NAME), hit, miss, evicted);
}
UDREG_CacheRelease(rcache_udreg->udreg_handle);
OBJ_DESTRUCT(&rcache_udreg->reg_list);
OBJ_DESTRUCT(&rcache_udreg->lock);
}
| 7,293 |
666 | <reponame>mingt/microservices-basics-spring-boot
package com.anilallewar.microservices.task.config;
import org.springframework.cloud.client.loadbalancer.LoadBalanced;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.security.oauth2.client.OAuth2ClientContext;
import org.springframework.security.oauth2.client.OAuth2RestTemplate;
import org.springframework.security.oauth2.client.resource.OAuth2ProtectedResourceDetails;
import org.springframework.security.oauth2.client.token.grant.password.ResourceOwnerPasswordResourceDetails;
/**
* Configuration that sets up the OAuth2 client operation for making calls to
* the comments-webservice.<br>
* <br>
*
* @author anilallewar
*
*/
@Configuration
public class OAuthClientConfiguration {
/**
	 * RestTemplate that relays the OAuth2 token passed to the task webservice.
	 *
	 * @param context the OAuth2 client context that carries the caller's access token
	 * @return a load-balanced OAuth2RestTemplate bound to that context
*/
@Bean(name = "oAuth2RestTemplate")
@LoadBalanced
@Primary
public OAuth2RestTemplate restTemplate(OAuth2ClientContext context) {
return new OAuth2RestTemplate(authServer(), context);
}
private OAuth2ProtectedResourceDetails authServer() {
ResourceOwnerPasswordResourceDetails resourceOwnerPasswordResourceDetails = new ResourceOwnerPasswordResourceDetails();
		// Need to set the access token URI since RestTemplate tries to access it the first time
resourceOwnerPasswordResourceDetails.setAccessTokenUri("/userauth/oauth/token");
return resourceOwnerPasswordResourceDetails;
}
}
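/*
 * Hedged usage sketch (added for illustration, not part of the original class): the javadoc
 * above says this configuration exists so other beans can relay the caller's OAuth2 token when
 * invoking the comments-webservice. A consumer might look like the commented example below;
 * the CommentsClient name, the "comments-webservice" virtual host and the /comments/{taskId}
 * path are assumptions made up for this sketch.
 *
 *   @Component
 *   public class CommentsClient {
 *
 *       @Autowired
 *       @Qualifier("oAuth2RestTemplate")
 *       private OAuth2RestTemplate restTemplate;
 *
 *       public String getCommentsForTask(String taskId) {
 *           // The @LoadBalanced template resolves the logical service name via service discovery
 *           // and forwards the bearer token held in the injected OAuth2ClientContext.
 *           return restTemplate.getForObject(
 *                   "http://comments-webservice/comments/" + taskId, String.class);
 *       }
 *   }
 */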
| 462 |
3,084 | <gh_stars>1000+
/*++
Copyright (c) Microsoft Corporation. All rights reserved.
Module Name:
Trace.h
Abstract:
WPP tracing definitions.
Environment:
Kernel-mode only.
--*/
//
// Tracing GUID - {EAD1EE75-4BFE-4E28-8AFA-E94B0A1BAF37}
//
#pragma once
#define WPP_CONTROL_GUIDS \
WPP_DEFINE_CONTROL_GUID( \
UcsiTraceGuid, (EAD1EE75,4BFE,4E28,8AFA,E94B0A1BAF37), \
WPP_DEFINE_BIT(TRACE_FLAG_DRIVER) \
WPP_DEFINE_BIT(TRACE_FLAG_FDO) \
WPP_DEFINE_BIT(TRACE_FLAG_ACPI) \
WPP_DEFINE_BIT(TRACE_FLAG_PPM) \
WPP_DEFINE_BIT(TRACE_FLAG_UCMCALLBACKS) \
WPP_DEFINE_BIT(TRACE_FLAG_UCMNOTIFICATIONS) \
)
#define WPP_LEVEL_FLAGS_LOGGER(lvl, flags) \
WPP_LEVEL_LOGGER(flags)
#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) \
(WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl)
//
// begin_wpp config
//
// FUNC TRACE_ERROR{LEVEL=TRACE_LEVEL_ERROR}(FLAGS, MSG,...);
//
// FUNC TRACE_WARN{LEVEL=TRACE_LEVEL_WARNING}(FLAGS, MSG, ...);
//
// FUNC TRACE_INFO{LEVEL=TRACE_LEVEL_INFORMATION}(FLAGS, MSG, ...);
//
// FUNC TRACE_VERBOSE{LEVEL=TRACE_LEVEL_VERBOSE}(FLAGS, MSG, ...);
//
// FUNC TRACE_FUNC_ENTRY{LEVEL=TRACE_LEVEL_VERBOSE}(FLAGS, ...);
// USESUFFIX(TRACE_FUNC_ENTRY, "%!FUNC! Entry");
//
// FUNC TRACE_FUNC_EXIT{LEVEL=TRACE_LEVEL_VERBOSE}(FLAGS, ...);
// USESUFFIX(TRACE_FUNC_EXIT, "%!FUNC! Exit");
//
// CUSTOM_TYPE(UCSI_COMMAND, ItemEnum(_UCSI_COMMAND));
// CUSTOM_TYPE(UCSI_POWER_OPERATION_MODE, ItemEnum(_UCSI_POWER_OPERATION_MODE));
// CUSTOM_TYPE(UCSI_POWER_DIRECTION, ItemEnum(_UCSI_POWER_DIRECTION));
// CUSTOM_TYPE(UCSI_CONNECTOR_PARTNER_TYPE, ItemEnum(_UCSI_CONNECTOR_PARTNER_TYPE));
// CUSTOM_TYPE(UCSI_BATTERY_CHARGING_STATUS, ItemEnum(_UCSI_BATTERY_CHARGING_STATUS));
//
// end_wpp
// | 957 |
2,406 | // Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
std::string custom_relu_model();
void register_custom_relu_operator();
void unregister_custom_relu_operator();
| 78 |
3,702 | package app.metatron.discovery.domain.workbook.configurations.format;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
public class GeoPointFormat extends GeoFormat implements FieldFormat {
public GeoPointFormat() {
}
@JsonCreator
public GeoPointFormat(@JsonProperty("originalSrsName") String originalSrsName,
@JsonProperty("maxLevels") Integer maxLevels) {
super(originalSrsName, maxLevels);
}
}
| 173 |
12,278 | {
"key": "lambda",
"name": "Lambda",
"authors": [
"<NAME>",
"<NAME>"
],
"description": "Define small unnamed function objects at the actual call site, and more.",
"category": [
"Function-objects"
],
"maintainers": [
"<NAME> <jarvi -at- cs.tamu.edu>"
]
}
| 151 |
1,429 | # -*- coding: utf-8 -*-
#
# environment.py
#
# Copyright (C) 2013 <NAME> <EMAIL>
#
# This module is part of python-pptx and is released under the MIT License:
# http://www.opensource.org/licenses/mit-license.php
"""
Used by behave to set testing environment before and after running acceptance
tests.
"""
import os
scratch_dir = os.path.abspath(
os.path.join(os.path.split(__file__)[0], '_scratch')
)
def before_all(context):
if not os.path.isdir(scratch_dir):
os.mkdir(scratch_dir)
| 191 |
2,434 | <gh_stars>1000+
/*
*
* Copyright 2017 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package io.github.swagger2markup.assertions;
import io.github.robwin.diff.DiffAssertions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
public class DiffUtils {
private static final Logger LOGGER = LoggerFactory.getLogger(DiffUtils.class);
public static void assertThatAllFilesAreEqual(Path expectedDirectory, Path actualDirectory, String reportName) {
Path reportPath = Paths.get("build/diff-report/", reportName);
try {
try (DirectoryStream<Path> directoryStream = Files.newDirectoryStream(expectedDirectory)) {
for (Path expectedFile : directoryStream) {
Path actualFile = actualDirectory.resolve(expectedFile.getFileName());
LOGGER.info("Diffing file '{}' with '{}'", actualFile, expectedFile);
DiffAssertions.assertThat(actualFile).isEqualTo(expectedFile, reportPath);
}
}
} catch (IOException e) {
throw new RuntimeException("Failed to assert that all files are equal", e);
}
}
public static void assertThatFileIsEqual(Path expectedFile, Path actualFile, String reportName) {
Path reportPath = Paths.get("build/diff-report/", reportName);
LOGGER.info("Diffing file '{}' with '{}'", actualFile, expectedFile);
DiffAssertions.assertThat(actualFile).isEqualTo(expectedFile, reportPath);
}
}
| 785 |
536 | ../../../SDAutoLayout/SDAutoLayoutDemo/SDAutoLayout/UIView+SDAutoLayout.h | 30 |
1,056 | <gh_stars>1000+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.core.output2;
import java.awt.*;
/**
* An event type which carries data about an operation performed on an
* instance of NbIO which an interested view must respond to on the
* event queue.
* <p>
* While this is an unusual approach, it is also using the event queue in
* exactly the way event queues are designed to be used. It accomplishes
* complete decoupling of the IO implementation from the view implementation -
* the IO does not need to know anything about what component is rendering
* it, it just posts relevant events onto the event queue.
* <p>
* Unfortunately, it is impossible to use an AWTEventListener to get notification
* of custom event types (appears that it once was, and was optimized out in
* the biggest if/then clause in history - see Toolkit). So we have the
* dispatch() method.
* <p>
* While all this could be done with SwingUtilities.invokeLater() and runnables,
* this approach works well and is somewhat more lightweight, so I see no need
* to change it.
* <p>
* Should someone want to make this package capable of supporting multiple
* output windows, for some reason, the only thing necessary to do is to
* add a registry of weakly referenced OutputContainers and iterate them all
* in <code>dispatch()</code> - the only static tie to the rest of the
* universe is the DEFAULT field of Controller.
*
* @author <NAME>
*/
final class IOEvent extends AWTEvent implements ActiveEvent {
static final int IO_EVENT_MASK = 0xF0000;
/**
* Command instructing the controller to create a new view for the IO.
* If getValue() returns true, it will try to find an existing closed tab
* with the same name and reuse it.
*/
static final int CMD_CREATE = 0;
/**
* Command to set the output visible. Output is always visible in the current
* implementation, so this command is provided for completeness but will be ignored.
*/
static final int CMD_OUTPUT_VISIBLE=1;
/**
* Set the input area visible.
*/
static final int CMD_INPUT_VISIBLE=2;
/**
* Command to set the error output visible. Error output is interleaved in the current
* implementation, so this command is provided for completeness but will be ignored.
*/
static final int CMD_ERR_VISIBLE=3;
/**
* Provided for completeness but will be ignored.
*/
static final int CMD_ERR_SEPARATED=4;
/**
* Evil and unwise but supported.
*/
static final int CMD_FOCUS_TAKEN=5;
/**
* Command indicating that the IO should become the selected tab.
*/
static final int CMD_SELECT=6;
/**
* Command indicating that the IO's tab should be closed.
*/
static final int CMD_CLOSE=7;
/**
     * Command indicating that the IO has been closed for writes and the UI should perform
* any needed state changes to reflect that.
*/
static final int CMD_STREAM_CLOSED=8;
/**
* Command indicating that the output writer's reset() method has been called, and that any
     * existing output in the IO's tab should be discarded, and the closed flag reset.
*/
static final int CMD_RESET=9;
/**
* Set the toolbar actions that should be displayed.
*/
static final int CMD_SET_TOOLBAR_ACTIONS = 10;
/** set tab's icon */
static final int CMD_SET_ICON = 11;
/** set tool tip for tab */
static final int CMD_SET_TOOLTIP = 12;
/** scroll to position */
static final int CMD_SCROLL = 13;
/** set default colors */
static final int CMD_DEF_COLORS = 14;
/**
* Command to support IOSelect.select() (with "fine" control.
*/
static final int CMD_FINE_SELECT = 15;
private static final int CMD_LAST = 16;
/**
* Strings representing the event.
*/
private static final String[] CMDS = new String[] {
"CREATE", //NOI18N
"OUTPUT_VISIBLE", //NOI18N
"INPUT_VISIBLE", //NOI18N
"ERR_VISIBLE", //NOI18N
"ERR_SEPARATED", //NOI18N
"FOCUS_TAKEN", //NOI18N
"SELECT", //NOI18N
"CLOSE", //NOI18N
"STREAM_CLOSED", //NOI18N
"RESET", //NOI18N
"SET_TOOLBAR_ACTIONS", //NOI18N
"CMD_SET_ICON", //NOI18N
"CMD_SET_TOOLTIP", //NOI18N
"CMD_SCROLL", //NOI18N
"CMD_DEF_COLORS", //NOI18N
"CMD_FINE_SELECT", //NOI18N
};
/**
* Boolean value associated with this event.
*/
private boolean value = false;
/**
* Data associated with this event (used by set toolbar actions)
*/
private Object data = null;
/**
* Used by unit tests to ensure all pending events have been processed before
* continuing.
*/
static int pendingCount = 0;
/**
* Create an IOEvent with the specified source, command and boolean state for the command.
*
* @param source An instance of NbIO which something of interest has happened to; can be null
* in the case that this is CMD_DETACH, an instruction to the default instance to
* self-destruct (module uninstalled or winsys wants to install a new instance)
* @param command The ID of what has happened
* @param value The boolean state for the command to be performed
*/
IOEvent(NbIO source, int command, boolean value) {
//Null source only for destroying the default instance
super(source == null ? new Object() : source, command + IO_EVENT_MASK);
assert command >= 0 && command < CMD_LAST : "Unknown command: " + command; //NOI18N
consumed = false;
this.value = value;
pendingCount++;
}
/**
     * Construct a data-bearing IOEvent with the specified source, command and data
*
* @param source The command source
* @param command The ID of what has happened
* @param data Data required to process this command (i.e. toolbar actions added)
*/
IOEvent(NbIO source, int command, Object data) {
this (source, command, false);
this.data = data;
}
/**
* Convenience getter for the command ID associated with this event.
* Equivalent to <code>getID() - IO_EVENT_MASK</code>.
*
* @return The command
*/
public int getCommand() {
return getID() - IO_EVENT_MASK;
}
/**
* Convenience getter for the NbIO associated with this
* command. Equivalent to <code>(NbIO) getSource()</code>
* @return
*/
public NbIO getIO() {
return getSource() instanceof NbIO ? (NbIO) getSource() : null;
}
/**
* Get a boolean value associated with the event - most use cases involve
* some thread calling boolean setters/getters on an instance of NbIO.
*
* @return The boolean state associated with this command.
*/
public boolean getValue() {
return value;
}
/**
* Get data associated with the event. This is only used for supplying
* toolbar actions.
*
* @return An object
*/
public Object getData() {
return data;
}
/**
* Determine if the event is consumed. Creation events will be.
*
* @return If the event is consumed
*/
@Override
public boolean isConsumed() {
return consumed;
}
/**
* Overridden to avoid a bit of work AWTEvent does that's not
* necessary for us.
*/
@Override
public void consume() {
consumed = true;
}
@Override
public String toString() {
return "IOEvent@" + System.identityHashCode(this) + "-" +
cmdToString(getCommand()) + " on " + getIO() +
" value= " + getValue() + " data=" + getData(); //NOI18N
}
public void dispatch() {
Controller.getDefault().eventDispatched(this);
pendingCount--;
}
public static String cmdToString (int cmd) {
return CMDS[cmd];
}
}
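/*
 * Hedged illustration (added, not part of the original source): the class javadoc above
 * describes posting IOEvents onto the AWT event queue so the IO implementation never needs a
 * reference to the view. A producer running on a background thread could post an event roughly
 * like this; the surrounding wiring is an assumption for the sketch only.
 *
 *   NbIO io = ...; // some IO whose tab should become selected
 *   Toolkit.getDefaultToolkit().getSystemEventQueue()
 *          .postEvent(new IOEvent(io, IOEvent.CMD_SELECT, true));
 *   // Because IOEvent implements ActiveEvent, the event queue later calls dispatch(), which
 *   // hands the event to Controller.getDefault() on the EDT.
 */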
| 3,083 |
3,402 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kylin.stream.coordinator.coordinate.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* <pre>
 * This annotation is a marker for developers.
 * It indicates that the annotated method is not atomic: it may break at an intermediate step.
 * Because exceptions may be thrown during R/W actions against remote resources, the system can be
 * left in an inconsistent state. However, the caller can retry the method safely to achieve
 * eventual consistency.
* </pre>
*
* @see NotAtomicAndNotIdempotent
*/
@Documented
@Retention(RetentionPolicy.SOURCE)
@Target(ElementType.METHOD)
@Inherited
public @interface NotAtomicIdempotent {
}
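/*
 * Hedged sketch (added for illustration): per the javadoc above, an annotated method may stop
 * part-way through, yet repeating it converges on the desired state. A caller could therefore
 * wrap such a method in a bounded retry loop; the method name and retry budget below are
 * invented for this example.
 *
 *   @NotAtomicIdempotent
 *   void assignSegmentsToReplicaSet(String cubeName) { ... touches ZooKeeper and remote nodes ... }
 *
 *   for (int attempt = 1; attempt <= 3; attempt++) {
 *       try {
 *           assignSegmentsToReplicaSet(cubeName);
 *           break; // reached the desired final state
 *       } catch (Exception transientFailure) {
 *           // safe to retry: not atomic, but idempotent with respect to the final state
 *       }
 *   }
 */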
| 437 |
381 | <filename>jaxrs/src/main/java/org/jboss/aerogear/unifiedpush/rest/util/error/JsonProcessingExceptionMapper.java
package org.jboss.aerogear.unifiedpush.rest.util.error;
import com.fasterxml.jackson.core.JsonProcessingException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;
@Provider
public class JsonProcessingExceptionMapper implements ExceptionMapper<JsonProcessingException> {
private static final Logger LOG = LoggerFactory.getLogger(JsonProcessingExceptionMapper.class);
public JsonProcessingExceptionMapper() {
LOG.debug("Starting up");
}
@Override
public Response toResponse(JsonProcessingException exception) {
LOG.debug("Caught exception " + exception.getMessage());
return Response.serverError().entity(ErrorBuilder.forServer().generalException(exception).build()).build();
}
} | 326 |
5,169 | {
"name": "JUtilities",
"version": "1.1",
"summary": "iOS Utilities",
"description": "iOS Utilities for Develepment.",
"homepage": "https://github.com/wishWinds/JUtilities.git",
"license": "MIT",
"authors": {
"shupeng": "<EMAIL>"
},
"platforms": {
"ios": "9.0"
},
"source": {
"git": "https://github.com/wishWinds/JUtilities.git",
"tag": "1.1"
},
"source_files": "JUtilities/JUtilities.h",
"public_header_files": "JUtilities/JUtilities.h",
"requires_arc": true,
"subspecs": [
{
"name": "Runtime",
"source_files": "JUtilities/Runtime/*.{h,m}",
"public_header_files": "JUtilities/Runtime/*.h"
},
{
"name": "Foundation",
"source_files": "JUtilities/Foundation/*.{h,m}",
"public_header_files": "JUtilities/Foundation/*.h"
},
{
"name": "UIKit",
"source_files": "JUtilities/UIKit/*.{h,m}",
"public_header_files": "JUtilities/UIKit/*.h",
"subspecs": [
{
"name": "UIResponderRouter",
"source_files": "JUtilities/UIKit/UIResponderRouter/*.{h,m}",
"public_header_files": "JUtilities/UIKit/UIResponderRouter/*.h"
}
]
}
]
}
| 574 |
443 | from .switch import Switch
class LightSwitch(Switch):
def __repr__(self):
return '<WeMo LightSwitch "{name}">'.format(name=self.name)
| 54 |
36,552 | <gh_stars>1000+
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of multiprocess concurrency with gRPC."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import contextlib
import datetime
import logging
import math
import multiprocessing
import socket
import sys
import time
import grpc
import prime_pb2
import prime_pb2_grpc
_LOGGER = logging.getLogger(__name__)
_ONE_DAY = datetime.timedelta(days=1)
_PROCESS_COUNT = multiprocessing.cpu_count()
_THREAD_CONCURRENCY = _PROCESS_COUNT
def is_prime(n):
    for i in range(2, int(math.floor(math.sqrt(n))) + 1):  # include sqrt(n) so perfect squares are not reported as prime
if n % i == 0:
return False
else:
return True
class PrimeChecker(prime_pb2_grpc.PrimeCheckerServicer):
def check(self, request, context):
_LOGGER.info('Determining primality of %s', request.candidate)
return prime_pb2.Primality(isPrime=is_prime(request.candidate))
def _wait_forever(server):
try:
while True:
time.sleep(_ONE_DAY.total_seconds())
except KeyboardInterrupt:
server.stop(None)
def _run_server(bind_address):
"""Start a server in a subprocess."""
_LOGGER.info('Starting new server.')
options = (('grpc.so_reuseport', 1),)
server = grpc.server(futures.ThreadPoolExecutor(
max_workers=_THREAD_CONCURRENCY,),
options=options)
prime_pb2_grpc.add_PrimeCheckerServicer_to_server(PrimeChecker(), server)
server.add_insecure_port(bind_address)
server.start()
_wait_forever(server)
@contextlib.contextmanager
def _reserve_port():
"""Find and reserve a port for all subprocesses to use."""
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 0:
raise RuntimeError("Failed to set SO_REUSEPORT.")
sock.bind(('', 0))
try:
yield sock.getsockname()[1]
finally:
sock.close()
def main():
with _reserve_port() as port:
bind_address = 'localhost:{}'.format(port)
_LOGGER.info("Binding to '%s'", bind_address)
sys.stdout.flush()
workers = []
for _ in range(_PROCESS_COUNT):
# NOTE: It is imperative that the worker subprocesses be forked before
# any gRPC servers start up. See
# https://github.com/grpc/grpc/issues/16001 for more details.
worker = multiprocessing.Process(target=_run_server,
args=(bind_address,))
worker.start()
workers.append(worker)
for worker in workers:
worker.join()
if __name__ == '__main__':
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('[PID %(process)d] %(message)s')
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
_LOGGER.setLevel(logging.INFO)
main()
| 1,411 |
398 | <filename>Relabel Button.sketchplugin/Contents/Sketch/manifest.json
{
"name" : "Button",
"description" : "Relabel and automatically resize a button (or similar grouped set of layers)",
"author" : "<NAME>",
"authorEmail" : "<EMAIL>",
"version" : "1.10",
"identifier" : "com.kenmoore.sketch.relabelbutton",
"commands" : [
{
"script" : "Relabel Button.js",
"handler" : "relabelButton",
"shortcut" : "command j",
"name" : "Relabel Button",
"identifier" : "relabelbutton"
},
{
"script" : "Relabel Button Right Aligned.js",
"handler" : "relabelButtonRightAligned",
"shortcut" : "command ctrl j",
"name" : "Relabel Button Right Aligned",
"identifier" : "relabelbuttonrightaligned"
}
],
"menu" : {
"title" : "Button",
"items" : [
"relabelbutton",
"relabelbuttonrightaligned"
]
}
}
| 391 |
879 | <reponame>LEONAD486/zstack
package org.zstack.core.log;
import org.zstack.core.cloudbus.CloudBusGlobalConfig;
import org.zstack.core.cloudbus.CloudBusGlobalProperty;
/**
* Created by ZStack on 2020/9/28.
*/
public class LogUtils {
public boolean isLogReadAPI() {
long openReadAPILog = CloudBusGlobalConfig.OPEN_READ_API_LOG.value(Long.class);
if (openReadAPILog != -1) {
return openReadAPILog == 1;
} else {
return !CloudBusGlobalProperty.READ_API_LOG_OFF;
}
}
}
| 226 |
309 | #ifndef PERMISSIONDIALOG_H
#define PERMISSIONDIALOG_H
#include <QSettings>
#include <QWebEnginePage>
#include <QWidget>
namespace Ui {
class PermissionDialog;
}
class PermissionDialog : public QWidget
{
Q_OBJECT
public:
explicit PermissionDialog(QWidget *parent = nullptr);
~PermissionDialog();
signals:
void webPageFeatureChanged(QWebEnginePage::Feature feature);
private slots:
void addToFeaturesTable(QWebEnginePage::Feature feature, QString &featureName);
private:
Ui::PermissionDialog *ui;
QSettings settings;
};
#endif // PERMISSIONDIALOG_H
| 204 |
12,718 | #include "pthread_impl.h"
int pthread_attr_setdetachstate(pthread_attr_t *a, int state)
{
if (state > 1U) return EINVAL;
a->_a_detach = state;
return 0;
}
| 71 |
1,968 | <reponame>agramonte/corona<gh_stars>1000+
//////////////////////////////////////////////////////////////////////////////
//
// This file is part of the Corona game engine.
// For overview and more information on licensing please refer to README.md
// Home page: https://github.com/coronalabs/corona
// Contact: <EMAIL>
//
//////////////////////////////////////////////////////////////////////////////
#pragma once
#include "Core\Rtt_Build.h"
#include "Core\Rtt_Real.h"
#include "Core\Rtt_String.h"
#include "Rtt_PlatformFont.h"
#pragma region Forward Declarations
namespace Interop
{
class RuntimeEnvironment;
}
#pragma endregion
namespace Rtt
{
class WinFont : public PlatformFont
{
public:
typedef WinFont Self;
typedef PlatformFont Super;
/// <summary>Creates a new native Corona font configuration for Win32.</summary>
/// <param name="environment">Corona runtime environment this font is associated with.</param>
WinFont(Interop::RuntimeEnvironment& environment);
/// <summary>Creates a new copy of the given native Corona font configuration.</summary>
/// <param name="font">Objct to copy the font configuration from.</param>
WinFont(const WinFont& font);
/// <summary>Destroys this font object and its allocated resources.</summary>
virtual ~WinFont();
/// <summary>Creates a new copy of this native Corona font object.</summary>
/// <param name="allocator">Pointer to an allocator needed to create the object. Cannot be null.</param>
/// <returns>
/// <para>Returns a new Rtt::WinFont object that is copy of this object.</para>
/// <para>Returns null if given a null allocator.</para>
/// </returns>
virtual PlatformFont* CloneUsing(Rtt_Allocator *allocator) const
{
if (!allocator)
{
return nullptr;
}
return Rtt_NEW(allocator, WinFont(*this));
}
/// <summary>Sets the font family name or font file path to be used.</summary>
/// <param name="name">
/// <para>The font family name or font file name.</para>
/// <para>Can be null or empty string, in which case the default system font should be used.</para>
/// </param>
void SetName(const char* name);
/// <summary>Gets the font family name or font file name.</summary>
/// <returns>
/// <para>Returns the font name or font file path.</para>
/// <para>Returns null or empty string if the font name was not set.</para>
/// </returns>
virtual const char* Name() const;
/// <summary>Sets the font size.</summary>
/// <remarks>
/// The font size units are context sensitive and depend on what feature this font object will be used with.
/// For example, display.newText() always interprets font size in Corona's scaled point system.
/// But native TextFields and TextBoxes can interpret it in native points if their "isFontSizeScaled"
/// property is set to false.
/// </remarks>
/// <param name="value">
/// <para>The font size text will be rendered with.</para>
/// <para>Set to less than or equal to zero to use the system's default font size.</para>
/// </param>
virtual void SetSize(Real value);
/// <summary>Gets the font size.</summary>
/// <remarks>
/// The font size units are context sensitive and depend on what feature this font object will be used with.
/// For example, display.newText() always interprets font size in Corona's scaled point system.
/// But native TextFields and TextBoxes can interpret it in native points if their "isFontSizeScaled"
/// property is set to false.
/// </remarks>
/// <returns>
/// <para>Returns the font size to render text with.</para>
/// <para>Returns a value less than or equal to zero if the font should use the system's default font size.</para>
/// </returns>
virtual Rtt_Real Size() const;
/// <summary>Sets whether or not the font should use a bold style.</summary>
/// <param name="value">Set true to use a bold style. Set false to not use this style.</param>
void SetBold(bool value);
/// <summary>Determines if the font should use a bold style.</summary>
/// <returns>
/// <para>Returns true if the font should use a bold style.</para>
/// <para>Returns false to not use a bold style, which is the default.</para>
/// </returns>
bool IsBold() const;
/// <summary>Sets whether or not the font should use an italic style.</summary>
/// <param name="value">Set true to use an italic style. Set false to not use this style.</param>
void SetItalic(bool value);
/// <summary>Determines if the font should use an italic style.</summary>
/// <returns>
/// <para>Returns true if the font should use an italic style.</para>
/// <para>Returns false to not use an italic style, which is the default.</para>
/// </returns>
bool IsItalic() const;
/// <summary>
/// <para>This function is not supported on Windows.</para>
/// <para>
/// You must use the Interop::Graphics::FontSettings and Interop::Graphics::FontServices derived class
/// to generate a native Win32 GDI or GDI+ font object.
/// </para>
/// </summary>
/// <returns>Always returns null.</returns>
virtual void* NativeObject() const;
private:
Interop::RuntimeEnvironment& fEnvironment;
String fName;
Rtt_Real fSize;
bool fIsBold;
bool fIsItalic;
};
} // namespace Rtt
| 1,657 |
510 | from ._excel import ExcelXlsTableWriter, ExcelXlsxTableWriter
from ._pandas import PandasDataFramePickleWriter
from ._sqlite import SqliteTableWriter
| 43 |
375 | /*
* This file is part of the Wayback archival access software
* (http://archive-access.sourceforge.net/projects/wayback/).
*
* Licensed to the Internet Archive (IA) by one or more individual
* contributors.
*
* The IA licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.archive.wayback.util.webapp;
import java.util.Map;
import java.util.logging.Logger;
import javax.servlet.ServletContext;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.FileSystemXmlApplicationContext;
/**
* Single static method to read a Spring XML configuration, extract
* RequestHandlers, and return a RequestMapper which delegates requests to
* those RequestHandlers.
*
* @author brad
*
*/
public class SpringReader {
private static final Logger LOGGER = Logger.getLogger(
SpringReader.class.getName());
protected static ApplicationContext currentContext = null;
/**
* Read the single Spring XML configuration file located at the specified
* path, performing PropertyPlaceHolder interpolation, extracting all beans
	 * which implement the RequestHandler interface, and constructing a
* RequestMapper for those RequestHandlers, on the specified ServletContext.
* @param configPath the path to the Spring XML file containing the
* configuration.
* @param servletContext the ServletContext where the RequestHandlers should
* be mapped
	 * @return a new RequestMapper which delegates requests for the
* ServletContext
*/
public static RequestMapper readSpringConfig(String configPath,
ServletContext servletContext) {
LOGGER.info("Loading from config file " + configPath);
currentContext = new FileSystemXmlApplicationContext("file:" + configPath);
Map<String,RequestHandler> beans =
currentContext.getBeansOfType(RequestHandler.class,false,false);
return new RequestMapper(beans.values(), servletContext);
}
public static ApplicationContext getCurrentContext() {
return currentContext;
}
}
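/*
 * Hedged usage sketch (added, not part of the original class): readSpringConfig(...) is meant
 * to be driven from servlet bootstrap code. The listener below is purely illustrative; the real
 * wayback webapp performs this wiring elsewhere, and the config path shown is an assumption.
 *
 *   public class ConfigListener implements javax.servlet.ServletContextListener {
 *       public void contextInitialized(javax.servlet.ServletContextEvent sce) {
 *           javax.servlet.ServletContext sc = sce.getServletContext();
 *           String configPath = sc.getRealPath("/WEB-INF/wayback.xml");
 *           RequestMapper mapper = SpringReader.readSpringConfig(configPath, sc);
 *           sc.setAttribute("requestMapper", mapper);
 *       }
 *       public void contextDestroyed(javax.servlet.ServletContextEvent sce) { }
 *   }
 */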
| 689 |
648 | <gh_stars>100-1000
{"resourceType":"DataElement","id":"DeviceUseRequest.indication","meta":{"lastUpdated":"2015-10-24T07:41:03.495+11:00"},"url":"http://hl7.org/fhir/DataElement/DeviceUseRequest.indication","status":"draft","experimental":true,"stringency":"fully-specified","element":[{"path":"DeviceUseRequest.indication","short":"Reason for request","definition":"Reason or justification for the use of this device.","min":0,"max":"*","type":[{"code":"CodeableConcept"}],"isSummary":true,"mapping":[{"identity":"quick","map":"Action.indication.reason"},{"identity":"w5","map":"why"}]}]} | 167 |
5,460 | <reponame>gtourkas/moto<gh_stars>1000+
from moto.core import ACCOUNT_ID
from moto.core.utils import pascal_to_camelcase, camelcase_to_underscores
def make_arn_for_wacl(name, region_name, id, scope):
"""https://docs.aws.amazon.com/waf/latest/developerguide/how-aws-waf-works.html - explains --scope (cloudfront vs regional)"""
if scope == "REGIONAL":
scope = "regional"
elif scope == "CLOUDFRONT":
scope = "global"
return "arn:aws:wafv2:{}:{}:{}/webacl/{}/{}".format(
region_name, ACCOUNT_ID, scope, name, id
)
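# Hedged example (added for illustration, values invented): for a web ACL named "my-acl" with
# id "abc123" in us-east-1, the scope mapping described in the docstring yields ARNs shaped like:
#   make_arn_for_wacl("my-acl", "us-east-1", "abc123", "REGIONAL")
#     -> "arn:aws:wafv2:us-east-1:<ACCOUNT_ID>:regional/webacl/my-acl/abc123"
#   make_arn_for_wacl("my-acl", "us-east-1", "abc123", "CLOUDFRONT")
#     -> "arn:aws:wafv2:us-east-1:<ACCOUNT_ID>:global/webacl/my-acl/abc123"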
def pascal_to_underscores_dict(original_dict):
outdict = {}
for k, v in original_dict.items():
outdict[camelcase_to_underscores(pascal_to_camelcase(k))] = v
return outdict
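# Hedged example (added for illustration): the helper converts only the top-level keys of an
# API-style dict to snake_case, leaving values untouched, e.g. roughly
#   pascal_to_underscores_dict({"DefaultAction": {"Allow": {}}})
#     -> {"default_action": {"Allow": {}}}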
| 319 |
2,038 | // Copyright (c) 2012, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common/mac/arch_utilities.h"
#include <mach-o/arch.h>
#include <mach-o/fat.h>
#include <stdio.h>
#include <string.h>
#ifndef CPU_SUBTYPE_ARM_V7S
#define CPU_SUBTYPE_ARM_V7S (static_cast<cpu_subtype_t>(11))
#endif // CPU_SUBTYPE_ARM_V7S
#ifndef CPU_TYPE_ARM64
#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
#endif // CPU_TYPE_ARM64
#ifndef CPU_SUBTYPE_ARM64_ALL
#define CPU_SUBTYPE_ARM64_ALL (static_cast<cpu_subtype_t>(0))
#endif // CPU_SUBTYPE_ARM64_ALL
#ifndef CPU_SUBTYPE_ARM64_E
#define CPU_SUBTYPE_ARM64_E (static_cast<cpu_subtype_t>(2))
#endif // CPU_SUBTYPE_ARM64_E
namespace {
const NXArchInfo* ArchInfo_arm64(cpu_subtype_t cpu_subtype) {
const char* name = NULL;
switch (cpu_subtype) {
case CPU_SUBTYPE_ARM64_ALL:
name = "arm64";
break;
case CPU_SUBTYPE_ARM64_E:
name = "arm64e";
break;
default:
return NULL;
}
NXArchInfo* arm64 = new NXArchInfo;
*arm64 = *NXGetArchInfoFromCpuType(CPU_TYPE_ARM,
CPU_SUBTYPE_ARM_V7);
arm64->name = name;
arm64->cputype = CPU_TYPE_ARM64;
arm64->cpusubtype = cpu_subtype;
arm64->description = "arm 64";
return arm64;
}
const NXArchInfo* ArchInfo_armv7s() {
NXArchInfo* armv7s = new NXArchInfo;
*armv7s = *NXGetArchInfoFromCpuType(CPU_TYPE_ARM,
CPU_SUBTYPE_ARM_V7);
armv7s->name = "armv7s";
armv7s->cpusubtype = CPU_SUBTYPE_ARM_V7S;
armv7s->description = "arm v7s";
return armv7s;
}
} // namespace
namespace google_breakpad {
const NXArchInfo* BreakpadGetArchInfoFromName(const char* arch_name) {
// TODO: Remove this when the OS knows about arm64.
if (!strcmp("arm64", arch_name))
return BreakpadGetArchInfoFromCpuType(CPU_TYPE_ARM64,
CPU_SUBTYPE_ARM64_ALL);
if (!strcmp("arm64e", arch_name))
return BreakpadGetArchInfoFromCpuType(CPU_TYPE_ARM64,
CPU_SUBTYPE_ARM64_E);
// TODO: Remove this when the OS knows about armv7s.
if (!strcmp("armv7s", arch_name))
return BreakpadGetArchInfoFromCpuType(CPU_TYPE_ARM, CPU_SUBTYPE_ARM_V7S);
return NXGetArchInfoFromName(arch_name);
}
const NXArchInfo* BreakpadGetArchInfoFromCpuType(cpu_type_t cpu_type,
cpu_subtype_t cpu_subtype) {
// TODO: Remove this when the OS knows about arm64.
if (cpu_type == CPU_TYPE_ARM64 && cpu_subtype == CPU_SUBTYPE_ARM64_ALL) {
static const NXArchInfo* arm64 = ArchInfo_arm64(cpu_subtype);
return arm64;
}
if (cpu_type == CPU_TYPE_ARM64 && cpu_subtype == CPU_SUBTYPE_ARM64_E) {
static const NXArchInfo* arm64e = ArchInfo_arm64(cpu_subtype);
return arm64e;
}
// TODO: Remove this when the OS knows about armv7s.
if (cpu_type == CPU_TYPE_ARM && cpu_subtype == CPU_SUBTYPE_ARM_V7S) {
static const NXArchInfo* armv7s = ArchInfo_armv7s();
return armv7s;
}
return NXGetArchInfoFromCpuType(cpu_type, cpu_subtype);
}
} // namespace google_breakpad
// TODO(crbug.com/1242776): The "#ifndef __APPLE__" should be here, but the
// system version of NXGetLocalArchInfo returns incorrect information on
// x86_64 machines (treating them as just x86), so use the Breakpad version
// all the time for now.
namespace {
enum Architecture {
kArch_i386 = 0,
kArch_x86_64,
kArch_x86_64h,
kArch_arm,
kArch_arm64,
kArch_arm64e,
kArch_ppc,
// This must be last.
kNumArchitectures
};
// enum Architecture above and kKnownArchitectures below
// must be kept in sync.
const NXArchInfo kKnownArchitectures[] = {
{
"i386",
CPU_TYPE_I386,
CPU_SUBTYPE_I386_ALL,
NX_LittleEndian,
"Intel 80x86"
},
{
"x86_64",
CPU_TYPE_X86_64,
CPU_SUBTYPE_X86_64_ALL,
NX_LittleEndian,
"Intel x86-64"
},
{
"x86_64h",
CPU_TYPE_X86_64,
CPU_SUBTYPE_X86_64_H,
NX_LittleEndian,
"Intel x86-64h Haswell"
},
{
"arm",
CPU_TYPE_ARM,
CPU_SUBTYPE_ARM_ALL,
NX_LittleEndian,
"ARM"
},
{
"arm64",
CPU_TYPE_ARM64,
CPU_SUBTYPE_ARM64_ALL,
NX_LittleEndian,
"ARM64"
},
{
"arm64e",
CPU_TYPE_ARM64,
CPU_SUBTYPE_ARM64_E,
NX_LittleEndian,
"ARM64e"
},
{
"ppc",
CPU_TYPE_POWERPC,
CPU_SUBTYPE_POWERPC_ALL,
NX_BigEndian,
"PowerPC"
}
};
} // namespace
const NXArchInfo *NXGetLocalArchInfo(void) {
Architecture arch;
#if defined(__i386__)
arch = kArch_i386;
#elif defined(__x86_64__)
arch = kArch_x86_64;
#elif defined(__arm64)
arch = kArch_arm64;
#elif defined(__arm__)
arch = kArch_arm;
#elif defined(__powerpc__)
arch = kArch_ppc;
#else
#error "Unsupported CPU architecture"
#endif
return &kKnownArchitectures[arch];
}
#ifndef __APPLE__
const NXArchInfo *NXGetArchInfoFromName(const char *name) {
for (int arch = 0; arch < kNumArchitectures; ++arch) {
if (!strcmp(name, kKnownArchitectures[arch].name)) {
return &kKnownArchitectures[arch];
}
}
return NULL;
}
const NXArchInfo *NXGetArchInfoFromCpuType(cpu_type_t cputype,
cpu_subtype_t cpusubtype) {
const NXArchInfo *candidate = NULL;
for (int arch = 0; arch < kNumArchitectures; ++arch) {
if (kKnownArchitectures[arch].cputype == cputype) {
if (kKnownArchitectures[arch].cpusubtype == cpusubtype) {
return &kKnownArchitectures[arch];
}
if (!candidate) {
candidate = &kKnownArchitectures[arch];
}
}
}
return candidate;
}
struct fat_arch *NXFindBestFatArch(cpu_type_t cputype,
cpu_subtype_t cpusubtype,
struct fat_arch *fat_archs,
uint32_t nfat_archs) {
struct fat_arch *candidate = NULL;
for (uint32_t f = 0; f < nfat_archs; ++f) {
if (fat_archs[f].cputype == cputype) {
if (fat_archs[f].cpusubtype == cpusubtype) {
return &fat_archs[f];
}
if (!candidate) {
candidate = &fat_archs[f];
}
}
}
return candidate;
}
#endif // !__APPLE__
| 3,303 |
4,036 |
#Written in Java or C style
def gcd(a, b):
while(a != 0 and b != 0):
if(a > b):
a = a % b
else:
b = b % a
if(a == 0):
return (b)
return (a)
#Written in a more Pythonic style
def gcd(a, b):
while a != 0 and b != 0:
if a > b:
a = a % b
else:
b = b % a
if a == 0:
return b
return a
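# Illustrative check (added for clarity; not part of the original snippet). Both
# versions compute the same greatest common divisor by repeated remainders; the
# second definition shadows the first, so the calls below exercise the Pythonic
# version. For non-negative inputs the result matches math.gcd.
if __name__ == "__main__":
    import math
    for a, b in [(48, 18), (7, 13), (0, 5)]:
        assert gcd(a, b) == math.gcd(a, b)
    print(gcd(48, 18))  # 6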
| 234 |
12,718 | <filename>lib/libcxx/include/__functional/bind_front.h
// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCPP___FUNCTIONAL_BIND_FRONT_H
#define _LIBCPP___FUNCTIONAL_BIND_FRONT_H
#include <__config>
#include <__functional/perfect_forward.h>
#include <__functional/invoke.h>
#include <type_traits>
#include <utility>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
_LIBCPP_BEGIN_NAMESPACE_STD
#if _LIBCPP_STD_VER > 17
struct __bind_front_op
{
template<class... _Args>
constexpr static auto __call(_Args&&... __args)
noexcept(noexcept(_VSTD::invoke(_VSTD::forward<_Args>(__args)...)))
-> decltype( _VSTD::invoke(_VSTD::forward<_Args>(__args)...))
{ return _VSTD::invoke(_VSTD::forward<_Args>(__args)...); }
};
template<class _Fn, class... _Args,
class = _EnableIf<conjunction<is_constructible<decay_t<_Fn>, _Fn>,
is_move_constructible<decay_t<_Fn>>,
is_constructible<decay_t<_Args>, _Args>...,
is_move_constructible<decay_t<_Args>>...
>::value>>
constexpr auto bind_front(_Fn&& __f, _Args&&... __args)
{
return __perfect_forward<__bind_front_op, _Fn, _Args...>(_VSTD::forward<_Fn>(__f),
_VSTD::forward<_Args>(__args)...);
}
#endif // _LIBCPP_STD_VER > 17
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___FUNCTIONAL_BIND_FRONT_H
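// Illustrative usage sketch (added for clarity; not part of the libc++ header).
// With C++20, std::bind_front binds the leading arguments of a callable and
// perfectly forwards the remaining ones at call time. The names below are
// hypothetical.
//
//   #include <functional>
//
//   int add(int a, int b) { return a + b; }
//   auto add_ten = std::bind_front(add, 10);
//   int r = add_ten(5);  // r == 15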
| 854 |
541 | // Copyright 2017, 2019 ETH Zürich, <NAME>
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <memory>
#include "libvis/eigen.h"
#include "libvis/libvis.h"
namespace vis {
template <typename T>
class Image;
enum class ImageFormat {
kPNG = 0,
kPBM, // netpbm portable bitmap file format
kPGM, // netpbm portable graymap file format
kPPM, // netpbm portable pixmap file format
kPNM, // netpbm portable anymap file format
kOther
};
ImageFormat TryToDetermineImageFormat(const std::string& filename);
// Base class for image I/O classes. Note: All functions must be re-entrant and
// must return true on success or false if an error occurred.
class ImageIO {
public:
  // Better levels of support must map to higher enum values.
enum class ImageFormatSupport {
kComplete = 2,
kIncomplete = 1,
kNone = 0
};
virtual ~ImageIO() {}
virtual ImageFormatSupport GetSupportForFormat(ImageFormat /*format*/) const {
return ImageFormatSupport::kNone;
}
virtual bool Read(const std::string& /*image_file_name*/, Image<u8>* /*image*/) const {
return false;
}
virtual bool Read(const std::string& /*image_file_name*/, Image<u16>* /*image*/) const {
return false;
}
virtual bool Read(const std::string& /*image_file_name*/, Image<Vec3u8>* /*image*/) const {
return false;
}
virtual bool Read(const std::string& /*image_file_name*/, Image<Vec4u8>* /*image*/) const {
return false;
}
virtual bool Write(const std::string& /*image_file_name*/, const Image<u8>& /*image*/) const {
return false;
}
virtual bool Write(const std::string& /*image_file_name*/, const Image<u16>& /*image*/) const {
return false;
}
virtual bool Write(const std::string& /*image_file_name*/, const Image<Vec3u8>& /*image*/) const {
return false;
}
virtual bool Write(const std::string& /*image_file_name*/, const Image<Vec4u8>& /*image*/) const {
return false;
}
};
// Image I/O class registry. All classes providing image I/O register themselves
// here so that they can be looked up. Which classes are available depends on the
// dependencies that were compiled in.
class ImageIORegistry {
public:
// Registers an image IO class.
inline void Register(const shared_ptr<ImageIO>& image_io) {
image_ios_.push_back(image_io);
// LOG(INFO) << "Have " << image_ios_.size() << " handlers.";
}
inline ImageIO* Get(const std::string& filename) const {
if (image_ios_.empty()) {
return nullptr;
}
// Try to determine the image format.
ImageFormat format = TryToDetermineImageFormat(filename);
// Prioritize complete implementations for the format.
    ImageIO* best_implementation = nullptr;
    ImageIO::ImageFormatSupport best_support = ImageIO::ImageFormatSupport::kNone;
    for (const shared_ptr<ImageIO>& image_io : image_ios_) {
      ImageIO::ImageFormatSupport support = image_io->GetSupportForFormat(format);
      if (static_cast<int>(support) > static_cast<int>(best_support)) {
        best_support = support;
        best_implementation = image_io.get();
      }
    }
    return best_implementation;
}
// Accessor to the global instance of this class.
static ImageIORegistry* Instance() {
static ImageIORegistry instance;
return &instance;
}
private:
vector<shared_ptr<ImageIO>> image_ios_;
};
}
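// Illustrative sketch (added for clarity; not part of the original header). A
// hypothetical backend would subclass ImageIO, report which formats it supports,
// and register a shared instance with the global registry, typically from a
// static initializer in its own translation unit. "PngImageIO" is an invented
// name.
//
//   class PngImageIO : public vis::ImageIO {
//    public:
//     ImageFormatSupport GetSupportForFormat(vis::ImageFormat format) const override {
//       return (format == vis::ImageFormat::kPNG) ? ImageFormatSupport::kComplete
//                                                 : ImageFormatSupport::kNone;
//     }
//     // ... Read() / Write() overrides for the supported pixel types ...
//   };
//
//   // Registration, e.g. from a static initializer:
//   vis::ImageIORegistry::Instance()->Register(std::make_shared<PngImageIO>());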
| 1,568 |
1,744 | <reponame>civitaspo/embulk
package org.embulk.spi.util;
import com.google.common.collect.ImmutableList;
import java.util.Iterator;
import java.util.List;
import org.embulk.spi.Column;
import org.embulk.spi.ColumnVisitor;
import org.embulk.spi.Page;
import org.embulk.spi.PageReader;
import org.embulk.spi.Schema;
/**
 * A utility class to manipulate {@link org.embulk.spi.Page}s.
*
* @deprecated Use {@link org.embulk.spi.PageReader} directly.
*/
@Deprecated
public class Pages {
public static List<Object[]> toObjects(Schema schema, Page page) {
return toObjects(schema, ImmutableList.of(page));
}
// TODO use streaming and return Iterable
public static List<Object[]> toObjects(final Schema schema, final Iterable<Page> pages, final boolean useInstant) {
ImmutableList.Builder<Object[]> builder = ImmutableList.builder();
Iterator<Page> ite = pages.iterator();
try (PageReader reader = new PageReader(schema)) {
while (ite.hasNext()) {
reader.setPage(ite.next());
while (reader.nextRecord()) {
builder.add(toObjects(reader, useInstant));
}
}
}
return builder.build();
}
public static List<Object[]> toObjects(Schema schema, Iterable<Page> pages) {
return toObjects(schema, pages, false);
}
public static Object[] toObjects(final PageReader record, final boolean useInstant) {
final Object[] values = new Object[record.getSchema().getColumns().size()];
record.getSchema().visitColumns(new ObjectColumnVisitor(record, useInstant) {
@Override
public void visit(Column column, Object object) {
values[column.getIndex()] = object;
}
});
return values;
}
public static Object[] toObjects(final PageReader record) {
return toObjects(record, false);
}
/**
* A {@link ColumnVisitor} implementation to map everything to {@link java.lang.Object}.
*
* @deprecated Implement your own {@link ColumnVisitor}.
*/
@Deprecated
public abstract static class ObjectColumnVisitor implements ColumnVisitor {
private final PageReader record;
private final boolean useInstant;
public ObjectColumnVisitor(final PageReader record, final boolean useInstant) {
this.record = record;
this.useInstant = useInstant;
}
public ObjectColumnVisitor(PageReader record) {
this(record, false);
}
public abstract void visit(Column column, Object obj);
@Override
public void booleanColumn(Column column) {
if (record.isNull(column)) {
visit(column, null);
} else {
visit(column, record.getBoolean(column));
}
}
@Override
public void longColumn(Column column) {
if (record.isNull(column)) {
visit(column, null);
} else {
visit(column, record.getLong(column));
}
}
@Override
public void doubleColumn(Column column) {
if (record.isNull(column)) {
visit(column, null);
} else {
visit(column, record.getDouble(column));
}
}
@Override
public void stringColumn(Column column) {
if (record.isNull(column)) {
visit(column, null);
} else {
visit(column, record.getString(column));
}
}
@Override
@SuppressWarnings("deprecation") // https://github.com/embulk/embulk/issues/1292
public void timestampColumn(Column column) {
if (record.isNull(column)) {
visit(column, null);
} else {
if (this.useInstant) {
visit(column, record.getTimestampInstant(column));
} else {
visit(column, record.getTimestamp(column));
}
}
}
@Override
public void jsonColumn(Column column) {
if (record.isNull(column)) {
visit(column, null);
} else {
visit(column, record.getJson(column));
}
}
}
public static Object getObject(PageReader record, Column column) {
GetObjectColumnVisitor visitor = new GetObjectColumnVisitor(record);
column.visit(visitor);
return visitor.get();
}
private static class GetObjectColumnVisitor extends ObjectColumnVisitor {
private Object object;
public GetObjectColumnVisitor(PageReader record) {
super(record);
}
public Object get() {
return object;
}
public void visit(Column column, Object object) {
this.object = object;
}
}
}
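// Illustrative usage sketch (added for clarity; not part of the original class).
// It converts buffered pages back into plain Object[] rows, e.g. inside a plugin
// test; the "schema" and "pages" variables are assumed to exist in the caller.
//
//   List<Object[]> rows = Pages.toObjects(schema, pages);
//   for (Object[] row : rows) {
//       // row[i] corresponds to column i of the schema
//   }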
| 2,251 |
1,253 | <gh_stars>1000+
print("Enter the number of process: ")
n = int(input())
Allocation = []
Max = []
Need = []
# take inputs of Allocation
print('Allocation Matrix')
for i in range(n):
theinputs = []
for j in range(3):
x = int(input())
theinputs.append(x)
Allocation.append(theinputs)
# take inputs of Max
print('Claim Matrix')
for i in range(n):
theinputs = []
for j in range(3):
x = int(input())
theinputs.append(x)
Max.append(theinputs)
# find the Need
for i in range(n):
theinputs = []
for j in range(3):
x = Max[i][j] - Allocation[i][j]
theinputs.append(x)
Need.append(theinputs)
# print(Need)
# take input of A,B,C at t0 time
print('Enter Resources:')
A = int(input())
B = int(input())
C = int(input())
# getting Available at t0 time
Available = []
x = 0
for i in range(n):
x += Allocation[i][0]
x = A - x
Available.append(x)
x = 0
for i in range(n):
x += Allocation[i][1]
x = B - x
Available.append(x)
x = 0
for i in range(n):
x += Allocation[i][2]
x = C - x
Available.append(x)
# print(Available)
Work = Available
# take input for a request vector (read here, but not used by the safety check below)
print('Initial Available Vector')
request = []
for i in range(3):
x = int(input())
request.append(x)
# All finish is Zero at the initial
Finish = []
for i in range(n):
Finish.append(0)
Sequence = []
for i in range(n):
for j in range(n):
if(Finish[j] == 0):
if(Need[j][0] <= Work[0] and Need[j][1] <= Work[1] and Need[j][2] <= Work[2]):
Work[0] += Allocation[j][0]
Work[1] += Allocation[j][1]
Work[2] += Allocation[j][2]
Finish[j] = 1
Sequence.append(j+1)
print("Process", j+1, "executed")
print('Available Matrix: ', Work)
# print(Finish)
tag = 0
for i in range(n):
if(Finish[i] == 0):
tag = 1
break
if(tag == 0):
print("Granted!")
print("The Process Sequence: ")
print(Sequence)
else:
print("Deadlock in the System!!!")
| 952 |
977 | #pragma once
#include "Post/PostFilter.hpp"
namespace acid {
class ACID_EXPORT DefaultFilter : public PostFilter {
public:
explicit DefaultFilter(const Pipeline::Stage &pipelineStage, bool lastFilter = false);
void Render(const CommandBuffer &commandBuffer) override;
private:
bool lastFilter;
};
}
| 93 |
1,013 | <reponame>JosephChataignon/pyclustering
/*!
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <pyclustering/cluster/optics.hpp>
#include <pyclustering/cluster/ordering_analyser.hpp>
#include <pyclustering/utils/metric.hpp>
#include "samples.hpp"
#include "utenv_check.hpp"
using namespace pyclustering;
using namespace pyclustering::clst;
using namespace pyclustering::utils::metric;
static std::shared_ptr<optics_data>
template_optics_length_process_data(const std::shared_ptr<dataset> & p_data,
const double p_radius,
const size_t p_neighbors,
const size_t p_amount_clusters,
const std::vector<size_t> & p_expected_cluster_length)
{
std::shared_ptr<optics_data> ptr_output_result = std::make_shared<optics_data>();
optics solver(p_radius, p_neighbors, p_amount_clusters);
solver.process(*p_data, *ptr_output_result);
const dataset & data = *p_data;
const cluster_sequence & actual_clusters = ptr_output_result->clusters();
const optics_object_sequence & objects = ptr_output_result->optics_objects();
ASSERT_CLUSTER_SIZES(data, actual_clusters, p_expected_cluster_length);
if (p_amount_clusters > 0) {
EXPECT_EQ(p_expected_cluster_length.size(), ordering_analyser::extract_cluster_amount(ptr_output_result->cluster_ordering(), ptr_output_result->get_radius()));
}
EXPECT_EQ(p_data->size(), objects.size());
for (const auto & object : objects) {
EXPECT_TRUE(object.m_core_distance == optics::NONE_DISTANCE || object.m_core_distance >= 0.0);
EXPECT_TRUE(object.m_reachability_distance == optics::NONE_DISTANCE || object.m_reachability_distance >= 0.0);
EXPECT_TRUE(object.m_processed);
}
return ptr_output_result;
}
static std::shared_ptr<optics_data>
template_optics_length_process_distance_matrix(const std::shared_ptr<dataset> & p_data,
const double p_radius,
const size_t p_neighbors,
const size_t p_amount_clusters,
const std::vector<size_t> & p_expected_cluster_length)
{
std::shared_ptr<optics_data> ptr_output_result = std::make_shared<optics_data>();
optics solver(p_radius, p_neighbors, p_amount_clusters);
dataset matrix;
distance_matrix(*p_data, matrix);
solver.process(matrix, data_t::DISTANCE_MATRIX, *ptr_output_result);
const dataset & data = *p_data;
const cluster_sequence & actual_clusters = ptr_output_result->clusters();
ASSERT_CLUSTER_SIZES(data, actual_clusters, p_expected_cluster_length);
if (p_amount_clusters > 0) {
EXPECT_EQ(p_expected_cluster_length.size(), ordering_analyser::extract_cluster_amount(ptr_output_result->cluster_ordering(), ptr_output_result->get_radius()));
}
return ptr_output_result;
}
TEST(utest_optics, allocation_sample_simple_01) {
const std::vector<size_t> expected_clusters_length = { 5, 5 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 0.4, 2, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_01_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 5, 5 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 0.4, 2, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_one_allocation_simple_01) {
const std::vector<size_t> expected_clusters_length = { 10 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 10.0, 1, 0, expected_clusters_length);
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 9.0, 1, 0, expected_clusters_length);
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 5.0, 1, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_one_allocation_simple_01_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 10 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 10.0, 1, 0, expected_clusters_length);
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 9.0, 1, 0, expected_clusters_length);
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 5.0, 1, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_02) {
const std::vector<size_t> expected_clusters_length = { 10, 5, 8 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 1.0, 2, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_02_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 10, 5, 8 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 1.0, 2, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_one_allocation_sample_simple_02) {
const std::vector<size_t> expected_clusters_length = { 23 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 5.0, 1, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_one_allocation_sample_simple_02_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 23 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 5.0, 1, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_03) {
const std::vector<size_t> expected_clusters_length = { 10, 10, 10, 30 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_03), 0.7, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_03_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 10, 10, 10, 30 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_03), 0.7, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_04) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15, 15 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_04), 0.7, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_04_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15, 15 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_04), 0.7, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_05) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_05), 0.7, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_05_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_05), 0.7, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_one_dimension) {
const std::vector<size_t> expected_clusters_length = { 10, 10 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_07), 3.0, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_one_dimension_one_allocation) {
const std::vector<size_t> expected_clusters_length = { 20 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_07), 7.0, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_lsun) {
const std::vector<size_t> expected_clusters_length = { 100, 101, 202 };
template_optics_length_process_data(fcps_sample_factory::create_sample(FCPS_SAMPLE::LSUN), 0.5, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_lsun_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 100, 101, 202 };
template_optics_length_process_distance_matrix(fcps_sample_factory::create_sample(FCPS_SAMPLE::LSUN), 0.5, 3, 0, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_02_large_radius) {
const std::vector<size_t> expected_clusters_length = { 10, 5, 8 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 5.0, 2, 3, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_02_large_radius_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 10, 5, 8 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 5.0, 2, 3, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_03_large_radius) {
const std::vector<size_t> expected_clusters_length = { 10, 10, 10, 30 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_03), 7.0, 4, 4, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_03_large_radius_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 10, 10, 10, 30 };
template_optics_length_process_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_03), 7.0, 4, 4, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_04_large_radius) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15, 15 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_04), 50.0, 5, 5, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_simple_05_large_radius) {
const std::vector<size_t> expected_clusters_length = { 15, 15, 15, 15 };
template_optics_length_process_data(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_05), 10.0, 10, 4, expected_clusters_length);
}
#ifndef VALGRIND_ANALYSIS_SHOCK
TEST(utest_optics, allocation_sample_lsun_large_radius_10) {
const std::vector<size_t> expected_clusters_length = { 99, 100, 202 };
template_optics_length_process_data(fcps_sample_factory::create_sample(FCPS_SAMPLE::LSUN), 1.0, 3, 3, expected_clusters_length);
}
TEST(utest_optics, allocation_sample_lsun_large_radius_19) {
const std::vector<size_t> expected_clusters_length = { 99, 100, 202 };
template_optics_length_process_data(fcps_sample_factory::create_sample(FCPS_SAMPLE::LSUN), 1.9, 3, 3, expected_clusters_length);
}
#endif
static std::shared_ptr<optics_data>
template_optics_noise_allocation(const std::shared_ptr<dataset> & p_data,
const double p_radius,
const size_t p_neighbors,
const size_t p_amount_clusters,
const std::vector<size_t> & p_expected_cluster_length,
const std::size_t p_noise_length) {
std::shared_ptr<optics_data> ptr_output_result = template_optics_length_process_data(p_data, p_radius, p_neighbors, p_amount_clusters, p_expected_cluster_length);
EXPECT_EQ(p_noise_length, ptr_output_result->noise().size());
return ptr_output_result;
}
static std::shared_ptr<optics_data>
template_optics_noise_allocation_distance_matrix(const std::shared_ptr<dataset> & p_data,
const double p_radius,
const size_t p_neighbors,
const size_t p_amount_clusters,
const std::vector<size_t> & p_expected_cluster_length,
const std::size_t p_noise_length) {
std::shared_ptr<optics_data> ptr_output_result = template_optics_length_process_distance_matrix(p_data, p_radius, p_neighbors, p_amount_clusters, p_expected_cluster_length);
EXPECT_EQ(p_noise_length, ptr_output_result->noise().size());
return ptr_output_result;
}
TEST(utest_optics, noise_allocation_sample_simple_01) {
const std::vector<size_t> expected_clusters_length = { };
template_optics_noise_allocation(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 10.0, 20, 0, expected_clusters_length, 10);
}
TEST(utest_optics, noise_allocation_sample_simple_01_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { };
template_optics_noise_allocation_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_01), 10.0, 20, 0, expected_clusters_length, 10);
}
TEST(utest_optics, noise_allocation_sample_simple_02) {
const std::vector<size_t> expected_clusters_length = { };
template_optics_noise_allocation(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 0.5, 20, 0, expected_clusters_length, 23);
}
TEST(utest_optics, noise_allocation_sample_simple_02_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { };
template_optics_noise_allocation_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 0.5, 20, 0, expected_clusters_length, 23);
}
TEST(utest_optics, noise_cluster_allocation_sample_simple_02) {
const std::vector<size_t> expected_clusters_length = { 10 };
template_optics_noise_allocation(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 2.0, 9, 0, expected_clusters_length, 13);
}
TEST(utest_optics, noise_cluster_allocation_sample_simple_02_distance_matrix) {
const std::vector<size_t> expected_clusters_length = { 10 };
template_optics_noise_allocation_distance_matrix(simple_sample_factory::create_sample(SAMPLE_SIMPLE::SAMPLE_SIMPLE_02), 2.0, 9, 0, expected_clusters_length, 13);
}
#ifdef UT_PERFORMANCE_SESSION
#include <chrono>
TEST(performance_optics, big_data) {
auto points = simple_sample_factory::create_random_sample(5000, 10);
auto start = std::chrono::system_clock::now();
const std::size_t repeat = 1;
for (std::size_t i = 0; i < repeat; i++) {
optics_data output_result;
optics solver(0.1, 40);
solver.process(*points, output_result);
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> difference = end - start;
std::cout << "Clustering time: '" << difference.count() / repeat << "' sec." << std::endl;
}
TEST(performance_optics, engy_time) {
auto points = fcps_sample_factory::create_sample(FCPS_SAMPLE::ENGY_TIME);
auto start = std::chrono::system_clock::now();
const std::size_t repeat = 30;
for (std::size_t i = 0; i < repeat; i++) {
optics_data output_result;
optics solver(0.2, 20);
solver.process(*points, output_result);
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> difference = end - start;
std::cout << "Clustering time: '" << difference.count() / repeat << "' sec." << std::endl;
}
TEST(performance_optics, atom) {
auto points = fcps_sample_factory::create_sample(FCPS_SAMPLE::ATOM);
auto start = std::chrono::system_clock::now();
const std::size_t repeat = 20;
for (std::size_t i = 0; i < repeat; i++) {
optics_data output_result;
optics solver(15, 3);
solver.process(*points, output_result);
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> difference = end - start;
std::cout << "Clustering time: '" << difference.count() / repeat << "' sec." << std::endl;
}
TEST(performance_optics, chainlink) {
auto points = fcps_sample_factory::create_sample(FCPS_SAMPLE::CHAINLINK);
auto start = std::chrono::system_clock::now();
const std::size_t repeat = 20;
for (std::size_t i = 0; i < repeat; i++) {
optics_data output_result;
optics solver(0.15, 3);
solver.process(*points, output_result);
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> difference = end - start;
std::cout << "Clustering time: '" << difference.count() / repeat << "' sec." << std::endl;
}
#endif
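// Illustrative usage sketch (added for clarity; not part of the original tests).
// It mirrors what the test templates above do, without the assertions.
//
//   pyclustering::clst::optics_data result;
//   pyclustering::clst::optics solver(0.5 /* radius */, 3 /* neighbors */);
//   solver.process(*fcps_sample_factory::create_sample(FCPS_SAMPLE::LSUN), result);
//   const cluster_sequence & clusters = result.clusters();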
| 6,244 |
1,091 | /*
* Copyright 2020-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.store.service;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;
public class TestAtomicValue<V> implements AsyncAtomicValue<V> {
private AtomicReference<V> ref;
private String name;
TestAtomicValue(String name) {
ref = new AtomicReference<>();
this.name = name;
}
@Override
public CompletableFuture<Boolean> compareAndSet(V expect, V update) {
return CompletableFuture.completedFuture(ref.compareAndSet(expect, update));
}
@Override
public CompletableFuture<V> get() {
return CompletableFuture.completedFuture(ref.get());
}
@Override
public CompletableFuture<V> getAndSet(V value) {
return CompletableFuture.completedFuture(ref.getAndSet(value));
}
@Override
public CompletableFuture<Void> set(V value) {
ref.set(value);
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Void> addListener(AtomicValueEventListener<V> listener) {
// Unimplemented
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Void> removeListener(AtomicValueEventListener<V> listener) {
// Unimplemented
return CompletableFuture.completedFuture(null);
}
@Override
public String name() {
return name;
}
public static Builder builder() {
return new Builder();
}
public static class Builder extends AtomicValueBuilder {
@Override
public AsyncAtomicValue build() {
return new TestAtomicValue<>("");
}
}
}
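// Illustrative usage sketch (added for clarity; not part of the original class).
// The test double can stand in for a distributed AsyncAtomicValue in unit tests;
// the snippet assumes the caller lives in the same package, since the constructor
// is package-private.
//
//   TestAtomicValue<String> value = new TestAtomicValue<>("leader");
//   value.set("node-1").join();
//   boolean swapped = value.compareAndSet("node-1", "node-2").join(); // true
//   String current = value.get().join(); // "node-2"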
| 799 |
3,083 | /*
Copyright 2014 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.google.security.zynamics.binnavi.API.disassembly;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import com.google.common.collect.Lists;
import com.google.security.zynamics.binnavi.Database.CModuleViewGenerator;
import com.google.security.zynamics.binnavi.Database.Exceptions.CouldntLoadDataException;
import com.google.security.zynamics.binnavi.Database.Exceptions.LoadCancelledException;
import com.google.security.zynamics.binnavi.Database.MockClasses.MockDatabase;
import com.google.security.zynamics.binnavi.Database.MockClasses.MockSqlProvider;
import com.google.security.zynamics.binnavi.Tagging.CTag;
import com.google.security.zynamics.binnavi.Tagging.CTagManager;
import com.google.security.zynamics.binnavi.Tagging.MockTagManager;
import com.google.security.zynamics.binnavi.Tagging.TagType;
import com.google.security.zynamics.binnavi.disassembly.CFunction;
import com.google.security.zynamics.binnavi.disassembly.CFunctionNode;
import com.google.security.zynamics.binnavi.disassembly.CInstruction;
import com.google.security.zynamics.binnavi.disassembly.COperandTree;
import com.google.security.zynamics.binnavi.disassembly.INaviCodeNode;
import com.google.security.zynamics.binnavi.disassembly.INaviEdge;
import com.google.security.zynamics.binnavi.disassembly.INaviInstruction;
import com.google.security.zynamics.binnavi.disassembly.INaviViewNode;
import com.google.security.zynamics.binnavi.disassembly.MockEdge;
import com.google.security.zynamics.binnavi.disassembly.MockTextNode;
import com.google.security.zynamics.binnavi.disassembly.MockView;
import com.google.security.zynamics.binnavi.disassembly.Modules.CModule;
import com.google.security.zynamics.binnavi.disassembly.views.INaviView;
import com.google.security.zynamics.zylib.disassembly.CAddress;
import com.google.security.zynamics.zylib.disassembly.FunctionType;
import com.google.security.zynamics.zylib.disassembly.GraphType;
import com.google.security.zynamics.zylib.types.trees.Tree;
import com.google.security.zynamics.zylib.types.trees.TreeNode;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import java.awt.Color;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
@RunWith(JUnit4.class)
public final class ViewGraphHelpersTest {
private CodeNode m_codeNode;
private FunctionNode m_functionNode;
private TextNode m_textNode;
private ViewGraph m_graph;
private View m_view;
@Before
public void setUp() throws CouldntLoadDataException, LoadCancelledException,
com.google.security.zynamics.binnavi.API.disassembly.CouldntLoadDataException,
PartialLoadException {
final MockSqlProvider provider = new MockSqlProvider();
final TagManager tagManager = new TagManager(new MockTagManager(TagType.NODE_TAG));
final TagManager viewTagManager =
new TagManager(new CTagManager(new Tree<CTag>(new TreeNode<CTag>(new CTag(1, "", "",
TagType.VIEW_TAG, provider))), TagType.VIEW_TAG, provider));
final Database database = new Database(new MockDatabase());
final CModule internalModule =
new CModule(1, "", "", new Date(), new Date(), "00000000000000000000000000000000",
"0000000000000000000000000000000000000000", 0, 0, new CAddress(0), new CAddress(0),
null, null, Integer.MAX_VALUE, false, provider);
internalModule.load();
final Module module = new Module(database, internalModule, tagManager, viewTagManager);
final CModuleViewGenerator generator = new CModuleViewGenerator(provider, internalModule);
final INaviView internalView =
generator.generate(1, "My View", "My View Description",
com.google.security.zynamics.zylib.disassembly.ViewType.NonNative,
GraphType.MIXED_GRAPH, new Date(), new Date(), 1, 2, new HashSet<CTag>(),
new HashSet<CTag>(), false);
m_view = new View(module, internalView, tagManager, viewTagManager);
m_view.load();
final List<INaviInstruction> instructions = new ArrayList<INaviInstruction>();
instructions.add(new CInstruction(false, internalModule, new CAddress(0x123), "nop",
new ArrayList<COperandTree>(), new byte[] {(byte) 0x90}, "x86-32", provider));
instructions.add(new CInstruction(false, internalModule, new CAddress(0x124), "nop",
new ArrayList<COperandTree>(), new byte[] {(byte) 0x90}, "x86-32", provider));
instructions.add(new CInstruction(false, internalModule, new CAddress(0x125), "nop",
new ArrayList<COperandTree>(), new byte[] {(byte) 0x90}, "x86-32", provider));
final INaviCodeNode codeNode = internalView.getContent().createCodeNode(null, instructions);
final List<INaviViewNode> nodes1 = new ArrayList<INaviViewNode>();
nodes1.add(codeNode);
final List<INaviEdge> edges1 = new ArrayList<INaviEdge>();
final CFunction internalFunction = new CFunction(internalModule,
new MockView(nodes1, edges1, provider), new CAddress(0x123), "Mock Function",
"Mock Function", "Mock Description", 0, 0, 0, 0, FunctionType.NORMAL, "", 0, null, null,
null, provider);
internalFunction.load();
final Function function = new Function(module, internalFunction);
final CFunctionNode functionNode =
new CFunctionNode(0, internalFunction, 0, 0, 0, 0, Color.RED, false, false, null,
new HashSet<CTag>(), provider);
m_codeNode = new CodeNode(m_view, codeNode, tagManager);
m_functionNode = new FunctionNode(m_view, functionNode, function, tagManager);
m_textNode = new TextNode(m_view, new MockTextNode(), tagManager);
final List<ViewNode> nodes = Lists.newArrayList(m_codeNode, m_functionNode, m_textNode);
final List<ViewEdge> edges =
Lists.newArrayList(new ViewEdge(new MockEdge(1, provider), nodes.get(0), nodes.get(0)));
m_graph = new ViewGraph(nodes, edges);
}
@Test
public void testGetCodeNode() {
assertEquals(m_codeNode, ViewGraphHelpers.getCodeNode(m_graph, new Address(0x123)));
assertNull(ViewGraphHelpers.getCodeNode(m_graph, new Address(0x124)));
assertEquals(m_codeNode, ViewGraphHelpers.getCodeNode(m_graph, 0x123));
assertNull(ViewGraphHelpers.getCodeNode(m_graph, 0x124));
}
@Test
public void testGetCodeNodes() {
final List<CodeNode> codeNodes = ViewGraphHelpers.getCodeNodes(m_graph);
assertEquals(1, codeNodes.size());
assertEquals(m_codeNode, codeNodes.get(0));
}
@Test
public void testGetFunctionNode() {
assertEquals(m_functionNode, ViewGraphHelpers.getFunctionNode(m_graph, "Mock Function"));
assertNull(ViewGraphHelpers.getFunctionNode(m_graph, "Sock Function"));
}
@Test
public void testGetFunctionNodes() {
final List<FunctionNode> functionNodes = ViewGraphHelpers.getFunctionNodes(m_graph);
assertEquals(1, functionNodes.size());
assertEquals(m_functionNode, functionNodes.get(0));
}
@Test
public void testGetInstruction() {
assertEquals(m_codeNode.getInstructions().get(0),
ViewGraphHelpers.getInstruction(m_graph, new Address(0x123)));
assertNull(ViewGraphHelpers.getInstruction(m_graph, new Address(0x129)));
assertEquals(m_codeNode.getInstructions().get(0),
ViewGraphHelpers.getInstruction(m_graph, 0x123));
assertNull(ViewGraphHelpers.getInstruction(m_graph, 0x129));
}
@Test
public void testInline()
throws com.google.security.zynamics.binnavi.API.disassembly.CouldntLoadDataException,
PartialLoadException {
m_view.load();
final InliningResult result =
ViewGraphHelpers.inlineFunctionCall(m_view, m_codeNode,
m_codeNode.getInstructions().get(1), m_functionNode.getFunction());
assertNotNull(result.getFirstNode());
assertNotNull(result.getSecondNode());
assertEquals(2, result.getFirstNode().getInstructions().size());
assertEquals(1, result.getSecondNode().getInstructions().size());
}
}
| 2,980 |
1,875 | <filename>tools/idea/plugin/src/main/java/org/teavm/idea/ui/TeaVMFacetEditorTab.java
/*
* Copyright 2016 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.teavm.idea.ui;
import com.intellij.facet.ui.FacetEditorTab;
import com.intellij.openapi.module.Module;
import javax.swing.JComponent;
import org.jetbrains.annotations.Nls;
import org.jetbrains.annotations.NotNull;
import org.teavm.idea.jps.model.TeaVMJpsConfiguration;
public class TeaVMFacetEditorTab extends FacetEditorTab {
private TeaVMConfigurable configurable;
public TeaVMFacetEditorTab(Module module, TeaVMJpsConfiguration configuration) {
configurable = new TeaVMConfigurable(module, configuration);
}
@NotNull
@Override
public JComponent createComponent() {
return configurable.createComponent();
}
@Nls
@Override
public String getDisplayName() {
return "General settings";
}
@Override
public boolean isModified() {
return configurable.isModified();
}
@Override
public void apply() {
configurable.apply();
}
@Override
public void disposeUIResources() {
configurable.disposeUIResources();
}
@Override
public void reset() {
configurable.reset();
}
}
| 619 |
2,206 | <reponame>YunLemon/speedment
/*
*
* Copyright (c) 2006-2020, Speedment, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); You may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.speedment.common.rest;
import static org.junit.jupiter.api.Assertions.*;
import com.speedment.common.rest.Option.Type;
import org.junit.jupiter.api.Test;
final class OptionTest {
private final Option header = Header.header("Content-Type", "application/json");
private final Option param = Param.param("key", "value");
private final AbstractOption abstractHeader = (AbstractOption) header;
private final AbstractOption abstractParam = (AbstractOption) param;
@Test
void getType() {
assertEquals(Type.HEADER, header.getType());
assertEquals(Type.PARAM, param.getType());
}
@Test
void getKey() {
assertEquals("Content-Type", header.getKey());
assertEquals("key", param.getKey());
}
@Test
void getValue() {
assertEquals("application/json", header.getValue());
assertEquals("value", param.getValue());
}
@Test
void testHashCode() {
assertNotEquals(0, abstractHeader.hashCode());
assertNotEquals(0, abstractParam.hashCode());
}
@Test
void testToString() {
assertEquals("Content-Type=application/json", abstractHeader.toString());
assertEquals("key=value", abstractParam.toString());
}
@Test
void testEquals() {
final AbstractOption copy = abstractHeader;
assertTrue(abstractHeader.equals(copy));
assertFalse(abstractHeader.equals(null));
assertFalse(abstractHeader.equals(1));
assertFalse(abstractHeader.equals(abstractParam));
final Option sameKey = Header.header(header.getKey(), "text/html");
assertFalse(abstractHeader.equals(sameKey));
final Option sameValue = Header.header("Accepts", header.getValue());
assertFalse(abstractHeader.equals(sameValue));
final Option allDifferent = Header.header("Accepts", "text/html");
assertFalse(abstractHeader.equals(allDifferent));
final Option allSame = Header.header(header.getKey(), header.getValue());
assertTrue(abstractHeader.equals(allSame));
}
}
| 943 |
324 | <filename>apis/s3/src/main/java/org/jclouds/s3/xml/ListMultipartUploadsHandler.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jclouds.s3.xml;
import static org.jclouds.util.SaxUtils.currentOrNull;
import java.util.Date;
import javax.inject.Inject;
import org.jclouds.date.DateService;
import org.jclouds.http.functions.ParseSax;
import org.jclouds.s3.domain.CanonicalUser;
import org.jclouds.s3.domain.ListMultipartUploadsResponse;
import org.jclouds.s3.domain.ObjectMetadata;
import org.xml.sax.Attributes;
import com.google.common.collect.ImmutableList;
public final class ListMultipartUploadsHandler extends ParseSax.HandlerWithResult<ListMultipartUploadsResponse> {
private String bucket;
private String keyMarker;
private String uploadIdMarker;
private String nextKeyMarker;
private String nextUploadIdMarker;
private int maxUploads;
private boolean isTruncated;
private final ImmutableList.Builder<ListMultipartUploadsResponse.Upload> uploads = ImmutableList.builder();
private String key;
private String uploadId;
private String id;
private String displayName;
private CanonicalUser initiator;
private CanonicalUser owner;
private ObjectMetadata.StorageClass storageClass;
private Date initiated;
private final DateService dateParser;
private final StringBuilder currentText = new StringBuilder();
private boolean inUpload;
private boolean inInitiator;
private boolean inOwner;
@Inject
public ListMultipartUploadsHandler(DateService dateParser) {
this.dateParser = dateParser;
}
public ListMultipartUploadsResponse getResult() {
return ListMultipartUploadsResponse.create(bucket, keyMarker, uploadIdMarker, nextKeyMarker, nextUploadIdMarker, maxUploads, isTruncated, uploads.build());
}
public void startElement(String uri, String name, String qName, Attributes attrs) {
if (qName.equals("Upload")) {
inUpload = true;
} else if (qName.equals("Initiator")) {
inInitiator = true;
} else if (qName.equals("Owner")) {
inOwner = true;
}
currentText.setLength(0);
}
public void endElement(String uri, String name, String qName) {
if (qName.equals("Bucket")) {
bucket = currentOrNull(currentText);
} else if (qName.equals("KeyMarker")) {
keyMarker = currentOrNull(currentText);
} else if (qName.equals("UploadIdMarker")) {
uploadIdMarker = currentOrNull(currentText);
} else if (qName.equals("NextKeyMarker")) {
nextKeyMarker = currentOrNull(currentText);
} else if (qName.equals("NextUploadIdMarker")) {
nextUploadIdMarker = currentOrNull(currentText);
} else if (qName.equals("MaxUploads")) {
maxUploads = Integer.parseInt(currentOrNull(currentText));
} else if (qName.equals("IsTruncated")) {
isTruncated = Boolean.parseBoolean(currentOrNull(currentText));
} else if (qName.equals("Key")) {
key = currentOrNull(currentText);
} else if (qName.equals("UploadId")) {
uploadId = currentOrNull(currentText);
} else if (qName.equals("StorageClass")) {
storageClass = ObjectMetadata.StorageClass.valueOf(currentOrNull(currentText));
} else if (qName.equals("Initiated")) {
initiated = dateParser.iso8601DateOrSecondsDateParse(currentOrNull(currentText));
} else if (qName.equals("Upload")) {
uploads.add(ListMultipartUploadsResponse.Upload.create(key, uploadId, initiator, owner, storageClass, initiated));
key = null;
uploadId = null;
id = null;
displayName = null;
initiator = null;
owner = null;
storageClass = null;
initiated = null;
inUpload = false;
} else if (qName.equals("Initiator")) {
initiator = new CanonicalUser(id, displayName);
id = null;
displayName = null;
inInitiator = false;
} else if (qName.equals("Owner")) {
owner = new CanonicalUser(id, displayName);
id = null;
displayName = null;
inOwner = false;
}
}
public void characters(char[] ch, int start, int length) {
currentText.append(ch, start, length);
}
}
| 1,808 |
2,441 | """Tests for distutils.command.bdist."""
import os
import unittest
from test.support import run_unittest
import warnings
from distutils.command.bdist import bdist
from distutils.tests import support
class BuildTestCase(support.TempdirManager,
unittest.TestCase):
def test_formats(self):
# let's create a command and make sure
# we can set the format
dist = self.create_dist()[1]
cmd = bdist(dist)
cmd.formats = ['msi']
cmd.ensure_finalized()
self.assertEqual(cmd.formats, ['msi'])
# what formats does bdist offer?
formats = ['bztar', 'gztar', 'msi', 'rpm', 'tar',
'wininst', 'xztar', 'zip', 'ztar']
found = sorted(cmd.format_command)
self.assertEqual(found, formats)
def test_skip_build(self):
# bug #10946: bdist --skip-build should trickle down to subcommands
dist = self.create_dist()[1]
cmd = bdist(dist)
cmd.skip_build = 1
cmd.ensure_finalized()
dist.command_obj['bdist'] = cmd
names = ['bdist_dumb', 'bdist_wininst'] # bdist_rpm does not support --skip-build
if os.name == 'nt':
names.append('bdist_msi')
for name in names:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'bdist_wininst command is deprecated',
DeprecationWarning)
subcmd = cmd.get_finalized_command(name)
if getattr(subcmd, '_unsupported', False):
# command is not supported on this build
continue
self.assertTrue(subcmd.skip_build,
'%s should take --skip-build from bdist' % name)
def test_suite():
return unittest.makeSuite(BuildTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| 875 |
1,006 | <filename>boards/arm/sama5/giant-board/include/board_492mhz.h
/****************************************************************************
* boards/arm/sama5/giant-board/include/board_492mhz.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __BOARDS_ARM_SAMA5_GIANT_BOARD_INCLUDE_BOARD_492MHZ_H
#define __BOARDS_ARM_SAMA5_GIANT_BOARD_INCLUDE_BOARD_492MHZ_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Clocking *****************************************************************/
/* After power-on reset, the SAMA5 device is running on a 12MHz internal RC.
* These definitions will configure operational clocking.
*
* This configuration results in a CPU clock of 492MHz:
*
* MAINOSC: Frequency = 24MHz (crystal)
* PLLA: PLL Multiplier = 40+1 to generate PLLACK = 984MHz
 * Master Clock (MCK): Source = PLLACK/1, Prescaler = 1, MDIV = 6 to generate
* MCK = 164MHz
* CPU clock = 492MHz
*/
/* Main oscillator register settings.
*
 * The start-up time should be:
* Start Up Time = 8 * MOSCXTST / SLCK = 56 Slow Clock Cycles.
*/
#define BOARD_CKGR_MOR_MOSCXTST (62 << PMC_CKGR_MOR_MOSCXTST_SHIFT) /* Start-up Time */
#define BOARD_CKGR_PLLAR_COUNT (63 << PMC_CKGR_PLLAR_COUNT_SHIFT)
#define BOARD_CKGR_PLLAR_OUT (0)
#define BOARD_CKGR_PLLAR_MUL (40 << PMC_CKGR_PLLAR_MUL_SHIFT)
/* PMC master clock register settings.
*
* Master/Processor Clock Source Selection = PLLA
* Master/Processor Clock Prescaler = 1
* PLLA Divider = 1
* Master Clock Division (MDIV) = 6
*
* NOTE: Bit PLLADIV2 must always be set to 1 when MDIV is set to 3.
*
* Prescaler input = 984MHz / 1 = 984MHz
* Prescaler output = 984MHz / 1 = 984MHz
 * Processor Clock (PCK) = 984MHz / 2 = 492MHz
 * Master clock (MCK) = 984MHz / 6 = 164MHz
*/
#define BOARD_PMC_MCKR_CSS PMC_MCKR_CSS_PLLA
#define BOARD_PMC_MCKR_PRES PMC_MCKR_PRES_DIV1
#define BOARD_PMC_MCKR_PLLADIV PMC_MCKR_PLLADIV1
#define BOARD_PMC_MCKR_MDIV PMC_MCKR_MDIV_PCKDIV4
/* ADC Configuration
*
* ADCClock = MCK / ((PRESCAL+1) * 2)
*
* Given:
* MCK = 164MHz
* ADCClock = 8MHz
* Then:
* PRESCAL = 20.5
*
* PRESCAL=20 and MCK=164MHz yields ADC clock of 8.2MHz
*/
#define BOARD_ADC_PRESCAL (20)
#define BOARD_TSD_STARTUP (40) /* 40 nanoseconds */
#define BOARD_TSD_TRACKTIM (2000) /* Min 1µs at 8MHz */
#define BOARD_TSD_DEBOUNCE (10000000) /* 10 milliseconds (units nanoseconds) */
/* Resulting frequencies */
/* Giant Board
*
* Crystal frequency: 24 MHz
* CPU clock: 492 MHz
* Master clock: 164 MHz
*/
#define BOARD_MAINCK_FREQUENCY BOARD_MAINOSC_FREQUENCY
#define BOARD_PLLA_FREQUENCY (984000000) /* PLLACK: 41 * 24MHz / 1 */
#define BOARD_PCK_FREQUENCY (492000000) /* CPU: PLLACK / 2 / 1 */
#define BOARD_MCK_FREQUENCY (164000000) /* MCK: PLLACK / 1 / 1 / 3 */
#define BOARD_ADCCLK_FREQUENCY (10250000) /* ADCCLK: MCK / ((7+1)*2) */
/* Clocking to certain peripherals may be MCK/2.
*
* REVISIT: I am not sure why this is. Perhaps because of H32MXDIV?
*/
#define BOARD_PIT_FREQUENCY (BOARD_MCK_FREQUENCY >> 1)
#define BOARD_USART_FREQUENCY (BOARD_MCK_FREQUENCY >> 1)
#define BOARD_FLEXCOM_FREQUENCY (BOARD_MCK_FREQUENCY >> 1)
#if defined(CONFIG_SAMA5_EHCI) || defined(CONFIG_SAMA5_OHCI) || \
defined(CONFIG_SAMA5_UDPHS)
/* The USB Host High Speed requires a 480 MHz clock (UPLLCK) for the embedded
* High-speed transceivers. UPLLCK is the output of the 480 MHz UTMI PLL
* (UPLL). The source clock of the UTMI PLL is the Main OSC output: Either
* the 12MHz internal RC oscillator on a an external 12MHz crystal. The
* Main OSC must be 12MHz because the UPLL has a built-in 40x multiplier.
*
* For High-speed operations, the user has to perform the following:
*
* 1) Enable UHP peripheral clock, bit (1 << AT91C_ID_UHPHS) in
* PMC_PCER register.
* 2) Write CKGR_PLLCOUNT field in PMC_UCKR register.
* 3) Enable UPLL, bit AT91C_CKGR_UPLLEN in PMC_UCKR register.
* 4) Wait until UTMI_PLL is locked. LOCKU bit in PMC_SR register
* 5) Enable BIAS, bit AT91C_CKGR_BIASEN in PMC_UCKR register.
* 6) Select UPLLCK as Input clock of OHCI part, USBS bit in PMC_USB
* register.
* 7) Program the OHCI clocks (UHP48M and UHP12M) with USBDIV field in
* PMC_USB register. USBDIV must be 9 (division by 10) if UPLLCK is
* selected.
* 8) Enable OHCI clocks, UHP bit in PMC_SCER register.
*
* Steps 2 through 7 performed here. 1 and 8 are performed in the EHCI
* driver is initialized.
*/
# define BOARD_USE_UPLL 1 /* Use UPLL for clock source */
# define BOARD_CKGR_UCKR_UPLLCOUNT (15) /* Maximum value */
# define BOARD_CKGR_UCKR_BIASCOUNT (15) /* Maximum value */
# define BOARD_UPLL_OHCI_DIV (10) /* Divide by 10 */
#endif
/* HSMCI clocking
*
* Multimedia Card Interface clock (MCCK or MCI_CK) is Master Clock (MCK)
* divided by (2*(CLKDIV) + CLOCKODD + 2).
*
* MCI_SPEED = MCK / (2*CLKDIV + CLOCKODD + 2)
*
* Where CLKDIV has a range of 0-255.
*/
/* MCK = 132MHz, CLKDIV = 164,
* MCI_SPEED = 132MHz / (2*164 + 0 + 2) = 400 KHz
*/
#define SDMMC_INIT_CLKDIV (164 << SDMMC_MR_CLKDIV_SHIFT)
/* MCK = 132MHz, CLKDIV = 2 w/CLOCKODD,
* MCI_SPEED = 132MHz /(2*2 + 1 + 2) = 18.9 MHz
*/
#define SDMMC_MMCXFR_CLKDIV ((2 << SDMMC_MR_CLKDIV_SHIFT) | SDMMC_MR_CLKODD)
/* MCK = 132MHz, CLKDIV = 2, MCI_SPEED = 132MHz /(2*2 + 0 + 2) = 22 MHz */
#define SDMMC_SDXFR_CLKDIV (2 << SDMMC_MR_CLKDIV_SHIFT)
#define SDMMC_SDWIDEXFR_CLKDIV SDMMC_SDXFR_CLKDIV
/****************************************************************************
* Public Data
****************************************************************************/
#ifndef __ASSEMBLY__
#undef EXTERN
#if defined(__cplusplus)
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
#undef EXTERN
#if defined(__cplusplus)
}
#endif
#endif /* !__ASSEMBLY__ */
#endif /* __BOARDS_ARM_SAMA5_GIANT_BOARD_INCLUDE_BOARD_492MHZ_H */
| 2,942 |
2,542 | <gh_stars>1000+
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
#if defined(PLATFORM_UNIX)
#include <string>
#include <vector>
#include <algorithm>
#include <sstream>
#include <iostream>
#include <fstream>
#include <random>
#ifdef _WIN32
#define NOMINMAX
#include <Windows.h>
#else
# include <sys/time.h>
#endif
#endif
using namespace Common;
using namespace HttpGateway;
using namespace HttpServer;
void HttpGatewayImpl::OpenAsyncOperation::OnStart(__in AsyncOperationSPtr const& thisSPtr)
{
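    // Initialize the gateway's server state first; if that fails, complete the
    // operation with the error. When PLATFORM_UNIX is not defined, the service
    // resolver is opened before the HTTP server; otherwise only the HTTP server
    // is opened here.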
auto error = owner_.InitializeServer();
    if (!error.IsSuccess())
    {
        TryComplete(thisSPtr, error);
        return;
    }
#if !defined(PLATFORM_UNIX)
auto timeout = timeoutHelper_.GetRemainingTime();
auto operation = owner_.serviceResolver_->BeginOpen(
timeout,
[this](AsyncOperationSPtr const &operation)
{
this->OnServiceResolverOpenComplete(operation, false);
},
thisSPtr);
OnServiceResolverOpenComplete(operation, true);
#else
auto openOperation = owner_.httpServer_->BeginOpen(
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const &operation)
{
this->OnOpenComplete(operation, false);
},
thisSPtr);
this->OnOpenComplete(openOperation, true);
#endif
}
#if !defined(PLATFORM_UNIX)
void HttpGatewayImpl::OpenAsyncOperation::OnServiceResolverOpenComplete(
AsyncOperationSPtr const &operation,
bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
auto error = owner_.serviceResolver_->EndOpen(operation);
if (!error.IsSuccess())
{
TryComplete(operation->Parent, error);
return;
}
auto openOperation = owner_.httpServer_->BeginOpen(
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const &operation)
{
this->OnOpenComplete(operation, false);
},
operation->Parent);
this->OnOpenComplete(openOperation, true);
}
#endif
void HttpGatewayImpl::OpenAsyncOperation::OnOpenComplete(
AsyncOperationSPtr const &operation,
__in bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously) { return; }
auto error = owner_.httpServer_->EndOpen(operation);
TryComplete(operation->Parent, error);
}
ErrorCode HttpGatewayImpl::OpenAsyncOperation::End(AsyncOperationSPtr const& operation)
{
auto thisPtr = AsyncOperation::End<OpenAsyncOperation>(operation);
return thisPtr->Error;
}
| 954 |
1,127 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.ops.strided_slice import StridedSlice
from openvino.tools.mo.utils.graph import Node
from openvino.tools.mo.utils.ir_reader.extender import Extender
class StridedSlice_extender(Extender):
op = 'StridedSlice'
@staticmethod
def extend(op: Node):
for attr in StridedSlice.get_mask_names():
# We can not use op.has_and_set(attr) here as a condition, because it will return False if begin/end is
# 1D tensor and begin_mask/end_mask is equal to 0
if op.has(attr) and op[attr] != '':
Extender.attr_to_list(op, attr)
else:
assert attr not in ['begin_mask', 'end_mask'],\
'{} is not defined for the node {}'.format(attr, op.soft_get('name', op.id))
op[attr] = int64_array([0])
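        # Flip each mask bit (0 <-> 1): the serialized IR and the internal
        # representation use opposite polarities for begin_mask/end_mask.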
op.begin_mask = int64_array([1 - i for i in op.begin_mask])
op.end_mask = int64_array([1 - i for i in op.end_mask])
| 489 |
648 | <filename>spec/hl7.fhir.core/3.0.1/package/DataElement-ImplementationGuide.page.kind.json
{"resourceType":"DataElement","id":"ImplementationGuide.page.kind","meta":{"lastUpdated":"2017-04-19T07:44:43.294+10:00"},"url":"http://hl7.org/fhir/DataElement/ImplementationGuide.page.kind","status":"draft","experimental":true,"stringency":"fully-specified","element":[{"id":"ImplementationGuide.page.kind","path":"ImplementationGuide.page.kind","short":"page | example | list | include | directory | dictionary | toc | resource","definition":"The kind of page that this is. Some pages are autogenerated (list, example), and other kinds are of interest so that tools can navigate the user to the page of interest.","min":1,"max":"1","type":[{"code":"code"}],"isSummary":true,"binding":{"extension":[{"url":"http://hl7.org/fhir/StructureDefinition/elementdefinition-bindingName","valueString":"GuidePageKind"}],"strength":"required","description":"The kind of an included page.","valueSetReference":{"reference":"http://hl7.org/fhir/ValueSet/guide-page-kind"}}}]} | 292 |
890 | /*
*
* Copyright 2018 Asylo authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "asylo/crypto/util/trivial_object_util.h"
#include "asylo/identity/platform/sgx/internal/hardware_interface.h"
#include "asylo/identity/platform/sgx/internal/identity_key_management_structs.h"
#include "asylo/identity/platform/sgx/internal/sgx_identity_util_internal.h"
#include "asylo/test/util/status_matchers.h"
namespace asylo {
namespace sgx {
namespace {
using ::testing::Not;
// Verify that VerifyHardwareReport() can verify a hardware report that is
// targeted at the verifying enclave.
TEST(VerifyHardwareReportTest, VerifyHardwareReportSucceedsWhenTargetIsSelf) {
AlignedTargetinfoPtr targetinfo;
SetTargetinfoFromSelfIdentity(targetinfo.get());
AlignedReportdataPtr reportdata;
reportdata->data = TrivialRandomObject<UnsafeBytes<kReportdataSize>>();
Report report;
ASYLO_ASSERT_OK_AND_ASSIGN(
report,
HardwareInterface::CreateDefault()->GetReport(*targetinfo, *reportdata));
ASYLO_ASSERT_OK(VerifyHardwareReport(report));
}
// Verify that VerifyHardwareReport() cannot verify a hardware report that is
// not targeted at the verifying enclave.
TEST(VerifyHardwareReportTest, VerifyHardwareReportFailsWhenTargetIsNotSelf) {
AlignedTargetinfoPtr targetinfo;
*targetinfo = TrivialZeroObject<Targetinfo>();
AlignedReportdataPtr reportdata;
reportdata->data = TrivialRandomObject<UnsafeBytes<kReportdataSize>>();
Report report;
ASYLO_ASSERT_OK_AND_ASSIGN(
report,
HardwareInterface::CreateDefault()->GetReport(*targetinfo, *reportdata));
ASSERT_THAT(VerifyHardwareReport(report), Not(IsOk()));
}
} // namespace
} // namespace sgx
} // namespace asylo
| 717 |
709 | <filename>doc/examples/finalizable/cons.c
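/* C side of the finalizable example: listCons/listSing allocate cons cells
 * (logging each allocation to stderr), listFree frees a single cell, and
 * listSum walks a list and returns the sum of its values. */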
#include <stdio.h>
#include <stdlib.h>
typedef unsigned int uint;
typedef struct Cons {
struct Cons *next;
int value;
} *Cons;
Cons listCons (int n, Cons c) {
Cons res;
res = (Cons) malloc (sizeof(*res));
fprintf (stderr, "0x%08x = listCons (%d)\n", (uint)res, n);
res->next = c;
res->value = n;
return res;
}
Cons listSing (int n) {
Cons res;
res = (Cons) malloc (sizeof(*res));
fprintf (stderr, "0x%08x = listSing (%d)\n", (uint)res, n);
res->next = NULL;
res->value = n;
return res;
}
void listFree (Cons p) {
fprintf (stderr, "listFree (0x%08x)\n", (uint)p);
free (p);
}
int listSum (Cons c) {
int res;
fprintf (stderr, "listSum\n");
res = 0;
for (; c != NULL; c = c->next)
res += c->value;
return res;
}
| 477 |
536 | <filename>graph.c
#define _POSIX_C_SOURCE 200809L
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include "env.h"
#include "graph.h"
#include "htab.h"
#include "util.h"
static struct hashtable *allnodes;
struct edge *alledges;
static void
delnode(void *p)
{
struct node *n = p;
if (n->shellpath != n->path)
free(n->shellpath);
free(n->use);
free(n->path);
free(n);
}
void
graphinit(void)
{
struct edge *e;
/* delete old nodes and edges in case we rebuilt the manifest */
delhtab(allnodes, delnode);
while (alledges) {
e = alledges;
alledges = e->allnext;
free(e->out);
free(e->in);
free(e);
}
allnodes = mkhtab(1024);
}
struct node *
mknode(struct string *path)
{
void **v;
struct node *n;
struct hashtablekey k;
htabkey(&k, path->s, path->n);
v = htabput(allnodes, &k);
if (*v) {
free(path);
return *v;
}
n = xmalloc(sizeof(*n));
n->path = path;
n->shellpath = NULL;
n->gen = NULL;
n->use = NULL;
n->nuse = 0;
n->mtime = MTIME_UNKNOWN;
n->logmtime = MTIME_MISSING;
n->hash = 0;
n->id = -1;
*v = n;
return n;
}
struct node *
nodeget(const char *path, size_t len)
{
struct hashtablekey k;
if (!len)
len = strlen(path);
htabkey(&k, path, len);
return htabget(allnodes, &k);
}
void
nodestat(struct node *n)
{
struct stat st;
if (stat(n->path->s, &st) < 0) {
if (errno != ENOENT)
fatal("stat %s:", n->path->s);
n->mtime = MTIME_MISSING;
} else {
#ifdef __APPLE__
n->mtime = (int64_t)st.st_mtime * 1000000000 + st.st_mtimensec;
/*
Illumos hides the members of st_mtim when you define _POSIX_C_SOURCE
since it has not been updated to support POSIX.1-2008:
https://www.illumos.org/issues/13327
*/
#elif defined(__sun)
n->mtime = (int64_t)st.st_mtim.__tv_sec * 1000000000 + st.st_mtim.__tv_nsec;
#else
n->mtime = (int64_t)st.st_mtim.tv_sec * 1000000000 + st.st_mtim.tv_nsec;
#endif
}
}
struct string *
nodepath(struct node *n, bool escape)
{
char *s, *d;
int nquote;
if (!escape)
return n->path;
if (n->shellpath)
return n->shellpath;
escape = false;
nquote = 0;
for (s = n->path->s; *s; ++s) {
if (!isalnum(*s) && !strchr("_+-./", *s))
escape = true;
if (*s == '\'')
++nquote;
}
if (escape) {
n->shellpath = mkstr(n->path->n + 2 + 3 * nquote);
d = n->shellpath->s;
*d++ = '\'';
for (s = n->path->s; *s; ++s) {
*d++ = *s;
if (*s == '\'') {
*d++ = '\\';
*d++ = '\'';
*d++ = '\'';
}
}
*d++ = '\'';
} else {
n->shellpath = n->path;
}
return n->shellpath;
}
void
nodeuse(struct node *n, struct edge *e)
{
/* allocate in powers of two */
if (!(n->nuse & (n->nuse - 1)))
n->use = xreallocarray(n->use, n->nuse ? n->nuse * 2 : 1, sizeof(e));
n->use[n->nuse++] = e;
}
struct edge *
mkedge(struct environment *parent)
{
struct edge *e;
e = xmalloc(sizeof(*e));
e->env = mkenv(parent);
e->pool = NULL;
e->out = NULL;
e->nout = 0;
e->in = NULL;
e->nin = 0;
e->flags = 0;
e->allnext = alledges;
alledges = e;
return e;
}
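/* Compute and memoize (via FLAG_HASH) a 64-bit murmur hash of the edge's
 * command, including rspfile_content joined with ";rspfile=" when present. */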
void
edgehash(struct edge *e)
{
static const char sep[] = ";rspfile=";
struct string *cmd, *rsp, *s;
if (e->flags & FLAG_HASH)
return;
e->flags |= FLAG_HASH;
cmd = edgevar(e, "command", true);
if (!cmd)
fatal("rule '%s' has no command", e->rule->name);
rsp = edgevar(e, "rspfile_content", true);
if (rsp && rsp->n > 0) {
s = mkstr(cmd->n + sizeof(sep) - 1 + rsp->n);
memcpy(s->s, cmd->s, cmd->n);
memcpy(s->s + cmd->n, sep, sizeof(sep) - 1);
memcpy(s->s + cmd->n + sizeof(sep) - 1, rsp->s, rsp->n);
s->s[s->n] = '\0';
e->hash = murmurhash64a(s->s, s->n);
free(s);
} else {
e->hash = murmurhash64a(cmd->s, cmd->n);
}
}
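/* Create an implicit phony edge that produces n; used when a dependency has
 * no explicit generating edge. */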
static struct edge *
mkphony(struct node *n)
{
struct edge *e;
e = mkedge(rootenv);
e->rule = &phonyrule;
e->inimpidx = 0;
e->inorderidx = 0;
e->outimpidx = 1;
e->nout = 1;
e->out = xmalloc(sizeof(n));
e->out[0] = n;
return e;
}
void
edgeadddeps(struct edge *e, struct node **deps, size_t ndeps)
{
struct node **order, *n;
size_t norder, i;
for (i = 0; i < ndeps; ++i) {
n = deps[i];
if (!n->gen)
n->gen = mkphony(n);
nodeuse(n, e);
}
e->in = xreallocarray(e->in, e->nin + ndeps, sizeof(e->in[0]));
order = e->in + e->inorderidx;
norder = e->nin - e->inorderidx;
memmove(order + ndeps, order, norder * sizeof(e->in[0]));
memcpy(order, deps, ndeps * sizeof(e->in[0]));
e->inorderidx += ndeps;
e->nin += ndeps;
}
| 2,135 |
675 | /****************************************************************************
**
** Copyright (C) 2016 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of the QtCore module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 3 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL3 included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 3 requirements
** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 2.0 or (at your option) the GNU General
** Public license version 3 or any later version approved by the KDE Free
** Qt Foundation. The licenses are as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-2.0.html and
** https://www.gnu.org/licenses/gpl-3.0.html.
**
** $QT_END_LICENSE$
**
****************************************************************************/
#ifndef QSTATE_H
#define QSTATE_H
#include <QtCore/qabstractstate.h>
#include <QtCore/qlist.h>
#include <QtCore/qmetaobject.h>
QT_REQUIRE_CONFIG(statemachine);
QT_BEGIN_NAMESPACE
class QAbstractTransition;
class QSignalTransition;
class QStatePrivate;
class Q_CORE_EXPORT QState : public QAbstractState
{
Q_OBJECT
Q_PROPERTY(QAbstractState* initialState READ initialState WRITE setInitialState NOTIFY initialStateChanged)
Q_PROPERTY(QAbstractState* errorState READ errorState WRITE setErrorState NOTIFY errorStateChanged)
Q_PROPERTY(ChildMode childMode READ childMode WRITE setChildMode NOTIFY childModeChanged)
public:
enum ChildMode {
ExclusiveStates,
ParallelStates
};
Q_ENUM(ChildMode)
enum RestorePolicy {
DontRestoreProperties,
RestoreProperties
};
Q_ENUM(RestorePolicy)
QState(QState *parent = nullptr);
QState(ChildMode childMode, QState *parent = nullptr);
~QState();
QAbstractState *errorState() const;
void setErrorState(QAbstractState *state);
void addTransition(QAbstractTransition *transition);
QSignalTransition *addTransition(const QObject *sender, const char *signal, QAbstractState *target);
#ifdef Q_QDOC
template<typename PointerToMemberFunction>
QSignalTransition *addTransition(const QObject *sender, PointerToMemberFunction signal,
QAbstractState *target);
#else
template <typename Func>
QSignalTransition *addTransition(const typename QtPrivate::FunctionPointer<Func>::Object *obj,
Func signal, QAbstractState *target)
{
const QMetaMethod signalMetaMethod = QMetaMethod::fromSignal(signal);
return addTransition(obj, signalMetaMethod.methodSignature().constData(), target);
}
#endif // Q_QDOC
QAbstractTransition *addTransition(QAbstractState *target);
void removeTransition(QAbstractTransition *transition);
QList<QAbstractTransition*> transitions() const;
QAbstractState *initialState() const;
void setInitialState(QAbstractState *state);
ChildMode childMode() const;
void setChildMode(ChildMode mode);
#ifndef QT_NO_PROPERTIES
void assignProperty(QObject *object, const char *name,
const QVariant &value);
#endif
Q_SIGNALS:
void finished(QPrivateSignal);
void propertiesAssigned(QPrivateSignal);
void childModeChanged(QPrivateSignal);
void initialStateChanged(QPrivateSignal);
void errorStateChanged(QPrivateSignal);
protected:
void onEntry(QEvent *event) override;
void onExit(QEvent *event) override;
bool event(QEvent *e) override;
protected:
QState(QStatePrivate &dd, QState *parent);
private:
Q_DISABLE_COPY(QState)
Q_DECLARE_PRIVATE(QState)
};
QT_END_NAMESPACE
#endif
| 1,516 |
407 | <reponame>iuskye/SREWorks
package com.alibaba.tesla.appmanager.server.repository;
import com.alibaba.tesla.appmanager.server.repository.condition.ProductQueryCondition;
import com.alibaba.tesla.appmanager.server.repository.domain.ProductDO;
import java.util.List;
public interface ProductRepository {
long countByCondition(ProductQueryCondition condition);
int deleteByCondition(ProductQueryCondition condition);
int insert(ProductDO record);
List<ProductDO> selectByCondition(ProductQueryCondition condition);
ProductDO getByCondition(ProductQueryCondition condition);
int updateByCondition(ProductDO record, ProductQueryCondition condition);
} | 190 |
310 | /*
* Jitsi, the OpenSource Java VoIP and Instant Messaging client.
*
* Copyright @ 2015 Atlassian Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "org_jitsi_impl_neomedia_jmfext_media_renderer_video_JAWTRenderer.h"
#include "JAWTRenderer.h"
JNIEXPORT void JNICALL
Java_org_jitsi_impl_neomedia_jmfext_media_renderer_video_JAWTRenderer_close
(JNIEnv *env, jclass clazz, jlong handle, jobject component)
{
JAWTRenderer_close(env, clazz, handle, component);
}
JNIEXPORT jlong JNICALL
Java_org_jitsi_impl_neomedia_jmfext_media_renderer_video_JAWTRenderer_open
(JNIEnv *env, jclass clazz, jobject component)
{
return JAWTRenderer_open(env, clazz, component);
}
JNIEXPORT jboolean JNICALL
Java_org_jitsi_impl_neomedia_jmfext_media_renderer_video_JAWTRenderer_paint
(JNIEnv *env, jclass clazz, jlong handle, jobject component, jobject g,
jint zOrder)
{
#ifdef __ANDROID__
return JAWTRenderer_paint(0, NULL, clazz, handle, g, zOrder);
#else /* #ifdef __ANDROID__ */
JAWT awt;
jboolean awtIsAvailable;
jboolean wantsPaint;
awt.version = JAWT_VERSION_1_4;
#ifdef __APPLE__
#ifndef JAWT_MACOSX_USE_CALAYER
#define JAWT_MACOSX_USE_CALAYER 0x80000000
#endif /* #ifndef JAWT_MACOSX_USE_CALAYER */
awt.version |= JAWT_MACOSX_USE_CALAYER;
awtIsAvailable = JAWT_GetAWT(env, &awt);
    /*
     * We do not know whether JAWT_GetAWT will fail when JAWT_MACOSX_USE_CALAYER
     * is specified but not supported, or whether it will simply remove the flag
     * from the version field of JAWT. That's why, if the call fails, we call the
     * function again with the flag removed.
     */
if (JNI_FALSE == awtIsAvailable)
{
awt.version &= ~JAWT_MACOSX_USE_CALAYER;
awtIsAvailable = JAWT_GetAWT(env, &awt);
}
#else /* #ifdef __APPLE__ */
awtIsAvailable = JAWT_GetAWT(env, &awt);
#endif /* #ifdef __APPLE__ */
wantsPaint = JNI_TRUE;
if (JNI_TRUE == awtIsAvailable)
{
JAWT_DrawingSurface *ds;
ds = awt.GetDrawingSurface(env, component);
if (ds)
{
jint dsLock;
dsLock = ds->Lock(ds);
if (0 == (dsLock & JAWT_LOCK_ERROR))
{
JAWT_DrawingSurfaceInfo *dsi;
dsi = ds->GetDrawingSurfaceInfo(ds);
if (dsi && dsi->platformInfo)
{
/*
* The function arguments env and component are now
* available as the fields env and target, respectively, of
* the JAWT_DrawingSurface which is itself the value of the
* field ds of the JAWT_DrawingSurfaceInfo.
*/
wantsPaint
= JAWTRenderer_paint(
awt.version,
dsi,
clazz,
handle,
g,
zOrder);
ds->FreeDrawingSurfaceInfo(dsi);
}
ds->Unlock(ds);
}
awt.FreeDrawingSurface(ds);
}
}
return wantsPaint;
#endif /* #ifdef __ANDROID__ */
}
JNIEXPORT jboolean JNICALL
Java_org_jitsi_impl_neomedia_jmfext_media_renderer_video_JAWTRenderer_process
(JNIEnv *env, jclass clazz, jlong handle, jobject component, jintArray data,
jint offset, jint length, jint width, jint height)
{
jint *dataPtr;
jboolean processed;
dataPtr = (*env)->GetPrimitiveArrayCritical(env, data, NULL);
if (dataPtr)
{
processed
= JAWTRenderer_process(
env, clazz,
handle, component,
dataPtr + offset, length,
width, height);
(*env)->ReleasePrimitiveArrayCritical(
env,
data, dataPtr,
JNI_ABORT);
}
else
processed = JNI_FALSE;
return processed;
}
JNIEXPORT jstring JNICALL
Java_org_jitsi_impl_neomedia_jmfext_media_renderer_video_JAWTRenderer_sysctlbyname
(JNIEnv *env, jclass clazz, jstring name)
{
#ifdef __APPLE__
return JAWTRenderer_sysctlbyname(env, name);
#else /* #ifdef __APPLE__ */
return NULL;
#endif /* #ifdef __APPLE__ */
}
| 2,354 |
6,390 | """
Duplicates filter middleware for autoscraping
"""
from scrapy.exceptions import NotConfigured
from scrapy.exceptions import DropItem
from slybot.item import create_item_version
class DupeFilterPipeline(object):
def __init__(self, settings):
if not settings.getbool('SLYDUPEFILTER_ENABLED'):
raise NotConfigured
self._itemversion_cache = {}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_item(self, item, spider):
"""Checks whether a scrapy item is a dupe, based on version (not vary)
fields of the item class"""
if (not hasattr(item, 'version_fields') or not item.version_fields or
item.get('_type') != getattr(item, '_display_name', 0)):
return item
version = create_item_version(item)
if version in self._itemversion_cache:
old_url = self._itemversion_cache[version]
raise DropItem("Duplicate product scraped at <%s>, first one was "
"scraped at <%s>" % (item["url"], old_url))
self._itemversion_cache[version] = item["url"]
return item
| 481 |
6,036 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/graph/graph_viewer.h"
#include <unordered_set>
#include <vector>
namespace onnxruntime {
namespace nuphar {
// A generic Partition data struct for Partitioner
struct PartitionMeta {
std::vector<NodeIndex> nodes; // a list of NodeIndex to represent Nodes in this Partition
  std::unordered_set<std::string> frontier_node_args;  // a set of string to represent frontier NodeArgs in this Partition
  std::unordered_set<std::string> rejected_frontiner_node_args;  // a set of string to represent rejected frontier NodeArgs in this Partition
std::unordered_set<NodeIndex> predecessor_partitions; // a set of NodeIndex to represent predecessor Partitions of this Partition
std::unordered_set<NodeIndex> immediate_predecessor_partitions; // a set of NodeIndex to represent immediate predecessor Partitions of this Partition
int cost; // a cost of this Partition. It can be used to guide customized partitioning
PartitionMeta() {}
PartitionMeta(NodeIndex node) {
nodes.push_back(node);
}
inline NodeIndex Id() {
//Use the first NodeIndex as the id for PartitionMeta
return nodes.front();
}
};
// Base class of Partitioner.
// Partitioner is used for GraphPartition to generate a FuseNode in Ort for the nuphar provider,
// or for SubgraphPartition to generate a subgraph Function within a FuseNode in nuphar itself.
class Partitioner {
public:
Partitioner() {}
virtual ~Partitioner() = default;
  // Main function to perform partition
Status Evaluate(const onnxruntime::GraphViewer& graph, bool distinguish_subgraph);
protected:
// Check whether a Node is included
virtual bool IsNodeSupported(const Node& node) const = 0;
// Force a Partition.
  // It returns false to perform the default merge process.
  // Returning true avoids the default process;
  // the customized process needs to be implemented within this function.
virtual bool ForcePartition(
const onnxruntime::GraphViewer& /*graph*/,
const Node& /*node*/,
const std::vector<NodeIndex>& /*candidate_partitions*/,
const std::vector<NodeIndex>& /*immedidate_rejected_partitions*/) {
return false;
}
  // Cost function interface to estimate the Cost of a PartitionMeta.
  // It can be used to trigger ForcePartition or any other process.
virtual int Cost(const Node&, const std::vector<NodeIndex>&) const { return 0; };
  // Update PartitionMeta to include a node
void UpdateFrontiers(PartitionMeta& part_meta, const Node& node);
void UpdatePredecessors(PartitionMeta& part_meta, const NodeIndex& node_id);
// Merge at least two Partitions when they are connected by a node
void MergePartitions(const Node& node,
const std::vector<NodeIndex>& candidates,
const std::vector<NodeIndex>& rejected_partitions);
std::map<NodeIndex, PartitionMeta> partitions_;
private:
void RejectNode(
const onnxruntime::GraphViewer& graph,
const NodeIndex& node_idx);
void AcceptNode(
const onnxruntime::GraphViewer& graph,
const NodeIndex& node_idx);
virtual void HandleSubgraph(const onnxruntime::GraphViewer&) {}
protected:
virtual void CreateNewPartition(const Node& node, const std::vector<NodeIndex>& immedidate_rejected_partitions);
private:
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Partitioner);
};
} // namespace nuphar
} // namespace onnxruntime
| 1,138 |
679 | <filename>main/sd/source/ui/slidesorter/cache/SlsCacheConfiguration.cxx
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_sd.hxx"
#include "SlsCacheConfiguration.hxx"
#include <vos/mutex.hxx>
#include <vcl/svapp.hxx>
#include <comphelper/processfactory.hxx>
#include <com/sun/star/lang/XMultiServiceFactory.hpp>
#include <com/sun/star/container/XHierarchicalNameAccess.hpp>
#ifndef _COM_SUN_STAR_CONTAINER_PROPERTYVALUE_HPP_
#include <com/sun/star/beans/PropertyValue.hpp>
#endif
using namespace ::com::sun::star;
using namespace ::com::sun::star::uno;
namespace sd { namespace slidesorter { namespace cache {
::boost::shared_ptr<CacheConfiguration> CacheConfiguration::mpInstance;
::boost::weak_ptr<CacheConfiguration> CacheConfiguration::mpWeakInstance;
Timer CacheConfiguration::maReleaseTimer;
::boost::shared_ptr<CacheConfiguration> CacheConfiguration::Instance (void)
{
::vos::OGuard aSolarGuard (Application::GetSolarMutex());
if (mpInstance.get() == NULL)
{
// Maybe somebody else kept a previously created instance alive.
if ( ! mpWeakInstance.expired())
mpInstance = ::boost::shared_ptr<CacheConfiguration>(mpWeakInstance);
if (mpInstance.get() == NULL)
{
// We have to create a new instance.
mpInstance.reset(new CacheConfiguration());
mpWeakInstance = mpInstance;
// Prepare to release this instance in the near future.
maReleaseTimer.SetTimeoutHdl(
LINK(mpInstance.get(),CacheConfiguration,TimerCallback));
maReleaseTimer.SetTimeout(5000 /* 5s */);
maReleaseTimer.Start();
}
}
return mpInstance;
}
CacheConfiguration::CacheConfiguration (void)
{
// Get the cache size from configuration.
const ::rtl::OUString sConfigurationProviderServiceName(
RTL_CONSTASCII_USTRINGPARAM(
"com.sun.star.configuration.ConfigurationProvider"));
const ::rtl::OUString sPathToImpressConfigurationRoot(
RTL_CONSTASCII_USTRINGPARAM("/org.openoffice.Office.Impress/"));
const ::rtl::OUString sPathToNode(
RTL_CONSTASCII_USTRINGPARAM(
"MultiPaneGUI/SlideSorter/PreviewCache"));
try
{
do
{
// Obtain access to the configuration.
Reference<lang::XMultiServiceFactory> xProvider (
::comphelper::getProcessServiceFactory()->createInstance(
sConfigurationProviderServiceName),
UNO_QUERY);
if ( ! xProvider.is())
break;
// Obtain access to Impress configuration.
Sequence<Any> aCreationArguments(3);
aCreationArguments[0] = makeAny(beans::PropertyValue(
::rtl::OUString(
RTL_CONSTASCII_USTRINGPARAM("nodepath")),
0,
makeAny(sPathToImpressConfigurationRoot),
beans::PropertyState_DIRECT_VALUE));
aCreationArguments[1] = makeAny(beans::PropertyValue(
::rtl::OUString(RTL_CONSTASCII_USTRINGPARAM("depth")),
0,
makeAny((sal_Int32)-1),
beans::PropertyState_DIRECT_VALUE));
aCreationArguments[2] = makeAny(beans::PropertyValue(
::rtl::OUString(RTL_CONSTASCII_USTRINGPARAM("lazywrite")),
0,
makeAny(true),
beans::PropertyState_DIRECT_VALUE));
::rtl::OUString sAccessService (::rtl::OUString(RTL_CONSTASCII_USTRINGPARAM(
"com.sun.star.configuration.ConfigurationAccess")));
Reference<XInterface> xRoot (xProvider->createInstanceWithArguments(
sAccessService, aCreationArguments));
if ( ! xRoot.is())
break;
Reference<container::XHierarchicalNameAccess> xHierarchy (xRoot, UNO_QUERY);
if ( ! xHierarchy.is())
break;
// Get the node for the slide sorter preview cache.
mxCacheNode = Reference<container::XNameAccess>(
xHierarchy->getByHierarchicalName(sPathToNode),
UNO_QUERY);
}
while (false);
}
catch (RuntimeException aException)
{
(void)aException;
}
catch (Exception aException)
{
(void)aException;
}
}
Any CacheConfiguration::GetValue (const ::rtl::OUString& rName)
{
Any aResult;
if (mxCacheNode != NULL)
{
try
{
aResult = mxCacheNode->getByName(rName);
}
catch (Exception aException)
{
(void)aException;
}
}
return aResult;
}
IMPL_LINK(CacheConfiguration,TimerCallback, Timer*,EMPTYARG)
{
    // Release our reference to the instance.
mpInstance.reset();
return 0;
}
} } } // end of namespace ::sd::slidesorter::cache
| 2,431 |
852 | #ifndef RecoLocalCalo_EcalRecProducers_plugins_EigenMatrixTypes_gpu_h
#define RecoLocalCalo_EcalRecProducers_plugins_EigenMatrixTypes_gpu_h
#include <array>
#include <Eigen/Dense>
#include "CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h"
namespace ecal {
namespace multifit {
constexpr int SampleVectorSize = 10;
constexpr int FullSampleVectorSize = 19;
constexpr int PulseVectorSize = 12;
constexpr int NGains = 3;
using data_type = ::ecal::reco::ComputationScalarType;
typedef Eigen::Matrix<data_type, SampleVectorSize, SampleVectorSize> PulseMatrixType;
typedef Eigen::Matrix<char, SampleVectorSize, 1> BXVectorType;
using SampleMatrixD = Eigen::Matrix<double, SampleVectorSize, SampleVectorSize>;
typedef Eigen::Matrix<data_type, SampleVectorSize, 1> SampleVector;
typedef Eigen::Matrix<data_type, FullSampleVectorSize, 1> FullSampleVector;
typedef Eigen::Matrix<data_type, Eigen::Dynamic, 1, 0, PulseVectorSize, 1> PulseVector;
typedef Eigen::Matrix<char, Eigen::Dynamic, 1, 0, PulseVectorSize, 1> BXVector;
typedef Eigen::Matrix<char, SampleVectorSize, 1> SampleGainVector;
typedef Eigen::Matrix<data_type, SampleVectorSize, SampleVectorSize> SampleMatrix;
typedef Eigen::Matrix<data_type, FullSampleVectorSize, FullSampleVectorSize> FullSampleMatrix;
typedef Eigen::Matrix<data_type, Eigen::Dynamic, Eigen::Dynamic, 0, PulseVectorSize, PulseVectorSize> PulseMatrix;
typedef Eigen::Matrix<data_type, SampleVectorSize, Eigen::Dynamic, 0, SampleVectorSize, PulseVectorSize>
SamplePulseMatrix;
typedef Eigen::LLT<SampleMatrix> SampleDecompLLT;
typedef Eigen::LLT<SampleMatrixD> SampleDecompLLTD;
typedef Eigen::LLT<PulseMatrix> PulseDecompLLT;
typedef Eigen::LDLT<PulseMatrix> PulseDecompLDLT;
typedef Eigen::Matrix<data_type, 1, 1> SingleMatrix;
typedef Eigen::Matrix<data_type, 1, 1> SingleVector;
typedef std::array<SampleMatrixD, NGains> SampleMatrixGainArray;
using PermutationMatrix = Eigen::PermutationMatrix<SampleMatrix::RowsAtCompileTime>;
} // namespace multifit
} // namespace ecal
#endif // RecoLocalCalo_EcalRecProducers_plugins_EigenMatrixTypes_gpu_h
| 767 |
1,168 | <filename>kythe/cxx/verifier/assertions_to_souffle.h
/*
* Copyright 2021 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef KYTHE_CXX_VERIFIER_ASSERTIONS_TO_SOUFFLE_
#define KYTHE_CXX_VERIFIER_ASSERTIONS_TO_SOUFFLE_
#include <string>
#include "kythe/cxx/verifier/assertion_ast.h"
namespace kythe::verifier {
/// \brief Turns `goal_groups` into a Souffle program.
/// \param symbol_table the symbol table used by `goal_groups`.
/// \param goal_groups the goal groups to lower.
std::string LowerGoalsToSouffle(const SymbolTable& symbol_table,
const std::vector<GoalGroup>& goal_groups);
} // namespace kythe::verifier
#endif // defined(KYTHE_CXX_VERIFIER_ASSERTIONS_TO_SOUFFLE_)
| 428 |
839 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.wsdl;
import javax.xml.namespace.QName;
public final class WSDLConstants {
public static final String WSDL_PREFIX = "wsdl";
public static final String NS_WSDL11 = "http://schemas.xmlsoap.org/wsdl/";
public static final String NP_XMLNS = "xmlns";
public static final String NS_XMLNS = "http://www.w3.org/2000/xmlns/";
// XML Schema (CR) datatypes + structures
public static final String NP_SCHEMA_XSD = "xsd";
public static final String NS_SCHEMA_XSD = "http://www.w3.org/2001/XMLSchema";
public static final QName QNAME_SCHEMA = new QName(NS_SCHEMA_XSD, "schema");
public static final QName QNAME_SCHEMA_IMPORT = new QName(NS_SCHEMA_XSD, "import");
public static final QName QNAME_SCHEMA_INCLUDE = new QName(NS_SCHEMA_XSD, "include");
// XML Schema instance
public static final String NP_SCHEMA_XSI = "xsi";
public static final String NS_SCHEMA_XSI = "http://www.w3.org/2001/XMLSchema-instance";
public static final String A_XSI_TYPE = "type";
public static final String A_XSI_NIL = "nil";
// XML Schema attribute names
public static final QName NA_XSI_TYPE = new QName(NP_SCHEMA_XSI, A_XSI_TYPE, NS_SCHEMA_XSI);
public static final QName NA_XSI_NIL = new QName(NP_SCHEMA_XSI, A_XSI_NIL, NS_SCHEMA_XSI);
public static final String NS_SOAP = "http://schemas.xmlsoap.org/wsdl/soap/";
public static final String NS_SOAP11 = NS_SOAP;
public static final String NS_SOAP12 = "http://schemas.xmlsoap.org/wsdl/soap12/";
public static final String SOAP11_PREFIX = "soap";
public static final String SOAP12_PREFIX = "soap12";
public static final String NS_SOAP_HTTP_TRANSPORT = "http://schemas.xmlsoap.org/soap/http";
public static final String NS_SOAP11_HTTP_TRANSPORT = "http://schemas.xmlsoap.org/soap/http";
public static final QName QNAME_SOAP_BINDING = new QName(NS_SOAP, "binding");
public static final QName QNAME_SOAP_OPERATION = new QName(NS_SOAP, "operation");
public static final QName QNAME_SOAP_BODY = new QName(NS_SOAP, "body");
public static final QName QNAME_SOAP_FAULT = new QName(NS_SOAP, "fault");
public static final QName QNAME_SOAP_BINDING_ADDRESS = new QName(NS_SOAP, "address");
public static final String NS_SOAP12_HTTP_BINDING = "http://www.w3.org/2003/05/soap/bindings/HTTP/";
public static final QName QNAME_SOAP12_BINDING = new QName(NS_SOAP12, "binding");
public static final QName QNAME_SOAP12_BINDING_ADDRESS = new QName(NS_SOAP12, "address");
public static final String DOCUMENT = "document";
public static final String RPC = "rpc";
public static final String LITERAL = "literal";
public static final String REPLACE_WITH_ACTUAL_URL = "REPLACE_WITH_ACTUAL_URL";
public static final String JMS_PREFIX = "jms";
public static final String TNS_PREFIX = "tns";
// WSDL 1.1 definitions
public static final QName QNAME_BINDING = new QName(NS_WSDL11, "binding");
public static final QName QNAME_DEFINITIONS = new QName(NS_WSDL11, "definitions");
public static final QName QNAME_DOCUMENTATION = new QName(NS_WSDL11, "documentation");
public static final QName QNAME_IMPORT = new QName(NS_WSDL11, "import");
public static final QName QNAME_MESSAGE = new QName(NS_WSDL11, "message");
public static final QName QNAME_PART = new QName(NS_WSDL11, "part");
public static final QName QNAME_OPERATION = new QName(NS_WSDL11, "operation");
public static final QName QNAME_INPUT = new QName(NS_WSDL11, "input");
public static final QName QNAME_OUTPUT = new QName(NS_WSDL11, "output");
public static final QName QNAME_PORT = new QName(NS_WSDL11, "port");
public static final QName QNAME_ADDRESS = new QName(NS_WSDL11, "address");
public static final QName QNAME_PORT_TYPE = new QName(NS_WSDL11, "portType");
public static final QName QNAME_FAULT = new QName(NS_WSDL11, "fault");
public static final QName QNAME_SERVICE = new QName(NS_WSDL11, "service");
public static final QName QNAME_TYPES = new QName(NS_WSDL11, "types");
// WSDL Validation
public static final String ATTR_PART_ELEMENT = "element";
public static final String ATTR_PART_TYPE = "type";
public static final String ATTR_TYPE = "type";
public static final int DOC_WRAPPED = 1;
public static final int DOC_BARE = 2;
public static final int RPC_WRAPPED = 3;
public static final int ERORR_STYLE_USE = -1;
public static final String NS_BINDING_XML = "http://cxf.apache.org/bindings/xformat";
public static final QName QNAME_XMLHTTP_BINDING_ADDRESS =
new QName("http://schemas.xmlsoap.org/wsdl/http/", "address");
public static final String ATTR_TRANSPORT = "transport";
public static final String ATTR_LOCATION = "location";
public static final String ATTR_NAME = "name";
public static final String ATTR_NAMESPACE = "namespace";
public static final String ATTR_TNS = "targetNamespace";
// usual prefix for the targetNamespace.
public static final String CONVENTIONAL_TNS_PREFIX = "tns";
public static final String WSDL11 = "1.1";
public static final String WSDL20 = "2.0";
public enum WSDLVersion {
WSDL11,
WSDL20,
UNKNOWN
};
private WSDLConstants() {
}
public static WSDLVersion getVersion(String version) {
if (WSDL11.equals(version)) {
return WSDLVersion.WSDL11;
}
if (WSDL20.equals(version)) {
return WSDLVersion.WSDL20;
}
return WSDLVersion.UNKNOWN;
}
}
| 2,366 |
416 | <filename>include/visionaray/texture/detail/storage_types/bricked_accessor.h
// This file is distributed under the MIT license.
// See the LICENSE file for details.
#pragma once
#ifndef VSNRAY_TEXTURE_DETAIL_STORAGE_TYPES_BRICKED_ACCESSOR_H
#define VSNRAY_TEXTURE_DETAIL_STORAGE_TYPES_BRICKED_ACCESSOR_H 1
#include <algorithm>
#include <array>
#include <cstddef>
#include <type_traits>
#include <visionaray/math/detail/math.h>
#include <visionaray/math/simd/gather.h>
#include <visionaray/math/simd/type_traits.h>
#include <visionaray/aligned_vector.h>
#include <visionaray/pixel_format.h>
#include <visionaray/swizzle.h>
namespace visionaray
{
//-------------------------------------------------------------------------------------------------
// Accessor for bricked storage: texels are grouped into 4x4x4 bricks; SIMD lanes are gathered individually
//
template <typename T>
class bricked_accessor
{
public:
using value_type = T;
static constexpr unsigned BW = 4;
static constexpr unsigned BH = 4;
static constexpr unsigned BD = 4;
public:
bricked_accessor() = default;
explicit bricked_accessor(std::array<unsigned, 3> size)
: data_(nullptr)
, size_(size)
{
rounded_size_[0] = div_up(size_[0], BW) * BW;
rounded_size_[1] = div_up(size_[1], BH) * BH;
rounded_size_[2] = div_up(size_[2], BD) * BD;
num_bricks_[0] = div_up(size_[0], BW);
num_bricks_[1] = div_up(size_[1], BH);
num_bricks_[2] = div_up(size_[2], BD);
}
explicit bricked_accessor(T const* data, std::array<unsigned, 3> size)
: data_(data)
, size_(size)
{
rounded_size_[0] = div_up(size_[0], BW) * BW;
rounded_size_[1] = div_up(size_[1], BH) * BH;
rounded_size_[2] = div_up(size_[2], BD) * BD;
num_bricks_[0] = div_up(size_[0], BW);
num_bricks_[1] = div_up(size_[1], BH);
num_bricks_[2] = div_up(size_[2], BD);
}
explicit bricked_accessor(unsigned w, unsigned h, unsigned d)
{
size_[0] = w;
size_[1] = h;
size_[2] = d;
rounded_size_[0] = div_up(size_[0], BW) * BW;
rounded_size_[1] = div_up(size_[1], BH) * BH;
rounded_size_[2] = div_up(size_[2], BD) * BD;
num_bricks_[0] = div_up(size_[0], BW);
num_bricks_[1] = div_up(size_[1], BH);
num_bricks_[2] = div_up(size_[2], BD);
}
std::array<unsigned, 3> size() const
{
return size_;
}
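    // Fetch the texel at (x, y, z): locate its 4x4x4 brick, compute the
    // brick's linear offset (bricks are stored contiguously with x varying
    // fastest, then y, then z), then add the texel's offset within the brick.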
template <typename U, typename I>
U value(U /* */, I const& x, I const& y, I const& z) const
{
I bx = x / BW;
I by = y / BH;
I bz = z / BD;
I brick_id = bz * num_bricks_[0] * num_bricks_[1]
+ by * num_bricks_[0]
+ bx;
I brick_offset = brick_id * BW * BH * BD;
I ix = x % BW;
I iy = y % BH;
I iz = z % BD;
I index = brick_offset + iz * BW * BH + iy * BW + ix;
return access(U{}, index);
}
void reset(T const* data)
{
data_ = data;
}
value_type const* data() const
{
return data_;
}
operator bool() const
{
return data_ != nullptr;
}
protected:
template <typename U, typename I>
U access(U /* */, I const& index) const
{
return U(data_[index]);
}
template <
typename U,
typename I,
typename = typename std::enable_if<simd::is_simd_vector<I>::value>::type
>
U access(U /* */, I const& index) const
{
return U(gather(data_, index));
}
T const* data_ = nullptr;
std::array<unsigned, 3> size_;
std::array<unsigned, 3> rounded_size_;
std::array<unsigned, 3> num_bricks_;
};
} // visionaray
#endif // VSNRAY_TEXTURE_DETAIL_STORAGE_TYPES_BRICKED_ACCESSOR_H
| 1,826 |
1,746 | <filename>code/src/main/java/com/shekhargulati/java8_tutorial/domain/TaskType.java
package com.shekhargulati.java8_tutorial.domain;
public enum TaskType {
READING, CODING, BLOGGING
}
| 72 |
563 | <reponame>wobbier/Sharpmake
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <string.h>
int main( int argc, char ** argv )
{
if ( argc != 3 )
{
printf( "Bad Args!\n" );
return 1;
}
const char * fileToStamp = argv[ 1 ];
const char * stampMessage = argv[ 2 ];
FILE * f = fopen( fileToStamp, "ab+" );
if ( f == NULL )
{
printf( "Can't open for append file %s!\n", fileToStamp );
return 1;
}
fwrite( (char *) stampMessage, strlen( stampMessage ), 1, f );
fclose( f );
return 0;
}
| 264 |
360 | <reponame>Yanci0/openGauss-server
/* -------------------------------------------------------------------------
* Copyright (c) 2021 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* -------------------------------------------------------------------------
*
* icached_rec.h
*
* IDENTIFICATION
* src\common\interfaces\libpq\client_logic_cache\icached_rec.h
*
* -------------------------------------------------------------------------
*/
#ifndef ICACHED_REC_H
#define ICACHED_REC_H
#include "libpq/libpq-fe.h"
class ICachedRec {
public:
ICachedRec(): m_original_ids(NULL){}
virtual ~ICachedRec()
{
if (m_original_ids) {
delete m_original_ids;
m_original_ids = NULL;
}
}
virtual const int* get_original_ids() const
{
return m_original_ids;
}
virtual const size_t get_num_processed_args() const =0;
protected:
virtual const Oid get_original_id(const size_t idx) const =0;
int* m_original_ids;
};
#endif | 519 |
831 | <reponame>phpc0de/idea-android<gh_stars>100-1000
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.profilers.event;
import static com.android.tools.idea.transport.faketransport.FakeTransportService.FAKE_DEVICE_ID;
import static com.google.common.truth.Truth.assertThat;
import com.android.tools.adtui.model.FakeTimer;
import com.android.tools.adtui.model.Range;
import com.android.tools.adtui.model.SeriesData;
import com.android.tools.adtui.model.event.EventAction;
import com.android.tools.adtui.model.event.LifecycleAction;
import com.android.tools.adtui.model.event.LifecycleEvent;
import com.android.tools.idea.transport.faketransport.FakeGrpcChannel;
import com.android.tools.idea.transport.faketransport.FakeTransportService;
import com.android.tools.profiler.proto.Common;
import com.android.tools.profiler.proto.EventProfiler;
import com.android.tools.profiler.proto.Interaction;
import com.android.tools.profilers.FakeIdeProfilerServices;
import com.android.tools.profilers.ProfilerClient;
import com.android.tools.profilers.StudioProfilers;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(Parameterized.class)
public class LifeCycleEventDataSeriesTest {
private static final long TEST_START_TIME_NS = TimeUnit.SECONDS.toNanos(10);
private static final long TEST_END_TIME_NS = TEST_START_TIME_NS + TimeUnit.SECONDS.toNanos(1);
private static final String ACTIVITY_NAME = "TestActivity";
private static final String FRAGMENT_NAME = "TestFragment";
private static final String ACTIVITY_NAME_2 = "TestActivity2";
@Parameterized.Parameters
public static Collection<Boolean> useNewEventPipelineParameter() {
return Arrays.asList(false, true);
}
private FakeTimer myTimer = new FakeTimer();
private FakeTransportService myTransportService = new FakeTransportService(myTimer);
private FakeEventService myEventService = new FakeEventService();
@Rule public FakeGrpcChannel myGrpcChannel = new FakeGrpcChannel(getClass().getName(), myTransportService, myEventService);
private FakeIdeProfilerServices myIdeProfilerServices;
private LifecycleEventDataSeries myActivitySeries;
private LifecycleEventDataSeries myFragmentSeries;
public LifeCycleEventDataSeriesTest(boolean useNewEventPipeline) {
myIdeProfilerServices = new FakeIdeProfilerServices();
myIdeProfilerServices.enableEventsPipeline(useNewEventPipeline);
}
@Before
public void setUp() {
StudioProfilers profilers = new StudioProfilers(new ProfilerClient(myGrpcChannel.getChannel()), myIdeProfilerServices, myTimer);
myActivitySeries = new LifecycleEventDataSeries(profilers, false);
myFragmentSeries = new LifecycleEventDataSeries(profilers, true);
}
@Test
public void testActivityStarted() {
buildActivityEvent(ACTIVITY_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.CREATED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
},
0);
Range range = new Range(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS), TimeUnit.NANOSECONDS.toMicros(TEST_END_TIME_NS));
List<SeriesData<EventAction<LifecycleEvent>>> dataList = myActivitySeries.getDataForRange(range);
assertThat(dataList).hasSize(1);
SeriesData<EventAction<LifecycleEvent>> event = dataList.get(0);
verifyActivity(event, 0);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.STARTED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(ACTIVITY_NAME);
}
@Test
public void testActivityCompleted() {
buildActivityEvent(ACTIVITY_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.CREATED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.PAUSED,
TEST_END_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.DESTROYED,
TEST_END_TIME_NS),
},
0);
Range range = new Range(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS), TimeUnit.NANOSECONDS.toMicros(TEST_END_TIME_NS));
List<SeriesData<EventAction<LifecycleEvent>>> dataList = myActivitySeries.getDataForRange(range);
assertThat(dataList).hasSize(1);
SeriesData<EventAction<LifecycleEvent>> event = dataList.get(0);
verifyActivity(event, TEST_END_TIME_NS);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.COMPLETED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(
String.format("%s - %s", ACTIVITY_NAME, Interaction.ViewData.State.DESTROYED.toString().toLowerCase()));
}
@Test
public void testActivityDied() {
buildActivityEvent(ACTIVITY_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.CREATED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.PAUSED,
TEST_END_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.STOPPED,
TEST_END_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.DESTROYED,
TEST_END_TIME_NS),
},
0);
Range range = new Range(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS), TimeUnit.NANOSECONDS.toMicros(TEST_END_TIME_NS));
List<SeriesData<EventAction<LifecycleEvent>>> dataList = myActivitySeries.getDataForRange(range);
assertThat(dataList).hasSize(1);
SeriesData<EventAction<LifecycleEvent>> event = dataList.get(0);
verifyActivity(event, TEST_END_TIME_NS);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.COMPLETED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(
String.format("%s - %s - %s", ACTIVITY_NAME, Interaction.ViewData.State.STOPPED.toString().toLowerCase(),
Interaction.ViewData.State.DESTROYED.toString().toLowerCase()));
}
@Test
public void testActivityDiedThenResumed() {
buildActivityEvent(ACTIVITY_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.CREATED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.PAUSED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.DESTROYED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.REMOVED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.CREATED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.PAUSED,
TEST_END_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.DESTROYED,
TEST_END_TIME_NS),
},
0);
Range range = new Range(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS), TimeUnit.NANOSECONDS.toMicros(TEST_END_TIME_NS));
List<SeriesData<EventAction<LifecycleEvent>>> dataList = myActivitySeries.getDataForRange(range);
assertThat(dataList).hasSize(2);
SeriesData<EventAction<LifecycleEvent>> event = dataList.get(0);
verifyActivity(event, TEST_START_TIME_NS);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.COMPLETED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(
String.format("%s - %s - %s", ACTIVITY_NAME, Interaction.ViewData.State.DESTROYED.toString().toLowerCase(),
Interaction.ViewData.State.REMOVED.toString().toLowerCase()));
event = dataList.get(1);
verifyActivity(event, TEST_END_TIME_NS);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.COMPLETED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(
String.format("%s - %s", ACTIVITY_NAME, Interaction.ViewData.State.DESTROYED.toString().toLowerCase()));
}
@Test
public void testActivityDestroyedDisplayString() {
buildActivityEvent(ACTIVITY_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.CREATED, TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.DESTROYED, TEST_START_TIME_NS),
},
0);
Range range = new Range(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS), TimeUnit.NANOSECONDS.toMicros(TEST_END_TIME_NS));
List<SeriesData<EventAction<LifecycleEvent>>> dataList = myActivitySeries.getDataForRange(range);
assertThat(dataList).hasSize(1);
SeriesData<EventAction<LifecycleEvent>> event = dataList.get(0);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.COMPLETED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(
String.format("%s - %s", ACTIVITY_NAME, Interaction.ViewData.State.DESTROYED.toString().toLowerCase()));
}
@Test
public void testDestroyedEventOutOfOrder() {
buildActivityEvent(ACTIVITY_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.CREATED, TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.DESTROYED, TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.PAUSED, TEST_START_TIME_NS),
},
0);
Range range = new Range(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS), TimeUnit.NANOSECONDS.toMicros(TEST_END_TIME_NS));
List<SeriesData<EventAction<LifecycleEvent>>> dataList = myActivitySeries.getDataForRange(range);
assertThat(dataList).hasSize(1);
SeriesData<EventAction<LifecycleEvent>> event = dataList.get(0);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.COMPLETED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(ACTIVITY_NAME);
}
@Test
public void testMultipleActivity() {
buildActivityEvent(ACTIVITY_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.CREATED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
},
0);
buildActivityEvent(ACTIVITY_NAME_2,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.CREATED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.PAUSED,
TEST_END_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.DESTROYED,
TEST_END_TIME_NS),
},
0);
Range range = new Range(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS), TimeUnit.NANOSECONDS.toMicros(TEST_END_TIME_NS));
List<SeriesData<EventAction<LifecycleEvent>>> dataList = myActivitySeries.getDataForRange(range);
assertThat(dataList).hasSize(2);
SeriesData<EventAction<LifecycleEvent>> event = dataList.get(0);
verifyActivity(event, 0);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.STARTED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(ACTIVITY_NAME);
event = dataList.get(1);
verifyActivity(event, TEST_END_TIME_NS);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.COMPLETED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(
String.format("%s - %s", ACTIVITY_NAME_2, Interaction.ViewData.State.DESTROYED.toString().toLowerCase()));
}
@Test
public void testOnlyFragmentReceived() {
buildActivityEvent(FRAGMENT_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.PAUSED,
TEST_END_TIME_NS),
},
1234
);
buildActivityEvent(ACTIVITY_NAME,
new ActivityStateData[]{
new ActivityStateData(Interaction.ViewData.State.RESUMED,
TEST_START_TIME_NS),
new ActivityStateData(Interaction.ViewData.State.PAUSED,
TEST_END_TIME_NS),
},
0
);
Range range = new Range(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS), TimeUnit.NANOSECONDS.toMicros(TEST_END_TIME_NS));
List<SeriesData<EventAction<LifecycleEvent>>> dataList = myFragmentSeries.getDataForRange(range);
assertThat(dataList).hasSize(1);
SeriesData<EventAction<LifecycleEvent>> event = dataList.get(0);
verifyActivity(event, TEST_END_TIME_NS);
assertThat(event.value.getType()).isEqualTo(LifecycleEvent.COMPLETED);
assertThat(((LifecycleAction)event.value).getName()).isEqualTo(FRAGMENT_NAME);
}
private static void verifyActivity(SeriesData<EventAction<LifecycleEvent>> event, long endTime) {
assertThat(event.x).isEqualTo(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS));
assertThat(event.value.getStartUs()).isEqualTo(TimeUnit.NANOSECONDS.toMicros(TEST_START_TIME_NS));
assertThat(event.value.getEndUs()).isEqualTo(TimeUnit.NANOSECONDS.toMicros(endTime));
}
private void buildActivityEvent(String name, ActivityStateData[] states, long contextHash) {
if (myIdeProfilerServices.getFeatureConfig().isUnifiedPipelineEnabled()) {
for (ActivityStateData state : states) {
myTransportService.addEventToStream(FAKE_DEVICE_ID,
Common.Event.newBuilder()
.setKind(Common.Event.Kind.VIEW)
.setTimestamp(state.activityStateTime)
.setGroupId(name.hashCode())
.setIsEnded(state.isEndState())
.setView(
Interaction.ViewData.newBuilder()
.setName(name)
.setState(state.activityState)
.setParentActivityId(contextHash)
)
.build());
}
}
else {
EventProfiler.ActivityData.Builder builder = EventProfiler.ActivityData.newBuilder();
builder.setName(name).setHash(name.hashCode()).setActivityContextHash(contextHash);
for (ActivityStateData state : states) {
builder.addStateChanges(EventProfiler.ActivityStateData.newBuilder()
.setState(state.activityState)
.setTimestamp(state.activityStateTime)
.build());
}
myEventService.addActivityEvent(builder.build());
}
}
private static final class ActivityStateData {
public Interaction.ViewData.State activityState;
public long activityStateTime;
private ActivityStateData(Interaction.ViewData.State state, long time) {
activityState = state;
activityStateTime = time;
}
private boolean isEndState() {
switch (activityState) {
case PAUSED:
case STOPPED:
case DESTROYED:
case SAVED:
case REMOVED:
return true;
default:
return false;
}
}
}
} | 8,836 |
852 | <reponame>ckamtsikis/cmssw
#include "CondFormats/Calibration/interface/big.h"
#include <iostream>
//fill big
void big::fill(size_t tVectorSize, size_t thVectorSize, size_t sVectorSize, const std::string& atitle) {
for (size_t i = 0; i < tVectorSize; ++i) {
big::bigEntry b;
b.fill(i, 1.0);
tVector_.push_back(b);
}
for (size_t i = 0; i < thVectorSize; ++i) {
big::bigHeader h;
h.fill(atitle);
thVector_.push_back(h);
}
for (size_t i = 0; i < sVectorSize; ++i) {
big::bigStore s;
s.fill(atitle);
sVector_.push_back(s);
}
}
//fill bigEntry
void big::bigEntry::fill(int r, float seed) {
runnum = r;
alpha = seed;
cotalpha = seed;
beta = seed;
cotbeta = seed;
costrk[0] = seed * 0.1;
costrk[1] = seed * 0.2;
costrk[2] = seed * 0.3;
qavg = seed;
symax = seed;
dyone = seed;
syone = seed;
sxmax = seed;
dxone = seed;
sxone = seed;
dytwo = seed;
sytwo = seed;
dxtwo = seed;
sxtwo = seed;
qmin = seed;
for (int i = 0; i < parIDX::LEN1; ++i) {
for (int j = 0; j < parIDX::LEN2; ++j) {
for (int k = 0; k < parIDX::LEN3; ++k) {
par[parIDX::indexOf(i, j, k)] = seed;
}
}
}
for (int i = 0; i < ytempIDX::LEN1; ++i) {
for (int j = 0; j < ytempIDX::LEN2; ++j) {
ytemp[ytempIDX::indexOf(i, j)] = seed;
}
}
for (int i = 0; i < xtempIDX::LEN1; ++i) {
for (int j = 0; j < xtempIDX::LEN2; ++j) {
xtemp[xtempIDX::indexOf(i, j)] = seed;
}
}
for (int i = 0; i < avgIDX::LEN1; ++i) {
for (int j = 0; j < avgIDX::LEN2; ++j) {
for (int k = 0; k < avgIDX::LEN3; ++k) {
avg[avgIDX::indexOf(i, j, k)] = seed;
}
}
}
for (int i = 0; i < aqflIDX::LEN1; ++i) {
for (int j = 0; j < aqflIDX::LEN2; ++j) {
for (int k = 0; k < aqflIDX::LEN3; ++k) {
aqfl[aqflIDX::indexOf(i, j, k)] = seed;
}
}
}
for (int i = 0; i < chi2IDX::LEN1; ++i) {
for (int j = 0; j < chi2IDX::LEN2; ++j) {
for (int k = 0; k < chi2IDX::LEN3; ++k) {
chi2[chi2IDX::indexOf(i, j, k)] = seed;
}
}
}
for (int i = 0; i < spareIDX::LEN1; ++i) {
for (int j = 0; j < spareIDX::LEN2; ++j) {
spare[spareIDX::indexOf(i, j)] = seed;
}
}
}
//fill bigHeader
void big::bigHeader::fill(const std::string& atitle) {
  title = atitle;
ID = 0;
NBy = 1;
NByx = 2;
NBxx = 3;
NFy = 4;
NFyx = 5;
NFxx = 6;
vbias = 0.1;
temperature = 0.2;
fluence = 0.3;
qscale = 0.4;
s50 = 0.5;
templ_version = 1;
}
//fill bigStore
void big::bigStore::fill(const std::string& atitle) {
head.fill(atitle);
for (int i = 0; i < entbyIDX::LEN1; ++i) {
bigEntry b;
b.fill(i, 0.5 * i);
entby[entbyIDX::indexOf(i)] = b; //or use push_back as prefer
}
std::cout << "length of entbx 1 " << entbxIDX::LEN1 << std::endl;
std::cout << "length of entbx 2 " << entbxIDX::LEN2 << std::endl;
std::cout << "total size of entbx " << entbxIDX::SIZE << std::endl;
for (int i = 0; i < entbxIDX::LEN1; ++i) {
for (int j = 0; j < entbxIDX::LEN2; ++j) {
bigEntry c;
c.fill(i * j, 0.3 * j);
entbx[entbxIDX::indexOf(i, j)] = c; //or use push_back as prefer
}
}
for (int i = 0; i < entfyIDX::LEN1; ++i) {
bigEntry f;
f.fill(i, 0.4 * i);
entfy[entfyIDX::indexOf(i)] = f; //or use push_back as prefer
}
for (int i = 0; i < entfxIDX::LEN1; ++i) {
for (int j = 0; j < entfxIDX::LEN2; ++j) {
bigEntry f;
f.fill(i * j, 0.25 * j);
entfx[entfxIDX::indexOf(i, j)] = f; //or use push_back as prefer
}
}
}
| 1,849 |
880 | <reponame>GitHubRepoDescription/OLINUXINO-forked<filename>SOFTWARE/iMX233/I2C/MOD-IO2/i2c-tool.c
/*
This software uses a BSD license.
Copyright (c) 2010, <NAME> / chumby industries
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
* Neither the name of Sean Cross / chumby industries nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <string.h>
int main(int argc, char **argv)
{
int file;
char *buffer_in, *buffer_out;
int num_bytes_read,num_bytes_write, address, i;
char device[] = "/dev/i2c-0";
if(argc < 2)
{
fprintf(stdout, "Invalid number of parameters\n");
exit(1);
}
if(!strcmp(argv[1], "-w") || !strcmp(argv[1], "w"))
{
num_bytes_write = strtol(argv[4], NULL, 0);
address = strtol(argv[3], NULL, 0);
if(argc != num_bytes_write + 5)
{
fprintf(stdout, "Invalid number of parameters\n");
exit(1);
}
if(address < 0x03 || address > 0x77)
{
fprintf(stdout, "Slave address out of range! [0x03..0x77]\n");
exit(1);
}
fprintf(stdout, "SLAVE ADDRESS: 0x%02X\n", (unsigned int) address);
fflush(stdout);
if(num_bytes_write > 255 || num_bytes_write < 1)
{
fprintf(stdout, "Invalid number of bytes to write\n");
exit(1);
}
fprintf(stdout, "NUMBER OF BYTES TO WRITE: %d\n", (unsigned int) num_bytes_write);
fflush(stdout);
buffer_out = (char*) malloc(num_bytes_write * sizeof(char));
if(buffer_out == NULL)
{
fprintf(stdout, "Out of memory\n");
exit(1);
}
fprintf(stdout, "MEMORY ALLOCATED AT ADDRESS: 0x%X\n", buffer_out);
fflush(stdout);
for ( i = 0; i < num_bytes_write; i++)
{
buffer_out[i] = strtol(argv[5+i], NULL, 0);
}
device[9] = argv[2][0];
if ((file = open(device, O_RDWR)) < 0)
{
fprintf(stdout, "%s --> ", device);
perror("Failed to open control file");
exit(1);
}
fprintf(stdout, "%s OPENDED!\n", device);
fflush(stdout);
if(ioctl(file, I2C_SLAVE, address) < 0)
{
perror("Failed to access the bus");
exit(1);
}
if(write(file, buffer_out, num_bytes_write) != num_bytes_write)
{
perror("Failed writing to the I2C-bus");
exit(1);
}
else
{
printf("WRITE:SUCCESS\n");
}
free(buffer_out);
close(file);
}
else if(!strcmp(argv[1], "-r") || !strcmp(argv[1], "r"))
{
if(argc != 5)
{
fprintf(stdout, "Invalid number of parameters\n");
exit(1);
}
num_bytes_read = strtol(argv[4], NULL, 0);
address = strtol(argv[3], NULL, 0);
if(address < 0x03 || address > 0x77)
{
fprintf(stdout, "Slave address out of range! [0x03..0x77]\n");
exit(1);
}
fprintf(stdout, "SLAVE ADDRESS: 0x%02X\n", (unsigned int) address);
fflush(stdout);
if(num_bytes_read > 255 || num_bytes_read < 1)
{
fprintf(stdout, "Invalid number of bytes to read\n");
exit(1);
}
fprintf(stdout, "NUMBER OF BYTES TO READ: %d\n", (unsigned int) num_bytes_read);
fflush(stdout);
buffer_in = (char*) malloc(num_bytes_read * sizeof(char));
if(buffer_in == NULL)
{
fprintf(stdout, "Out of memory\n");
exit(1);
}
fprintf(stdout, "MEMORY ALLOCATED AT ADDRESS: 0x%X\n", buffer_in);
fflush(stdout);
device[9] = argv[2][0];
if ((file = open(device, O_RDWR)) < 0)
{
fprintf(stdout, "%s --> ", device);
perror("Failed to open control file");
exit(1);
}
fprintf(stdout, "%s OPENDED!\n", device);
fflush(stdout);
if(ioctl(file, I2C_SLAVE, address) < 0)
{
perror("Failed to access the bus");
exit(1);
}
if(read(file, buffer_in, num_bytes_read) != num_bytes_read)
{
perror("Failed reading from the I2C-bus");
}
else
{
for(i = 0; i < num_bytes_read; i++)
{
printf("BYTE[%d]: 0x%02X\n", i,(unsigned int)buffer_in[i]);
}
}
free(buffer_in);
close(file);
}
else if(!strcmp(argv[1], "-rw") || !strcmp(argv[1], "rw"))
{
if(argc < 7)
{
fprintf(stdout, "Invalid number of parameters\n");
exit(1);
}
address = strtol(argv[3], NULL, 0);
num_bytes_write = strtol(argv[5], NULL, 0);
num_bytes_read = strtol(argv[4], NULL, 0);
if(argc != num_bytes_write + 6)
{
fprintf(stdout, "Invalid number of parameters\n");
exit(1);
}
fprintf(stdout, "Number of parameters: %d\n", argc);
fflush(stdout);
if(address < 0x03 || address > 0x77)
{
fprintf(stdout, "Slave address out of range! [0x03..0x77]\n");
exit(1);
}
fprintf(stdout, "SLAVE ADDRESS: 0x%02X\n", (unsigned int) address);
fflush(stdout);
if(!num_bytes_read)
{
struct i2c_rdwr_ioctl_data packets;
struct i2c_msg messages[1];
int user;
unsigned long funcs;
if(num_bytes_write > 255 || num_bytes_write < 1)
{
fprintf(stdout, "Invalid number of bytes to write\n");
exit(1);
}
fprintf(stdout, "NUMBER OF BYTES TO READ: %d\n", (unsigned int) num_bytes_read);
fflush(stdout);
fprintf(stdout, "NUMBER OF BYTES TO WRITE: %d\n", (unsigned int) num_bytes_write);
fflush(stdout);
device[9] = argv[2][0];
if ((file = open(device, O_RDWR)) < 0)
{
fprintf(stdout, "%s --> ", device);
perror("Failed to open control file");
exit(1);
}
fprintf(stdout, "%s OPENDED!\n", device);
fflush(stdout);
user = ioctl(file,I2C_FUNCS, &funcs);
fprintf(stdout, "FUNCTIONALITY: 0x%X\n", (unsigned int)funcs);
buffer_out = (char*) malloc(num_bytes_write * sizeof(char));
if(buffer_out == NULL)
{
fprintf(stdout, "Out of memory\n");
exit(1);
}
fprintf(stdout, "MEMORY ALLOCATED AT ADDRESS: 0x%X\n", buffer_out);
fflush(stdout);
for ( i = 0; i < num_bytes_write; i++)
{
buffer_out[i] = strtol(argv[6+i], NULL, 0);
}
messages[0].addr = address;
messages[0].flags = 0;
messages[0].len = num_bytes_write;
messages[0].buf = buffer_out;
packets.msgs = messages;
packets.nmsgs = 1;
fprintf(stdout, "\nPACKET DATA: \
\n-------------------- \
\nADDRESS: 0x%02X \
\nFLAG: 0x%02X \
\nLENGTH: 0x%02X \
\nMESSAGES: %d\n",
(unsigned int)address,
(unsigned int)messages[0].flags,
(unsigned int)num_bytes_write,
packets.nmsgs);
fflush(stdout);
if(ioctl(file, I2C_RDWR, &packets) < 0)
{
perror("Error sending data");
return 1;
}
printf("SENDING:DONE\n");
free(buffer_out);
close(file);
}
else
{
struct i2c_rdwr_ioctl_data packets;
struct i2c_msg messages[2];
if(num_bytes_write > 255 || num_bytes_write < 1)
{
fprintf(stdout, "Invalid number of bytes to write\n");
exit(1);
}
if(num_bytes_read > 255)
{
fprintf(stdout, "Invalid number of bytes to read\n");
exit(1);
}
fprintf(stdout, "NUMBER OF BYTES TO READ: %d\n", (unsigned int) num_bytes_read);
fflush(stdout);
fprintf(stdout, "NUMBER OF BYTES TO WRITE: %d\n", (unsigned int) num_bytes_write);
fflush(stdout);
device[9] = argv[2][0];
if ((file = open(device, O_RDWR)) < 0)
{
fprintf(stdout, "%s --> ", device);
perror("Failed to open control file");
exit(1);
}
fprintf(stdout, "%s OPENDED!\n", device);
fflush(stdout);
buffer_out = (char*)malloc(num_bytes_write * sizeof(char));
buffer_in = (char*)malloc(num_bytes_read * sizeof(char));
if(buffer_out == NULL || buffer_in == NULL)
{
fprintf(stdout, "Out of memory\n");
exit(1);
}
fprintf(stdout, "MEMORY ALLOCATED AT ADDRESS: 0x%X\n", buffer_out);
fflush(stdout);
fprintf(stdout, "MEMORY ALLOCATED AT ADDRESS: 0x%X\n", buffer_in);
fflush(stdout);
for ( i = 0; i < num_bytes_write; i++)
{
buffer_out[i] = strtol(argv[6+i], NULL, 0);
}
messages[0].addr = address;
messages[0].flags = 0;
            messages[0].len = num_bytes_write;
messages[0].buf = buffer_out;
messages[1].addr = address;
messages[1].flags = I2C_M_RD;
            messages[1].len = num_bytes_read;
messages[1].buf = buffer_in;
packets.msgs = messages;
packets.nmsgs = 2;
fprintf(stdout, "\nPACKET DATA: \
\n-------------------- \
\nADDRESS: 0x%02X \
\nFLAG: 0x%02X \
\nLENGTH: 0x%02X \
\n\nADDRESS: 0x%02X \
\nFLAG: 0x%02X \
\nLENGTH: 0x%02X \
\nMESSAGES: %d\n",
(unsigned int)messages[0].addr,
(unsigned int)messages[0].flags,
(unsigned int)num_bytes_write,
(unsigned int)messages[1].addr,
(unsigned int)messages[1].flags,
(unsigned int)num_bytes_read,
packets.nmsgs);
fflush(stdout);
if(ioctl(file, I2C_RDWR, &packets) < 0)
{
perror("Error sending data");
exit(1);
}
printf("SENDING:DONE\n");
for(i = 0; i <num_bytes_read; i++)
{
printf("BYTE[%d]: 0x%02X\n", i,(unsigned int)buffer_in[i]);
}
free(buffer_out);
free(buffer_in);
close(file);
}
}
else if(!strcmp(argv[1], "-h") || !strcmp(argv[1], "--h") || !strcmp(argv[1], "-help") || !strcmp(argv[1], "h"))
{
printf("\nUSAGE:\t%s -r I2CBUS ADDRESS [NUMBER OF BYTES]\n", argv[0]);
printf("\t%s -w I2CBUS ADDRESS [NUMBER OF BYTES] [DATA0] [DATA1] ...\n", argv[0]);
printf("\t%s -rw I2CBUS ADDRESS [BYTES TO READ] [BYTES TO WRITE] [DATA0] [DATA1] ...\n\n", argv[0]);
printf("\t -r\tRead <n> bytes from specific address\n");
printf("\t -w\tWrite <n> bytes to specific address\n");
printf("\t -rw\tFirst write bytes to an address, followed by restart condition and read of <n> bytes\n\n");
}
else
{
fprintf(stdout, "Invalid operator \"%s\"\n", argv[1]);
exit(1);
}
return 0;
}
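/*
 * Illustrative invocations (added for clarity; the bus number, slave address
 * and data bytes below are hypothetical examples, derived only from the
 * argument parsing above: argv[2] = bus digit, argv[3] = slave address,
 * then the byte count(s) and data bytes):
 *
 *   ./i2c-tool -r  0 0x21 2              read 2 bytes from slave 0x21 on /dev/i2c-0
 *   ./i2c-tool -w  0 0x21 2 0x01 0xFF    write 2 bytes (0x01 0xFF) to slave 0x21
 *   ./i2c-tool -rw 0 0x21 2 1 0x10       write 1 byte (0x10), repeated start, read 2 bytes
 */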
| 5,792 |
374 | #include <avr/io.h>
#include <avr/interrupt.h>
#include <util/delay.h>
#include "avr_print.h"
#include <stdio.h>
#include "iparpetc.h"
#include "enc424j600.h"
#include "http.h"
#include <basicfat.h>
#include <util10.h>
#include <avr/pgmspace.h>
#include <string.h>
#include <ntsc.h>
#include <video.h>
#include <tcp.h>
#define NOOP asm volatile("nop" ::)
const char * twiddles = "/-\\|";
uint8_t twiddle = 0;
static void setup_clock( void )
{
/*Examine Page 33*/
CLKPR = 0x80; /*Setup CLKPCE to be receptive*/
CLKPR = 0x00; /*No scalar*/
// OSCCAL=0xff;
}
unsigned short frameno;
unsigned char cc;
#define POP enc424j600_pop8()
#define POP16 enc424j600_pop16()
#define PUSH(x) enc424j600_push8(x)
#define PUSH16(x) enc424j600_push16(x)
#ifndef INCLUDE_TCP
#error Need TCP for the HTTP test.
#endif
unsigned char MyIP[4] = { 192, 168, 0, 142 };
unsigned char MyMask[4] = { 255, 255, 255, 0 };
unsigned char MyMAC[6];
int8_t termconn = -1;
uint8_t TCPReceiveSyn( uint16_t portno )
{
// sendhex4( portno );
if( portno == 80 )
{
uint8_t ret = GetFreeConnection();
HTTPInit( ret, ret );
return ret;
}
if( portno == 23 )
{
if( termconn == -1 )
{
sendstr( "New.\n" );
termconn = GetFreeConnection();
return termconn;
}
else
{
sendstr( "Ful.\n" );
}
}
return 0;
}
void TCPConnectionClosing( uint8_t conn )
{
if( conn == termconn )
{
termconn = -1;
}
else
{
HTTPClose( conn );
}
}
uint8_t didsend;
uint8_t TCPReceiveData( uint8_t connection, uint16_t totallen )
{
if( connection == termconn )
{
while( totallen-- )
sendchr( POP );
}
else
{
didsend = 0;
HTTPGotData( connection, totallen );
return 0;
}
	return 0;
}
void PushStr( const char * msg )
{
while( *msg )
{
PUSH( *(msg++) );
}
}
void HTTPCustomStart( )
{
uint8_t i, clusterno;
struct HTTPConnection * h = curhttp;
const char * path = &h->pathbuffer[0];
sendstr( "Request: " );
sendstr( path );
sendchr( '\n' );
h->data.user.a = 5;
h->isdone = 0;
h->isfirst = 1;
}
uint8_t hex2val( unsigned char c )
{
if( c >= '0' && c <= '9' )
return c - '0';
else if( c >= 'a' && c <= 'f' )
return c - 'a' + 10;
else if( c >= 'A' && c <= 'F' )
return c - 'A' + 10;
else
return 0;
}
void int8tohex( unsigned char v, char * data )
{
unsigned char nibble = v>>4;
data[0] = (nibble<10)?(nibble+'0'):(nibble+'a'-10);
nibble = v&0x0f;
data[1] = (nibble<10)?(nibble+'0'):(nibble+'a'-10);
}
void HTTPCustomCallback( )
{
uint16_t i, bytestoread;
struct HTTPConnection * h = curhttp;
if( h->isdone )
{
HTTPClose( h->socket );
return;
}
if( h->isfirst )
{
TCPs[h->socket].sendtype = ACKBIT | PSHBIT;
StartTCPWrite( h->socket );
//TODO: Content Length? MIME-Type?
PushStr( "HTTP/1.1 200 Ok\r\nConnection: close\r\n\r\n" );
if( strncmp( h->pathbuffer, "/d/r1?", 6 ) == 0 )
{
char outb[3] = {0, 0, 0};
char * bp = h->pathbuffer + 6;
unsigned char address = 0;
address += hex2val( *(bp++) )<<4;
address += hex2val( *(bp++) );
unsigned char * cc = (unsigned char*)address;
int8tohex( *cc, outb );
PushStr( outb );
}
else if( strncmp( h->pathbuffer, "/d/w1?", 6 ) == 0 )
{
char * bp = h->pathbuffer + 6;
unsigned char address = 0;
address += hex2val( *(bp++) )<<4;
address += hex2val( *(bp++) );
unsigned char value = 0;
value += hex2val( *(bp++) )<<4;
value += hex2val( *(bp++) );
unsigned char * cc = (unsigned char*)address;
*cc = value;
}
else if( strncmp( h->pathbuffer, "/d/r2?", 6 ) == 0 )
{
char outb[3] = {0, 0, 0};
char * bp = h->pathbuffer + 6;
unsigned char address = 0;
address += hex2val( *(bp++) )<<4;
address += hex2val( *(bp++) );
			unsigned short * cc = (unsigned short*)address;
unsigned short vo = *cc;
int8tohex( vo>>8, outb );
PushStr( outb );
int8tohex( vo&0xff, outb );
PushStr( outb );
}
else if( strncmp( h->pathbuffer, "/d/w2?", 6 ) == 0 )
{
char * bp = h->pathbuffer + 6;
unsigned char address = 0;
address += hex2val( *(bp++) )<<4;
address += hex2val( *(bp++) );
unsigned short value = 0;
value += hex2val( *(bp++) )<<12;
value += hex2val( *(bp++) )<<8;
value += hex2val( *(bp++) )<<4;
value += hex2val( *(bp++) );
			unsigned short * cc = (unsigned short*)address;
*cc = value;
}
else
{
PushStr( "Hello, World!\r\n" );
PushStr( h->pathbuffer );
}
EndTCPWrite( h->socket );
h->isfirst = 0;
h->isdone = 1;
return;
}
}
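/*
 * Illustrative request formats handled above (the addresses and values are
 * made-up examples, inferred from the hex parsing in HTTPCustomCallback, not
 * taken from the original project documentation):
 *
 *   GET /d/r1?25      -> reply with the byte at address 0x25 as two hex chars
 *   GET /d/w1?25ff    -> write 0xff to the byte at address 0x25
 *   GET /d/r2?25      -> reply with the 16-bit word at address 0x25 (four hex chars)
 *   GET /d/w2?25beef  -> write 0xbeef to the 16-bit word at address 0x25
 *   any other path    -> "Hello, World!" followed by the requested path
 */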
volatile extern char framebuffer[];
int main( void )
{
char stip[4];
uint8_t delayctr, i;
uint8_t marker;
termconn = -1;
//Input the interrupt.
DDRD &= ~_BV(2);
cli();
setup_clock();
SetupNTSC();
setup_spi();
restart:
//Configure T2 to "overflow" at 100 Hz, this lets us run the TCP clock
TCCR2A = _BV(WGM21) | _BV(WGM20);
TCCR2B = _BV(WGM22) | _BV(CS22) | _BV(CS21) | _BV(CS20);
//T2 operates on clkIO, fast PWM. Fast PWM's TOP is OCR2A
#define T2CNT ((F_CPU/1024)/100)
#if( T2CNT > 254 )
#undef T2CNT
#define T2CNT 254
#endif
OCR2A = T2CNT;
#ifndef HTTP_USE_MEMORY_FS
if( initSD() )
{
sendstr( "SD card Fail.\n" );
}
openFAT();
#endif
sei();
InitTCP();
DDRC &= 0;
if( enc424j600_init( MyMAC ) )
{
sendstr( "Network Fail!\n" );
goto restart;
}
sendstr( "Network Ok at " );
for( i = 0; i < 4; i++ )
{
Uint8To10Str( stip, MyIP[i] );
sendstr( stip );
if( i != 3 )
sendchr( '.' );
}
sendstr( "\nBoot Ok.\n" );
while(1)
{
unsigned short r;
r = enc424j600_recvpack( );
if( r ) continue; //may be more
HTTPTick();
if( TIFR2 & _BV(TOV2) )
{
TIFR2 |= _BV(TOV2);
TickTCP();
}
framebuffer[NTWIDTH*NTLINES-1] = twiddles[(twiddle++)&3];
}
return 0;
}
| 2,706 |
317 | //
// NSNumber+Nib2ObjcExtensions.h
// nib2objc
//
// Created by Adrian on 3/14/09.
// <NAME> 2009
//
#import <Cocoa/Cocoa.h>
@interface NSNumber (Nib2ObjcExtensions)
- (NSString *)booleanString;
- (NSString *)intString;
- (NSString *)floatString;
- (NSString *)autoresizingMaskString;
- (NSString *)contentModeString;
- (NSString *)textAlignmentString;
- (NSString *)borderStyleString;
- (NSString *)contentHorizontalAlignmentString;
- (NSString *)contentVerticalAlignmentString;
- (NSString *)keyboardAppearanceString;
- (NSString *)returnKeyTypeString;
- (NSString *)autocapitalizationTypeString;
- (NSString *)autocorrectionTypeString;
- (NSString *)keyboardTypeString;
- (NSString *)progressViewStyleString;
- (NSString *)baselineAdjustmentString;
- (NSString *)lineBreakModeString;
- (NSString *)activityIndicatorViewStyleString;
- (NSString *)buttonTypeString;
- (NSString *)segmentedControlStyleString;
- (NSString *)scrollViewIndicatorStyleString;
- (NSString *)tableViewStyleString;
- (NSString *)tableViewCellSeparatorStyleString;
- (NSString *)tableViewCellAccessoryString;
- (NSString *)tableViewCellEditingStyleString;
- (NSString *)tableViewCellSelectionStyleString;
- (NSString *)datePickerModeString;
- (NSString *)barStyleString;
- (NSString *)barButtonItemStyleString;
- (NSString *)barButtonSystemItemString;
- (NSString *)tabBarSystemItemString;
- (NSString *)mapTypeString;
- (NSString *)clearButtonModeString;
- (NSString *)swipeGestureRecognizerDirectionString;
- (NSString *)modalPresentationStyleString;
- (NSString *)modalTransitionStyleString;
- (NSString *)drawableColorFormatString;
- (NSString *)drawableDepthFormatString;
- (NSString *)drawableMultisampleString;
- (NSString *)drawableStencilFormatString;
@end
| 563 |
482 | package io.cattle.platform.async.retry;
import io.cattle.platform.util.concurrent.DelayedObject;
import java.util.concurrent.Future;
public class Retry {
int retryCount;
int retries;
Long timeoutMillis;
Runnable runnable;
Future<?> future;
boolean keepalive = false;
public Retry(int retries, Long timeoutMillis, Future<?> future, Runnable runnable) {
super();
this.retryCount = 0;
this.retries = retries;
this.timeoutMillis = timeoutMillis;
this.runnable = runnable;
this.future = future;
}
public int getRetryCount() {
return retryCount;
}
public int getRetries() {
return retries;
}
public Long getTimeoutMillis() {
return timeoutMillis;
}
public Runnable getRunnable() {
return runnable;
}
public Future<?> getFuture() {
return future;
}
public int increment() {
return ++retryCount;
}
public void setKeepalive(boolean keepalive) {
this.keepalive = keepalive;
}
public boolean isKeepalive() {
return keepalive;
}
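    // Note (added): equals() below is overridden so that a Retry wrapped in a
    // DelayedObject compares equal to the bare Retry it carries; presumably
    // this lets callers locate or remove the entry in a delay queue without
    // unwrapping it first. This is an inference from the code, not from any
    // project documentation.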
@Override
@SuppressWarnings("rawtypes")
public boolean equals(Object obj) {
if (obj instanceof DelayedObject) {
return ((DelayedObject) obj).getObject().equals(this);
}
return super.equals(obj);
}
}
| 595 |
918 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.cluster;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.testng.annotations.Test;
import com.typesafe.config.ConfigFactory;
import org.apache.gobblin.util.GobblinProcessBuilder;
import org.apache.gobblin.util.SystemPropertiesWrapper;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class SingleTaskLauncherTest {
private static final String JOB_ID = "1";
private static final String JAVAHOME = "/javahome";
private static final String TEST_CLASS_PATH = "foo.jar:bar.jar";
private static final String WORK_UNIT_PATH = "workUnit.wu";
private static final String CLUSTER_CONFIG_CONF_PATH = "clusterConfig.conf";
@Test
public void testLaunch()
throws Exception {
final SystemPropertiesWrapper propertiesWrapper = mock(SystemPropertiesWrapper.class);
when(propertiesWrapper.getJavaHome()).thenReturn(JAVAHOME);
when(propertiesWrapper.getJavaClassPath()).thenReturn(TEST_CLASS_PATH);
final GobblinProcessBuilder processBuilder = mock(GobblinProcessBuilder.class);
final Process mockProcess = mock(Process.class);
when(processBuilder.start(any())).thenReturn(mockProcess);
final Path clusterConfPath = Paths.get(CLUSTER_CONFIG_CONF_PATH);
final SingleTaskLauncher launcher =
new SingleTaskLauncher(processBuilder, propertiesWrapper, clusterConfPath, ConfigFactory.empty());
final Path workUnitPath = Paths.get(WORK_UNIT_PATH);
final Process process = launcher.launch(JOB_ID, workUnitPath);
final List<String> expectedInput = new ArrayList<>(Arrays
.asList("/javahome/bin/java", "-cp", TEST_CLASS_PATH,
"org.apache.gobblin.cluster.SingleTaskRunnerMain", "--cluster_config_file_path",
CLUSTER_CONFIG_CONF_PATH, "--job_id", JOB_ID, "--work_unit_file_path", WORK_UNIT_PATH));
verify(processBuilder).start(expectedInput);
assertThat(process).isEqualTo(mockProcess);
}
}
| 949 |
1,223 | package carpet.mixins;
import carpet.CarpetSettings;
import net.minecraft.network.packet.c2s.play.PlayerActionC2SPacket;
import net.minecraft.server.network.ServerPlayerEntity;
import net.minecraft.server.network.ServerPlayerInteractionManager;
import net.minecraft.util.math.BlockPos;
import net.minecraft.util.math.Direction;
import net.minecraft.util.math.Vec3d;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Redirect;
@Mixin(value = ServerPlayerInteractionManager.class, priority = 69420) // not that important for carpet
public class ServerPlayerInteractionManager_antiCheatMixin
{
/*
@ModifyConstant(method = "processBlockBreakingAction", require = 0,
constant = @Constant(doubleValue = 36D))
private double addDistance(double original) {
if (CarpetSettings.antiCheatDisabled)
return 1024D; // blocks 32 distance
return original;
}
*/
    // that shouldn't have been a constant in the first place
// resolves problems with mobs using reach entity attributes.
@Redirect(method = "processBlockBreakingAction", at = @At(
value = "INVOKE",
target = "Lnet/minecraft/server/network/ServerPlayerEntity;getX()D"
))
private double getXX(ServerPlayerEntity player,
BlockPos pos, PlayerActionC2SPacket.Action action, Direction direction, int worldHeight)
{
if (CarpetSettings.antiCheatDisabled &&
player.getPos().add(0, 1.5, 0).squaredDistanceTo(Vec3d.ofCenter(pos)) < 1024
) return pos.getX()+0.5;
return player.getX();
}
@Redirect(method = "processBlockBreakingAction", at = @At(
value = "INVOKE",
target = "Lnet/minecraft/server/network/ServerPlayerEntity;getY()D"
))
private double getYY(ServerPlayerEntity player,
BlockPos pos, PlayerActionC2SPacket.Action action, Direction direction, int worldHeight)
{
if (CarpetSettings.antiCheatDisabled &&
player.getPos().add(0, 1.5, 0).squaredDistanceTo(Vec3d.ofCenter(pos)) < 1024
) return pos.getY()-1.0;
return player.getY();
}
@Redirect(method = "processBlockBreakingAction", at = @At(
value = "INVOKE",
target = "Lnet/minecraft/server/network/ServerPlayerEntity;getZ()D"
))
private double getZZ(ServerPlayerEntity player,
BlockPos pos, PlayerActionC2SPacket.Action action, Direction direction, int worldHeight)
{
if (CarpetSettings.antiCheatDisabled &&
player.getPos().add(0, 1.5, 0).squaredDistanceTo(Vec3d.ofCenter(pos)) < 1024
) return pos.getZ()+0.5;
return player.getZ();
}
}
| 1,131 |
2,062 | <reponame>TheVinhLuong102/Strawberry
from .base import BaseSchema as BaseSchema
from .schema import Schema as Schema
__all__ = ["BaseSchema", "Schema"]
| 55 |
360 | <gh_stars>100-1000
/* *
Copyright (c) 2021 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
---------------------------------------------------------------------------------------
distance_functions.cpp
Current set of distance functions that can be used (for k-means for example)
IDENTIFICATION
src/gausskernel/dbmind/db4ai/executor/distance_functions.cpp
---------------------------------------------------------------------------------------
* */
#include "postgres.h"
#include "db4ai/distance_functions.h"
#include "db4ai/fp_ops.h"
#include "db4ai/db4ai_cpu.h"
#include <cmath>
#if defined(__x86_64__) && defined(__SSE3__)
#include <pmmintrin.h>
#elif defined(__aarch64__) && defined(__ARM_NEON)
#include <arm_neon.h>
#endif
/*
* L1 distance (Manhattan)
* We sum using cascaded summation
* This version is unvectorized and is used in case vectorized instructions
 * are not available or for the case that the dimension is not a multiple
* of the width of the registers
*/
static force_inline double l1_non_vectorized(double const * p, double const * q, uint32_t const dimension)
{
double term = 0.;
double term_correction = 0.;
double distance = 0.;
double distance_correction = 0.;
twoDiff(q[0], p[0], &term, &term_correction);
term += term_correction;
// absolute value of the difference (hopefully done by clearing the sign bit)
distance = std::abs(term);
for (uint32_t d = 1; d < dimension; ++d) {
twoDiff(q[d], p[d], &term, &term_correction);
term += term_correction;
term = std::abs(term);
twoSum(distance, term, &distance, &term_correction);
distance_correction += term_correction;
}
return distance + distance_correction;
}
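/*
 * For reference, a minimal sketch of the error-free transformation assumed to
 * be provided by fp_ops.h (this is the classic Knuth two-sum, shown only to
 * illustrate why the correction terms are carried along; the real
 * implementation may differ):
 *
 *   void twoSum(double a, double b, double *s, double *e)
 *   {
 *       *s = a + b;
 *       double bv = *s - a;
 *       *e = (a - (*s - bv)) + (b - bv);  // exact rounding error of a + b
 *   }
 *
 * twoDiff() applies the same idea to a - b, and square() to a * a (typically
 * via an FMA or a Dekker-style split).
 */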
#if (defined(__x86_64__) && defined(__SSE3__)) || (defined(__aarch64__) && defined(__ARM_NEON))
/*
* L1 distance (Manhattan)
* This version is vectorized using SSE or NEON and is used in case only 128-bit
* vectorized instructions are available
*/
static double l1_128(double const * p, double const * q, uint32_t const dimension)
{
if (unlikely(dimension == 0))
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("L1 distance (128-bit): dimension must be larger than 0")));
double distance[2] = {0.};
/* the result of dimension modulo 2 */
uint32_t const dimension_remainder = dimension & 0x1U;
uint32_t const offset = 2U;
double distance_first_terms = 0.;
double local_distance_correction = 0.;
double global_distance_correction = 0.;
/*
* this will compute the very first terms of the distance
* (the ones that cannot be fully computed using simd registers, and
* thus have to be done with scalar computations)
*/
if (dimension_remainder > 0) {
/*
* this is gonna compute the 1-dimensional distance (the very first
* term in the whole distance computation)
*/
distance_first_terms = l1_non_vectorized(p, q, dimension_remainder);
}
/*
* if the dimension is < 2 then the term above is the whole distance
* otherwise we have at least one simd register we can fill
*/
if (unlikely(dimension < offset))
return distance_first_terms;
#if defined(__x86_64__)
__m128d const zero = _mm_setzero_pd();
__m128d const sign_mask = _mm_set1_pd(-0.0f);
__m128d sum = zero;
__m128d absolute_value;
__m128d sub;
__m128d p128, q128;
#else // aarch64
float64x2_t const zero = vdupq_n_f64(0);
float64x2_t sum = zero;
float64x2_t absolute_value;
float64x2_t sub;
float64x2_t p128, q128;
#endif
Assert(((dimension - dimension_remainder) & 0x1U) == 0U);
for (uint32_t d = dimension_remainder; d < dimension; d += offset) {
#if defined(__x86_64__)
p128 = _mm_loadu_pd(p + d);
q128 = _mm_loadu_pd(q + d);
sub = _mm_sub_pd(p128, q128);
/* this clears out the sign bit of sub (thus computing its absolute value) */
absolute_value = _mm_andnot_pd(sign_mask, sub);
sum = _mm_add_pd(sum, absolute_value);
#else // aarch64
p128 = vld1q_f64(p + d);
q128 = vld1q_f64(q + d);
sub = vsubq_f64(p128, q128);
        /* this clears out the sign bit of sub (thus computing its absolute value) */
absolute_value = vabsq_f64(sub);
sum = vaddq_f64(sum, absolute_value);
#endif
}
/*
* in here we end up having a register with two terms that need to be added up to produce
* the final answer, we first perform an horizontal add to reduce two to one term
*/
#if defined(__x86_64__)
sum = _mm_hadd_pd(sum, zero);
#else // aarch64
sum = vpaddq_f64(sum, zero);
#endif
/*
* we extract the remaining term [x,0] to produce the final solution
*/
#if defined(__x86_64__)
_mm_storeu_pd(distance, sum);
#else // aarch64
vst1q_f64(distance, sum);
#endif
if (dimension_remainder > 0) {
/* d[0] = d[0] + distance_first_terms */
twoSum(distance[0], distance_first_terms, distance, &local_distance_correction);
global_distance_correction += local_distance_correction;
}
return distance[0] + global_distance_correction;
}
#endif
/*
* Squared Euclidean (default)
* We sum using cascaded summation
* This version is unvectorized and is used in case vectorized instructions
 * are not available or for the case that the dimension is not a multiple
* of the width of the registers
*/
static force_inline double l2_squared_non_vectorized(double const * p, double const * q, uint32_t const dimension)
{
double subtraction = 0.;
double subtraction_correction = 0.;
double term = 0.;
double term_correction = 0.;
double distance = 0.;
double distance_correction = 0.;
twoDiff(q[0], p[0], &subtraction, &subtraction_correction);
subtraction += subtraction_correction;
square(subtraction, &term, &term_correction);
term += term_correction;
distance = term;
for (uint32_t d = 1; d < dimension; ++d) {
twoDiff(q[d], p[d], &subtraction, &subtraction_correction);
subtraction += subtraction_correction;
square(subtraction, &term, &term_correction);
term += term_correction;
twoSum(distance, term, &distance, &term_correction);
distance_correction += term_correction;
}
return distance + distance_correction;
}
#if (defined(__x86_64__) && defined(__SSE3__)) || (defined(__aarch64__) && defined(__ARM_NEON))
/*
* Squared Euclidean (default)
* This version is vectorized using SSE or NEON and is used in case only 128-bit
* vectorized instructions are available
*/
static double l2_squared_128(double const * p, double const * q, uint32_t const dimension)
{
if (unlikely(dimension == 0))
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("L2 squared distance (128-bit): dimension must be larger than 0")));
double distance[2] = {0.};
/* the result of dimension modulo 2 */
uint32_t const dimension_remainder = dimension & 0x1U;
uint32_t const offset = 2U;
double distance_first_terms = 0.;
double local_distance_correction = 0.;
double global_distance_correction = 0.;
/*
* this will compute the very first terms of the distance
* (the ones that cannot be fully computed using simd registers, and
* thus have to be done with scalar computations)
*/
if (dimension_remainder > 0) {
/*
* this is gonna compute the 1-dimensional distance (the very first
* term in the whole distance computation)
*/
distance_first_terms = l2_squared_non_vectorized(p, q, dimension_remainder);
}
/*
* if the dimension is < 2 then the term above is the whole distance
* otherwise we have at least one simd register we can fill
*/
if (unlikely(dimension < offset))
return distance_first_terms;
#if defined(__x86_64__)
__m128d const zero = _mm_setzero_pd();
__m128d sum = zero;
__m128d square;
__m128d sub;
__m128d p128, q128;
#else // aarch64
float64x2_t const zero = vdupq_n_f64(0);
float64x2_t sum = zero;
float64x2_t square;
float64x2_t sub;
float64x2_t p128, q128;
#endif
Assert(((dimension - dimension_remainder) & 0x1U) == 0U);
for (uint32_t d = dimension_remainder; d < dimension; d += offset) {
#if defined(__x86_64__)
p128 = _mm_loadu_pd(p + d);
q128 = _mm_loadu_pd(q + d);
sub = _mm_sub_pd(p128, q128);
square = _mm_mul_pd(sub, sub);
sum = _mm_add_pd(sum, square);
#else // aarch64
p128 = vld1q_f64(p + d);
q128 = vld1q_f64(q + d);
sub = vsubq_f64(p128, q128);
square = vmulq_f64(sub, sub);
sum = vaddq_f64(sum, square);
#endif
}
/*
* in here we end up having a register with two terms that need to be added up to produce
* the final answer, we first perform an horizontal add to reduce two to one term
*/
#if defined(__x86_64__)
sum = _mm_hadd_pd(sum, zero);
#else // aarch64
sum = vpaddq_f64(sum, zero);
#endif
/*
* we extract the remaining term [x,0] to produce the final solution
*/
#if defined(__x86_64__)
_mm_storeu_pd(distance, sum);
#else // aarch64
vst1q_f64(distance, sum);
#endif
if (dimension_remainder > 0) {
/* d[0] = d[0] + distance_first_terms */
twoSum(distance[0], distance_first_terms, distance, &local_distance_correction);
global_distance_correction += local_distance_correction;
}
return distance[0] + global_distance_correction;
}
#endif
/*
* L infinity distance (Chebyshev)
* This version is unvectorized and is used in case vectorized instructions
 * are not available or for the case that the dimension is not a multiple
* of the width of the registers
*/
static force_inline double linf_non_vectorized(double const * p, double const * q, uint32_t const dimension)
{
double distance = 0.;
double term = 0.;
double term_correction = 0.;
twoDiff(q[0], p[0], &term, &term_correction);
term += term_correction;
// absolute value of the difference (hopefully done by clearing the sign bit)
distance = std::abs(term);
for (uint32_t d = 1; d < dimension; ++d) {
twoDiff(q[d], p[d], &term, &term_correction);
term += term_correction;
term = std::abs(term);
distance = term > distance ? term : distance;
}
return distance;
}
#if (defined(__x86_64__) && defined(__SSE3__)) || (defined(__aarch64__) && defined(__ARM_NEON))
/*
* L infinity distance (Chebyshev)
* This version is vectorized using SSE or NEON and is used in case only 128-bit
* vectorized instructions are available
*/
static double linf_128(double const * p, double const * q, uint32_t const dimension)
{
if (unlikely(dimension == 0))
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("L infinity distance (128-bit): dimension must be larger than 0")));
double distance[2] = {0.};
/* the result of dimension modulo 2 */
uint32_t const dimension_remainder = dimension & 0x1U;
uint32_t const offset = 2U;
double distance_first_terms = 0.;
/*
* this will compute the very first terms of the distance
* (the ones that cannot be fully computed using simd registers)
*/
if (dimension_remainder > 0)
distance_first_terms = linf_non_vectorized(p, q, dimension_remainder);
/*
     * if the dimension is < 2 then the term above is the whole distance
* otherwise we have at least one simd register we can fill
*/
if (unlikely(dimension < offset))
return distance_first_terms;
#if defined(__x86_64__)
__m128d const zero = _mm_setzero_pd();
__m128d const sign_mask = _mm_set1_pd(-0.0f);
__m128d max = zero;
__m128d absolute_value;
__m128d sub;
__m128d p128, q128;
#else // aarch64
float64x2_t const zero = vdupq_n_f64(0);
float64x2_t max = zero;
float64x2_t absolute_value;
float64x2_t sub;
float64x2_t p128, q128;
#endif
Assert(((dimension - dimension_remainder) & 0x1U) == 0U);
for (uint32_t d = dimension_remainder; d < dimension; d += offset) {
#if defined(__x86_64__)
p128 = _mm_loadu_pd(p + d);
q128 = _mm_loadu_pd(q + d);
sub = _mm_sub_pd(p128, q128);
/* this clears out the sign bit of sub (thus computing its absolute value */
absolute_value = _mm_andnot_pd(sign_mask, sub);
max = _mm_max_pd(max, absolute_value);
#else // aarch64
p128 = vld1q_f64(p + d);
q128 = vld1q_f64(q + d);
sub = vsubq_f64(p128, q128);
        /* this clears out the sign bit of sub (thus computing its absolute value) */
absolute_value = vabsq_f64(sub);
max = vmaxq_f64(max, absolute_value);
#endif
}
/*
* in here we end up having a register with two terms, from which we extract the max
* to produce the final answer
*/
#if defined(__x86_64__)
_mm_storeu_pd(distance, max);
#else // aarch64
vst1q_f64(distance, max);
#endif
double result = distance_first_terms;
for (uint32_t m = 0; m < offset; ++m)
result = distance[m] > result ? distance[m] : result;
return result;
}
#endif
/*
* L1 distance (Manhattan)
* This is the main function. It will be automatically vectorized
* if possible
*/
double l1(double const * p, double const * q, uint32_t const dimension)
{
if (unlikely(dimension == 0))
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("L1 distance: dimension must be larger than 0")));
/*
     * depending on the features of the underlying processor we vectorize one way
     * or another; in the worst case we do not vectorize at all
*/
#if (defined(__x86_64__) && defined(__SSE3__)) || (defined(__aarch64__) && defined(__ARM_NEON))
return l1_128(p, q, dimension);
#else
return l1_non_vectorized(p, q, dimension);
#endif
}
/*
* Squared Euclidean (default)
* This is the main function. It will be automatically vectorized
* if possible
*/
double l2_squared(double const * p, double const * q, uint32_t const dimension)
{
if (unlikely(dimension == 0))
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("L2 squared distance: dimension must be larger than 0")));
/*
     * depending on the features of the underlying processor we vectorize one way
     * or another; in the worst case we do not vectorize at all
*/
#if (defined(__x86_64__) && defined(__SSE3__)) || (defined(__aarch64__) && defined(__ARM_NEON))
return l2_squared_128(p, q, dimension);
#else
return l2_squared_non_vectorized(p, q, dimension);
#endif
}
/*
* L2 distance (Euclidean)
* This is the main function. It will be automatically vectorized
* if possible
*/
double l2(double const * p, double const * q, uint32_t const dimension)
{
if (unlikely(dimension == 0))
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("L2 distance: dimension must be larger than 0")));
/*
* this one is vectorized automatically (or not)
*/
double const l2_sq = l2_squared(p, q, dimension);
// we can replace this with exact computation via mpfr (more costly, but the best alternative
// for fixed precision)
return std::sqrt(l2_sq);
}
/*
* L infinity distance (Chebyshev)
* This is the main function. It will be automatically vectorized
* if possible
*/
double linf(double const * p, double const * q, uint32_t const dimension)
{
if (unlikely(dimension == 0))
ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("L infinity distance: dimension must be larger than 0")));
/*
     * depending on the features of the underlying processor we vectorize one way
     * or another; in the worst case we do not vectorize at all
*/
#if (defined(__x86_64__) && defined(__SSE3__)) || (defined(__aarch64__) && defined(__ARM_NEON))
return linf_128(p, q, dimension);
#else
return linf_non_vectorized(p, q, dimension);
#endif
}
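/*
 * Minimal usage sketch (illustrative only; the vectors and the expected
 * values are made up to show how the four distances relate):
 *
 *   double p[4] = {0.0, 1.0, 2.0, 3.0};
 *   double q[4] = {1.0, 1.0, 0.0, 3.0};
 *   double d1   = l1(p, q, 4);          // 3.0
 *   double dsq  = l2_squared(p, q, 4);  // 5.0
 *   double d2   = l2(p, q, 4);          // sqrt(5.0)
 *   double dinf = linf(p, q, 4);        // 2.0
 */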
| 6,449 |
1,066 | package org.gradle.profiler.client.protocol;
public class SyncStarted extends Message {
private final int id;
public SyncStarted(int id) {
this.id = id;
}
@Override
public String toString() {
return "sync started " + id;
}
public int getId() {
return id;
}
}
| 131 |
1,405 | package com.lenovo.safecenter.utils.updateLab;
import android.content.Context;
import android.content.Intent;
import android.util.Log;
import com.lenovo.safecenter.utils.Const;
import com.lenovo.safecenter.utils.LeSafeObservable;
import com.lenovo.safecenter.utils.WflUtils;
import com.lenovo.safecenter.utils.httpApi.LeSafeAPI;
public class UpdateLabManager {
public static final String ACTION_NOTICE_QUERY_NETBLACK_LAB = "com.lenovo.antispam.netblackquery";
public static final String ACTION_NOTICE_QUERY_SIGNCALL_LAB = "com.lenovo.antispam.signcallquery";
public static final String ACTION_NOTICE_QUERY_VIRUS_LAB = "com.lenovo.antivirus.query";
public static final String ACTION_NOTICE_UPDATE_NETBLACK_LAB = "com.lenovo.antispam.netblackupdate";
public static final String ACTION_NOTICE_UPDATE_SIGNCALL_LAB = "com.lenovo.antispam.signcallupdate";
public static final String ACTION_NOTICE_UPDATE_SYS_LAB = "com.lenovo.antispam.sysupdate";
public static final String ACTION_NOTICE_UPDATE_VIRUS_LAB = "com.lenovo.antivirus.update";
public static final int UPDATE_STATUS_NET_ERROR = 2;
public static final int UPDATE_STATUS_NEWEST = 1;
public static final int UPDATE_STATUS_SUCCESS = 0;
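    /*
     * Index convention for the four state arrays below, inferred from the
     * getters and the intents fired in this class (not documented in the
     * original source): 0 = virus lab, 1 = net-black lab, 2 = sys lab,
     * 3 = sign-call lab. 'a' holds the "update in progress" flags, 'b' the
     * "query in progress" flags, 'c' the last query status and 'd' the last
     * update status per lab.
     */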
private static boolean[] a = new boolean[4];
private static boolean[] b = new boolean[4];
private static int[] c = new int[4];
private static int[] d = new int[4];
public static boolean isManualUpdateLab = false;
public static int getUpdateLabStatus(int index) {
return d[index];
}
public static void setUpdateLabStatus(int index, int state) {
d[index] = state;
}
public static int getQueryLabStatus(int index) {
return c[index];
}
public static void setQueryLabStatus(int index, int state) {
c[index] = state;
}
public static boolean getIs_QueryLabing(int index) {
return b[index];
}
public static void setIs_QueryLabing(int index, boolean isQuerying) {
b[index] = isQuerying;
}
public static boolean getIs_UpdateLabing(int index) {
return a[index];
}
public static void setIs_UpdateLabing(int index, boolean isUpdating) {
a[index] = isUpdating;
}
public static void startServiceUpdateLab(Context context) {
if (getQueryLabStatus(0) != 1 && !getIs_UpdateLabing(0)) {
Log.i("wu0wu", "startServiceUpdateVirusLab-->");
d[0] = -1;
Intent intent = new Intent();
intent.setAction(ACTION_NOTICE_UPDATE_VIRUS_LAB);
context.startService(intent);
setIs_UpdateLabing(0, true);
LeSafeObservable.get(context).noticeUpdatingLab();
}
if (getQueryLabStatus(1) != 1 && !getIs_UpdateLabing(1)) {
Log.i("wu0wu", "startServiceUpdateNetBlackLab-->");
setUpdateLabStatus(1, -1);
Intent intent2 = new Intent();
intent2.setAction(ACTION_NOTICE_UPDATE_NETBLACK_LAB);
context.startService(intent2);
setIs_UpdateLabing(1, true);
LeSafeObservable.get(context).noticeUpdatingLab();
}
if (getQueryLabStatus(2) != 1 && !getIs_UpdateLabing(2)) {
Log.i("wu0wu", "startServiceUpdateSysLab-->");
setUpdateLabStatus(2, -1);
Intent intent3 = new Intent();
intent3.setAction(ACTION_NOTICE_UPDATE_SYS_LAB);
context.startService(intent3);
setIs_UpdateLabing(2, true);
LeSafeObservable.get(context).noticeUpdatingLab();
}
if (getQueryLabStatus(3) != 1 && !getIs_UpdateLabing(3)) {
Log.i("wu0wu", "startServiceUpdateSignCallLab-->");
setUpdateLabStatus(3, -1);
Intent intent4 = new Intent();
intent4.setAction(ACTION_NOTICE_UPDATE_SIGNCALL_LAB);
context.startService(intent4);
setIs_UpdateLabing(3, true);
LeSafeObservable.get(context).noticeUpdatingLab();
}
}
public static void useWifiUpdateVirusLab(Context context) {
if (AutoUpdateLabManager.isAutoUpdate(context) && AutoUpdateLabManager.getAutoUpdateMode(context) == 0) {
Log.i("wu0wu", "WifiConnectedReceiver is_auto_update=true");
if (!WflUtils.isInTheSameDay(Const.getLastUpdateLabTime(), System.currentTimeMillis())) {
Log.i("wu0wu", "WifiConnectedReceiver isInTheSameDay=false");
if (WflUtils.isWifiNetwork(context)) {
Log.i("wu0wu", "WifiConnectedReceiver isWifiNetwork=true");
startServiceUpdateLab(context);
return;
}
return;
}
Log.i("wu0wu", "WifiConnectedReceiver isInTheSameDay=true");
}
}
public static void startServiceQueryLab(Context context) {
LeSafeAPI.getAppSync(context);
if (!hasNewVersionLab() && !isAllNewVersionLab() && !WflUtils.isInTheSameDay(Const.getDialogNoticeUpdateTime(), System.currentTimeMillis()) && !WflUtils.isInTheSameDay(Const.getLastUpdateLabTime(), System.currentTimeMillis())) {
if (!getIs_QueryLabing(0)) {
Log.i("wu0wu", "startServiceQueryVirusLab-->");
c[0] = -1;
Intent intent = new Intent();
intent.setAction(ACTION_NOTICE_QUERY_VIRUS_LAB);
context.startService(intent);
setIs_QueryLabing(0, true);
}
if (!getIs_QueryLabing(1)) {
Log.i("wu0wu", "startServiceQueryNetBlackLab-->");
setQueryLabStatus(1, -1);
Intent intent2 = new Intent();
intent2.setAction(ACTION_NOTICE_QUERY_NETBLACK_LAB);
context.startService(intent2);
setIs_QueryLabing(1, true);
}
if (!getIs_QueryLabing(2)) {
Log.i("wu0wu", "startServiceQuerySysLab-->");
setQueryLabStatus(2, -1);
Intent intent3 = new Intent();
intent3.setAction(Const.ACTION_NOTICE_QUERY_SYS_LAB);
context.startService(intent3);
setIs_QueryLabing(2, true);
}
if (!getIs_QueryLabing(3)) {
Log.i("wu0wu", "startServiceQuerySignCallLab-->");
setQueryLabStatus(3, -1);
Intent intent4 = new Intent();
intent4.setAction(ACTION_NOTICE_QUERY_SIGNCALL_LAB);
context.startService(intent4);
setIs_QueryLabing(3, true);
}
}
}
public static boolean isQueryedLab() {
return !b[0] && !b[1] && !b[2] && !b[3];
}
public static boolean isUpdateingLab() {
return a[0] || a[1] || a[2] || a[3];
}
public static void setQueryLabStatus() {
if (d[0] != 2) {
c[0] = -1;
}
if (d[1] != 2) {
c[1] = -1;
}
if (d[2] != 2) {
c[2] = -1;
}
if (d[3] != 2) {
c[3] = -1;
}
}
public static boolean hasNewVersionLab() {
return c[0] == 0 || c[1] == 0 || c[2] == 0 || c[3] == 0;
}
public static boolean hasUpdatedSuccessLab() {
return d[0] == 0 || d[1] == 0 || d[2] == 0 || d[3] == 0;
}
public static void initQueryLabStatus() {
c[0] = -1;
c[1] = -1;
c[2] = -1;
c[3] = -1;
}
public static boolean isAllNewVersionLab() {
return c[0] == 1 && c[1] == 1 && c[2] == 1 && c[3] == 1;
}
}
| 3,560 |
523 | <gh_stars>100-1000
#pragma once
namespace BWAPI
{
/** Used for converting between TilePosition coordinates and Position coordinates. */
#define TILE_SIZE 32
#define PYLON_X_RADIUS 8
#define PYLON_Y_RADIUS 5
}
| 85 |
305 | //===--- ExceptionAnalyzer.cpp - clang-tidy -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ExceptionAnalyzer.h"
namespace clang {
namespace tidy {
namespace utils {
void ExceptionAnalyzer::ExceptionInfo::registerException(
const Type *ExceptionType) {
assert(ExceptionType != nullptr && "Only valid types are accepted");
Behaviour = State::Throwing;
ThrownExceptions.insert(ExceptionType);
}
void ExceptionAnalyzer::ExceptionInfo::registerExceptions(
const Throwables &Exceptions) {
if (Exceptions.size() == 0)
return;
Behaviour = State::Throwing;
ThrownExceptions.insert(Exceptions.begin(), Exceptions.end());
}
ExceptionAnalyzer::ExceptionInfo &ExceptionAnalyzer::ExceptionInfo::merge(
const ExceptionAnalyzer::ExceptionInfo &Other) {
// Only the following two cases require an update to the local
// 'Behaviour'. If the local entity is already throwing there will be no
// change and if the other entity is throwing the merged entity will throw
// as well.
  // If one of the two entities is 'Unknown' and the other one does not throw
// the merged entity is 'Unknown' as well.
if (Other.Behaviour == State::Throwing)
Behaviour = State::Throwing;
else if (Other.Behaviour == State::Unknown && Behaviour == State::NotThrowing)
Behaviour = State::Unknown;
ContainsUnknown = ContainsUnknown || Other.ContainsUnknown;
ThrownExceptions.insert(Other.ThrownExceptions.begin(),
Other.ThrownExceptions.end());
return *this;
}
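// Resulting 'Behaviour' after ExceptionInfo::merge, summarized from the rules
// above (added for clarity):
//   Throwing    + anything     -> Throwing
//   Unknown     + NotThrowing  -> Unknown (Unknown + Unknown stays Unknown)
//   NotThrowing + NotThrowing  -> NotThrowing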
static bool isBaseOf(const Type *DerivedType, const Type *BaseType) {
const auto *DerivedClass = DerivedType->getAsCXXRecordDecl();
const auto *BaseClass = BaseType->getAsCXXRecordDecl();
if (!DerivedClass || !BaseClass)
return false;
return !DerivedClass->forallBases(
[BaseClass](const CXXRecordDecl *Cur) { return Cur != BaseClass; });
}
bool ExceptionAnalyzer::ExceptionInfo::filterByCatch(const Type *BaseClass) {
llvm::SmallVector<const Type *, 8> TypesToDelete;
for (const Type *T : ThrownExceptions) {
if (T == BaseClass || isBaseOf(T, BaseClass))
TypesToDelete.push_back(T);
}
for (const Type *T : TypesToDelete)
ThrownExceptions.erase(T);
reevaluateBehaviour();
return TypesToDelete.size() > 0;
}
ExceptionAnalyzer::ExceptionInfo &
ExceptionAnalyzer::ExceptionInfo::filterIgnoredExceptions(
const llvm::StringSet<> &IgnoredTypes, bool IgnoreBadAlloc) {
llvm::SmallVector<const Type *, 8> TypesToDelete;
// Note: Using a 'SmallSet' with 'llvm::remove_if()' is not possible.
// Therefore this slightly hacky implementation is required.
for (const Type *T : ThrownExceptions) {
if (const auto *TD = T->getAsTagDecl()) {
if (TD->getDeclName().isIdentifier()) {
if ((IgnoreBadAlloc &&
(TD->getName() == "bad_alloc" && TD->isInStdNamespace())) ||
(IgnoredTypes.count(TD->getName()) > 0))
TypesToDelete.push_back(T);
}
}
}
for (const Type *T : TypesToDelete)
ThrownExceptions.erase(T);
reevaluateBehaviour();
return *this;
}
void ExceptionAnalyzer::ExceptionInfo::clear() {
Behaviour = State::NotThrowing;
ContainsUnknown = false;
ThrownExceptions.clear();
}
void ExceptionAnalyzer::ExceptionInfo::reevaluateBehaviour() {
  if (ThrownExceptions.size() == 0) {
    if (ContainsUnknown)
      Behaviour = State::Unknown;
    else
      Behaviour = State::NotThrowing;
  } else {
    Behaviour = State::Throwing;
  }
}
ExceptionAnalyzer::ExceptionInfo ExceptionAnalyzer::throwsException(
const FunctionDecl *Func,
llvm::SmallSet<const FunctionDecl *, 32> &CallStack) {
if (CallStack.count(Func))
return ExceptionInfo::createNonThrowing();
if (const Stmt *Body = Func->getBody()) {
CallStack.insert(Func);
ExceptionInfo Result =
throwsException(Body, ExceptionInfo::Throwables(), CallStack);
CallStack.erase(Func);
return Result;
}
auto Result = ExceptionInfo::createUnknown();
if (const auto *FPT = Func->getType()->getAs<FunctionProtoType>()) {
for (const QualType &Ex : FPT->exceptions())
Result.registerException(Ex.getTypePtr());
}
return Result;
}
/// Analyzes a single statement for its throwing behaviour. This is possible in
/// principle, except when 'Unknown' functions are called.
ExceptionAnalyzer::ExceptionInfo ExceptionAnalyzer::throwsException(
const Stmt *St, const ExceptionInfo::Throwables &Caught,
llvm::SmallSet<const FunctionDecl *, 32> &CallStack) {
auto Results = ExceptionInfo::createNonThrowing();
if (!St)
return Results;
if (const auto *Throw = dyn_cast<CXXThrowExpr>(St)) {
if (const auto *ThrownExpr = Throw->getSubExpr()) {
const auto *ThrownType =
ThrownExpr->getType()->getUnqualifiedDesugaredType();
if (ThrownType->isReferenceType())
ThrownType = ThrownType->castAs<ReferenceType>()
->getPointeeType()
->getUnqualifiedDesugaredType();
      Results.registerException(ThrownType);
} else
// A rethrow of a caught exception happens which makes it possible
// to throw all exception that are caught in the 'catch' clause of
// the parent try-catch block.
Results.registerExceptions(Caught);
} else if (const auto *Try = dyn_cast<CXXTryStmt>(St)) {
ExceptionInfo Uncaught =
throwsException(Try->getTryBlock(), Caught, CallStack);
for (unsigned i = 0; i < Try->getNumHandlers(); ++i) {
const CXXCatchStmt *Catch = Try->getHandler(i);
      // Everything is caught through 'catch(...)'.
if (!Catch->getExceptionDecl()) {
ExceptionInfo Rethrown = throwsException(
Catch->getHandlerBlock(), Uncaught.getExceptionTypes(), CallStack);
Results.merge(Rethrown);
Uncaught.clear();
} else {
const auto *CaughtType =
Catch->getCaughtType()->getUnqualifiedDesugaredType();
if (CaughtType->isReferenceType()) {
CaughtType = CaughtType->castAs<ReferenceType>()
->getPointeeType()
->getUnqualifiedDesugaredType();
}
// If the caught exception will catch multiple previously potential
// thrown types (because it's sensitive to inheritance) the throwing
// situation changes. First of all filter the exception types and
// analyze if the baseclass-exception is rethrown.
if (Uncaught.filterByCatch(CaughtType)) {
ExceptionInfo::Throwables CaughtExceptions;
CaughtExceptions.insert(CaughtType);
ExceptionInfo Rethrown = throwsException(Catch->getHandlerBlock(),
CaughtExceptions, CallStack);
Results.merge(Rethrown);
}
}
}
Results.merge(Uncaught);
} else if (const auto *Call = dyn_cast<CallExpr>(St)) {
if (const FunctionDecl *Func = Call->getDirectCallee()) {
ExceptionInfo Excs = throwsException(Func, CallStack);
Results.merge(Excs);
}
} else {
for (const Stmt *Child : St->children()) {
ExceptionInfo Excs = throwsException(Child, Caught, CallStack);
Results.merge(Excs);
}
}
return Results;
}
ExceptionAnalyzer::ExceptionInfo
ExceptionAnalyzer::analyzeImpl(const FunctionDecl *Func) {
ExceptionInfo ExceptionList;
// Check if the function has already been analyzed and reuse that result.
if (FunctionCache.count(Func) == 0) {
llvm::SmallSet<const FunctionDecl *, 32> CallStack;
ExceptionList = throwsException(Func, CallStack);
// Cache the result of the analysis. This is done prior to filtering
// because it is best to keep as much information as possible.
// The results here might be relevant to different analysis passes
// with different needs as well.
FunctionCache.insert(std::make_pair(Func, ExceptionList));
} else
ExceptionList = FunctionCache[Func];
return ExceptionList;
}
ExceptionAnalyzer::ExceptionInfo
ExceptionAnalyzer::analyzeImpl(const Stmt *Stmt) {
llvm::SmallSet<const FunctionDecl *, 32> CallStack;
return throwsException(Stmt, ExceptionInfo::Throwables(), CallStack);
}
template <typename T>
ExceptionAnalyzer::ExceptionInfo
ExceptionAnalyzer::analyzeDispatch(const T *Node) {
ExceptionInfo ExceptionList = analyzeImpl(Node);
if (ExceptionList.getBehaviour() == State::NotThrowing ||
ExceptionList.getBehaviour() == State::Unknown)
return ExceptionList;
// Remove all ignored exceptions from the list of exceptions that can be
// thrown.
ExceptionList.filterIgnoredExceptions(IgnoredExceptions, IgnoreBadAlloc);
return ExceptionList;
}
ExceptionAnalyzer::ExceptionInfo
ExceptionAnalyzer::analyze(const FunctionDecl *Func) {
return analyzeDispatch(Func);
}
ExceptionAnalyzer::ExceptionInfo
ExceptionAnalyzer::analyze(const Stmt *Stmt) {
return analyzeDispatch(Stmt);
}
} // namespace utils
} // namespace tidy
} // namespace clang
| 3,197 |
1,585 | <reponame>j-xiong/ompi
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2017 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2009 University of Houston. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All Rights
* reserved.
* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2018 Siberian State University of Telecommunications
* and Information Science. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "mpi.h"
#include "opal/util/bit_ops.h"
#include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/op/op.h"
#include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_base_topo.h"
#include "coll_base_util.h"
/*
* ompi_coll_base_allreduce_intra_nonoverlapping
*
 * This function just calls a reduce followed by a broadcast.
 * Both called functions are base, but they complete sequentially,
 * i.e. with no additional overlap:
 * even if the number of segments used is greater than the topology depth,
 * once the first segment of data is fully 'reduced' it is not broadcast
 * while the reduce continues (cost = cost-reduce + cost-bcast + decision x 3)
*
*/
int
ompi_coll_base_allreduce_intra_nonoverlapping(const void *sbuf, void *rbuf, int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
int err, rank;
rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:allreduce_intra_nonoverlapping rank %d", rank));
/* Reduce to 0 and broadcast. */
if (MPI_IN_PLACE == sbuf) {
if (0 == rank) {
err = comm->c_coll->coll_reduce (MPI_IN_PLACE, rbuf, count, dtype,
op, 0, comm, comm->c_coll->coll_reduce_module);
} else {
err = comm->c_coll->coll_reduce (rbuf, NULL, count, dtype, op, 0,
comm, comm->c_coll->coll_reduce_module);
}
} else {
err = comm->c_coll->coll_reduce (sbuf, rbuf, count, dtype, op, 0,
comm, comm->c_coll->coll_reduce_module);
}
if (MPI_SUCCESS != err) {
return err;
}
return comm->c_coll->coll_bcast (rbuf, count, dtype, 0, comm,
comm->c_coll->coll_bcast_module);
}
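/*
 * Illustrative user-level equivalent of the nonoverlapping variant above
 * (a sketch only, not part of the library; error handling omitted):
 *
 *     MPI_Reduce(sbuf, rbuf, count, dtype, op, 0, comm);
 *     MPI_Bcast(rbuf, count, dtype, 0, comm);
 *
 * The MPI_IN_PLACE branch above exists because, when the caller passes
 * MPI_IN_PLACE, the input data lives in rbuf: non-root ranks then send from
 * rbuf, while the root reduces in place.
 */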
/*
* ompi_coll_base_allreduce_intra_recursivedoubling
*
* Function: Recursive doubling algorithm for allreduce operation
* Accepts: Same as MPI_Allreduce()
* Returns: MPI_SUCCESS or error code
*
* Description: Implements recursive doubling algorithm for allreduce.
* Original (non-segmented) implementation is used in MPICH-2
* for small and intermediate size messages.
* The algorithm preserves order of operations so it can
* be used both by commutative and non-commutative operations.
*
* Example on 7 nodes:
* Initial state
* # 0 1 2 3 4 5 6
* [0] [1] [2] [3] [4] [5] [6]
* Initial adjustment step for non-power of two nodes.
* old rank 1 3 5 6
* new rank 0 1 2 3
* [0+1] [2+3] [4+5] [6]
* Step 1
* old rank 1 3 5 6
* new rank 0 1 2 3
* [0+1+] [0+1+] [4+5+] [4+5+]
* [2+3+] [2+3+] [6 ] [6 ]
* Step 2
* old rank 1 3 5 6
* new rank 0 1 2 3
* [0+1+] [0+1+] [0+1+] [0+1+]
* [2+3+] [2+3+] [2+3+] [2+3+]
* [4+5+] [4+5+] [4+5+] [4+5+]
* [6 ] [6 ] [6 ] [6 ]
* Final adjustment step for non-power of two nodes
* # 0 1 2 3 4 5 6
* [0+1+] [0+1+] [0+1+] [0+1+] [0+1+] [0+1+] [0+1+]
* [2+3+] [2+3+] [2+3+] [2+3+] [2+3+] [2+3+] [2+3+]
* [4+5+] [4+5+] [4+5+] [4+5+] [4+5+] [4+5+] [4+5+]
* [6 ] [6 ] [6 ] [6 ] [6 ] [6 ] [6 ]
*
*/
int
ompi_coll_base_allreduce_intra_recursivedoubling(const void *sbuf, void *rbuf,
int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
int ret, line, rank, size, adjsize, remote, distance;
int newrank, newremote, extra_ranks;
char *tmpsend = NULL, *tmprecv = NULL, *tmpswap = NULL, *inplacebuf_free = NULL, *inplacebuf;
ptrdiff_t span, gap = 0;
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:base:allreduce_intra_recursivedoubling rank %d", rank));
/* Special case for size == 1 */
if (1 == size) {
if (MPI_IN_PLACE != sbuf) {
ret = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, (char*)sbuf);
if (ret < 0) { line = __LINE__; goto error_hndl; }
}
return MPI_SUCCESS;
}
/* Allocate and initialize temporary send buffer */
span = opal_datatype_span(&dtype->super, count, &gap);
inplacebuf_free = (char*) malloc(span);
if (NULL == inplacebuf_free) { ret = -1; line = __LINE__; goto error_hndl; }
inplacebuf = inplacebuf_free - gap;
if (MPI_IN_PLACE == sbuf) {
ret = ompi_datatype_copy_content_same_ddt(dtype, count, inplacebuf, (char*)rbuf);
if (ret < 0) { line = __LINE__; goto error_hndl; }
} else {
ret = ompi_datatype_copy_content_same_ddt(dtype, count, inplacebuf, (char*)sbuf);
if (ret < 0) { line = __LINE__; goto error_hndl; }
}
tmpsend = (char*) inplacebuf;
tmprecv = (char*) rbuf;
/* Determine nearest power of two less than or equal to size */
adjsize = opal_next_poweroftwo (size);
adjsize >>= 1;
    /* Handle non-power-of-two case:
       - Even ranks less than 2 * extra_ranks send their data to (rank + 1) and
       set their new rank to -1.
       - Odd ranks less than 2 * extra_ranks receive data from (rank - 1),
       apply the appropriate operation, and set their new rank to rank / 2.
       - Everyone else sets their new rank to rank - extra_ranks.
    */
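    /* Worked example (illustrative): for size = 7, adjsize = 4 and
       extra_ranks = 3.  Even ranks 0, 2, 4 send to 1, 3, 5 and drop out
       (newrank = -1); odd ranks 1, 3, 5 become newrank 0, 1, 2; rank 6
       becomes newrank 6 - 3 = 3.  This matches the "old rank / new rank"
       rows in the example above. */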
extra_ranks = size - adjsize;
if (rank < (2 * extra_ranks)) {
if (0 == (rank % 2)) {
ret = MCA_PML_CALL(send(tmpsend, count, dtype, (rank + 1),
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
newrank = -1;
} else {
ret = MCA_PML_CALL(recv(tmprecv, count, dtype, (rank - 1),
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* tmpsend = tmprecv (op) tmpsend */
ompi_op_reduce(op, tmprecv, tmpsend, count, dtype);
newrank = rank >> 1;
}
} else {
newrank = rank - extra_ranks;
}
/* Communication/Computation loop
- Exchange message with remote node.
       - Perform the appropriate operation, taking into account the order of operations:
result = value (op) result
*/
for (distance = 0x1; distance < adjsize; distance <<=1) {
if (newrank < 0) break;
/* Determine remote node */
newremote = newrank ^ distance;
remote = (newremote < extra_ranks)?
(newremote * 2 + 1):(newremote + extra_ranks);
/* Exchange the data */
ret = ompi_coll_base_sendrecv_actual(tmpsend, count, dtype, remote,
MCA_COLL_BASE_TAG_ALLREDUCE,
tmprecv, count, dtype, remote,
MCA_COLL_BASE_TAG_ALLREDUCE,
comm, MPI_STATUS_IGNORE);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Apply operation */
if (rank < remote) {
/* tmprecv = tmpsend (op) tmprecv */
ompi_op_reduce(op, tmpsend, tmprecv, count, dtype);
tmpswap = tmprecv;
tmprecv = tmpsend;
tmpsend = tmpswap;
} else {
/* tmpsend = tmprecv (op) tmpsend */
ompi_op_reduce(op, tmprecv, tmpsend, count, dtype);
}
}
/* Handle non-power-of-two case:
- Odd ranks less than 2 * extra_ranks send result from tmpsend to
(rank - 1)
- Even ranks less than 2 * extra_ranks receive result from (rank + 1)
*/
if (rank < (2 * extra_ranks)) {
if (0 == (rank % 2)) {
ret = MCA_PML_CALL(recv(rbuf, count, dtype, (rank + 1),
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
tmpsend = (char*)rbuf;
} else {
ret = MCA_PML_CALL(send(tmpsend, count, dtype, (rank - 1),
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
}
}
/* Ensure that the final result is in rbuf */
if (tmpsend != rbuf) {
ret = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, tmpsend);
if (ret < 0) { line = __LINE__; goto error_hndl; }
}
if (NULL != inplacebuf_free) free(inplacebuf_free);
return MPI_SUCCESS;
error_hndl:
OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tRank %d Error occurred %d\n",
__FILE__, line, rank, ret));
(void)line; // silence compiler warning
if (NULL != inplacebuf_free) free(inplacebuf_free);
return ret;
}
/*
* ompi_coll_base_allreduce_intra_ring
*
* Function: Ring algorithm for allreduce operation
* Accepts: Same as MPI_Allreduce()
* Returns: MPI_SUCCESS or error code
*
* Description: Implements ring algorithm for allreduce: the message is
 *                 automatically segmented into segments of size M/N.
 *                 The algorithm requires 2*(N - 1) steps.
*
* Limitations: The algorithm DOES NOT preserve order of operations so it
* can be used only for commutative operations.
 *                 In addition, the algorithm cannot work if the total count is
* less than size.
* Example on 5 nodes:
* Initial state
* # 0 1 2 3 4
* [00] [10] [20] [30] [40]
* [01] [11] [21] [31] [41]
* [02] [12] [22] [32] [42]
* [03] [13] [23] [33] [43]
* [04] [14] [24] [34] [44]
*
* COMPUTATION PHASE
 *        Step 0: rank r sends block r to rank (r+1) and receives block (r-1)
* from rank (r-1) [with wraparound].
* # 0 1 2 3 4
* [00] [00+10] [20] [30] [40]
* [01] [11] [11+21] [31] [41]
* [02] [12] [22] [22+32] [42]
* [03] [13] [23] [33] [33+43]
* [44+04] [14] [24] [34] [44]
*
 *        Step 1: rank r sends block (r-1) to rank (r+1) and receives block
* (r-2) from rank (r-1) [with wraparound].
* # 0 1 2 3 4
* [00] [00+10] [01+10+20] [30] [40]
* [01] [11] [11+21] [11+21+31] [41]
* [02] [12] [22] [22+32] [22+32+42]
* [33+43+03] [13] [23] [33] [33+43]
* [44+04] [44+04+14] [24] [34] [44]
*
 *        Step 2: rank r sends block (r-2) to rank (r+1) and receives block
 *                (r-3) from rank (r-1) [with wraparound].
* # 0 1 2 3 4
* [00] [00+10] [01+10+20] [01+10+20+30] [40]
* [01] [11] [11+21] [11+21+31] [11+21+31+41]
* [22+32+42+02] [12] [22] [22+32] [22+32+42]
* [33+43+03] [33+43+03+13] [23] [33] [33+43]
* [44+04] [44+04+14] [44+04+14+24] [34] [44]
*
 *        Step 3: rank r sends block (r-3) to rank (r+1) and receives block
 *                (r-4) from rank (r-1) [with wraparound].
* # 0 1 2 3 4
* [00] [00+10] [01+10+20] [01+10+20+30] [FULL]
* [FULL] [11] [11+21] [11+21+31] [11+21+31+41]
* [22+32+42+02] [FULL] [22] [22+32] [22+32+42]
* [33+43+03] [33+43+03+13] [FULL] [33] [33+43]
* [44+04] [44+04+14] [44+04+14+24] [FULL] [44]
*
* DISTRIBUTION PHASE: ring ALLGATHER with ranks shifted by 1.
*
*/
int
ompi_coll_base_allreduce_intra_ring(const void *sbuf, void *rbuf, int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
int ret, line, rank, size, k, recv_from, send_to, block_count, inbi;
int early_segcount, late_segcount, split_rank, max_segcount;
size_t typelng;
char *tmpsend = NULL, *tmprecv = NULL, *inbuf[2] = {NULL, NULL};
ptrdiff_t true_lb, true_extent, lb, extent;
ptrdiff_t block_offset, max_real_segsize;
ompi_request_t *reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:base:allreduce_intra_ring rank %d, count %d", rank, count));
/* Special case for size == 1 */
if (1 == size) {
if (MPI_IN_PLACE != sbuf) {
ret = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, (char*)sbuf);
if (ret < 0) { line = __LINE__; goto error_hndl; }
}
return MPI_SUCCESS;
}
/* Special case for count less than size - use recursive doubling */
if (count < size) {
OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:allreduce_ring rank %d/%d, count %d, switching to recursive doubling", rank, size, count));
return (ompi_coll_base_allreduce_intra_recursivedoubling(sbuf, rbuf,
count,
dtype, op,
comm, module));
}
/* Allocate and initialize temporary buffers */
ret = ompi_datatype_get_extent(dtype, &lb, &extent);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
ret = ompi_datatype_get_true_extent(dtype, &true_lb, &true_extent);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
ret = ompi_datatype_type_size( dtype, &typelng);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Determine the number of elements per block and corresponding
block sizes.
The blocks are divided into "early" and "late" ones:
blocks 0 .. (split_rank - 1) are "early" and
blocks (split_rank) .. (size - 1) are "late".
Early blocks are at most 1 element larger than the late ones.
*/
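    /* Worked example (illustrative): for count = 10 and size = 4 the split
       described above gives split_rank = 2, early_segcount = 3 and
       late_segcount = 2, i.e. per-rank block sizes of 3, 3, 2, 2. */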
COLL_BASE_COMPUTE_BLOCKCOUNT( count, size, split_rank,
early_segcount, late_segcount );
max_segcount = early_segcount;
max_real_segsize = true_extent + (max_segcount - 1) * extent;
inbuf[0] = (char*)malloc(max_real_segsize);
if (NULL == inbuf[0]) { ret = -1; line = __LINE__; goto error_hndl; }
if (size > 2) {
inbuf[1] = (char*)malloc(max_real_segsize);
if (NULL == inbuf[1]) { ret = -1; line = __LINE__; goto error_hndl; }
}
/* Handle MPI_IN_PLACE */
if (MPI_IN_PLACE != sbuf) {
ret = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, (char*)sbuf);
if (ret < 0) { line = __LINE__; goto error_hndl; }
}
/* Computation loop */
/*
For each of the remote nodes:
- post irecv for block (r-1)
- send block (r)
- in loop for every step k = 2 .. n
- post irecv for block (r + n - k) % n
- wait on block (r + n - k + 1) % n to arrive
- compute on block (r + n - k + 1) % n
- send block (r + n - k + 1) % n
- wait on block (r + 1)
- compute on block (r + 1)
- send block (r + 1) to rank (r + 1)
       Note that we must be careful when computing the beginning of buffers, and
       that for send operations and computation we must use the exact block size.
*/
send_to = (rank + 1) % size;
recv_from = (rank + size - 1) % size;
inbi = 0;
/* Initialize first receive from the neighbor on the left */
ret = MCA_PML_CALL(irecv(inbuf[inbi], max_segcount, dtype, recv_from,
MCA_COLL_BASE_TAG_ALLREDUCE, comm, &reqs[inbi]));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Send first block (my block) to the neighbor on the right */
block_offset = ((rank < split_rank)?
((ptrdiff_t)rank * (ptrdiff_t)early_segcount) :
((ptrdiff_t)rank * (ptrdiff_t)late_segcount + split_rank));
block_count = ((rank < split_rank)? early_segcount : late_segcount);
tmpsend = ((char*)rbuf) + block_offset * extent;
ret = MCA_PML_CALL(send(tmpsend, block_count, dtype, send_to,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
for (k = 2; k < size; k++) {
const int prevblock = (rank + size - k + 1) % size;
inbi = inbi ^ 0x1;
/* Post irecv for the current block */
ret = MCA_PML_CALL(irecv(inbuf[inbi], max_segcount, dtype, recv_from,
MCA_COLL_BASE_TAG_ALLREDUCE, comm, &reqs[inbi]));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Wait on previous block to arrive */
ret = ompi_request_wait(&reqs[inbi ^ 0x1], MPI_STATUS_IGNORE);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Apply operation on previous block: result goes to rbuf
rbuf[prevblock] = inbuf[inbi ^ 0x1] (op) rbuf[prevblock]
*/
block_offset = ((prevblock < split_rank)?
((ptrdiff_t)prevblock * early_segcount) :
((ptrdiff_t)prevblock * late_segcount + split_rank));
block_count = ((prevblock < split_rank)? early_segcount : late_segcount);
tmprecv = ((char*)rbuf) + (ptrdiff_t)block_offset * extent;
ompi_op_reduce(op, inbuf[inbi ^ 0x1], tmprecv, block_count, dtype);
/* send previous block to send_to */
ret = MCA_PML_CALL(send(tmprecv, block_count, dtype, send_to,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
}
/* Wait on the last block to arrive */
ret = ompi_request_wait(&reqs[inbi], MPI_STATUS_IGNORE);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Apply operation on the last block (from neighbor (rank + 1)
rbuf[rank+1] = inbuf[inbi] (op) rbuf[rank + 1] */
recv_from = (rank + 1) % size;
block_offset = ((recv_from < split_rank)?
((ptrdiff_t)recv_from * early_segcount) :
((ptrdiff_t)recv_from * late_segcount + split_rank));
block_count = ((recv_from < split_rank)? early_segcount : late_segcount);
tmprecv = ((char*)rbuf) + (ptrdiff_t)block_offset * extent;
ompi_op_reduce(op, inbuf[inbi], tmprecv, block_count, dtype);
/* Distribution loop - variation of ring allgather */
send_to = (rank + 1) % size;
recv_from = (rank + size - 1) % size;
for (k = 0; k < size - 1; k++) {
const int recv_data_from = (rank + size - k) % size;
const int send_data_from = (rank + 1 + size - k) % size;
const int send_block_offset =
((send_data_from < split_rank)?
((ptrdiff_t)send_data_from * early_segcount) :
((ptrdiff_t)send_data_from * late_segcount + split_rank));
const int recv_block_offset =
((recv_data_from < split_rank)?
((ptrdiff_t)recv_data_from * early_segcount) :
((ptrdiff_t)recv_data_from * late_segcount + split_rank));
block_count = ((send_data_from < split_rank)?
early_segcount : late_segcount);
tmprecv = (char*)rbuf + (ptrdiff_t)recv_block_offset * extent;
tmpsend = (char*)rbuf + (ptrdiff_t)send_block_offset * extent;
ret = ompi_coll_base_sendrecv(tmpsend, block_count, dtype, send_to,
MCA_COLL_BASE_TAG_ALLREDUCE,
tmprecv, max_segcount, dtype, recv_from,
MCA_COLL_BASE_TAG_ALLREDUCE,
comm, MPI_STATUS_IGNORE, rank);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl;}
}
if (NULL != inbuf[0]) free(inbuf[0]);
if (NULL != inbuf[1]) free(inbuf[1]);
return MPI_SUCCESS;
error_hndl:
OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tRank %d Error occurred %d\n",
__FILE__, line, rank, ret));
ompi_coll_base_free_reqs(reqs, 2);
(void)line; // silence compiler warning
if (NULL != inbuf[0]) free(inbuf[0]);
if (NULL != inbuf[1]) free(inbuf[1]);
return ret;
}
/*
* ompi_coll_base_allreduce_intra_ring_segmented
*
* Function: Pipelined ring algorithm for allreduce operation
* Accepts: Same as MPI_Allreduce(), segment size
* Returns: MPI_SUCCESS or error code
*
* Description: Implements pipelined ring algorithm for allreduce:
* user supplies suggested segment size for the pipelining of
* reduce operation.
* The segment size determines the number of phases, np, for
* the algorithm execution.
* The message is automatically divided into blocks of
* approximately (count / (np * segcount)) elements.
* At the end of reduction phase, allgather like step is
* executed.
* Algorithm requires (np + 1)*(N - 1) steps.
*
* Limitations: The algorithm DOES NOT preserve order of operations so it
* can be used only for commutative operations.
 *                        In addition, the algorithm cannot work if the total count is
 *                        less than size * segment count.
* Example on 3 nodes with 2 phases
* Initial state
* # 0 1 2
* [00a] [10a] [20a]
* [00b] [10b] [20b]
* [01a] [11a] [21a]
* [01b] [11b] [21b]
* [02a] [12a] [22a]
* [02b] [12b] [22b]
*
* COMPUTATION PHASE 0 (a)
 *        Step 0: rank r sends block ra to rank (r+1) and receives block (r-1)a
* from rank (r-1) [with wraparound].
* # 0 1 2
* [00a] [00a+10a] [20a]
* [00b] [10b] [20b]
* [01a] [11a] [11a+21a]
* [01b] [11b] [21b]
* [22a+02a] [12a] [22a]
* [02b] [12b] [22b]
*
 *        Step 1: rank r sends block (r-1)a to rank (r+1) and receives block
* (r-2)a from rank (r-1) [with wraparound].
* # 0 1 2
* [00a] [00a+10a] [00a+10a+20a]
* [00b] [10b] [20b]
* [11a+21a+01a] [11a] [11a+21a]
* [01b] [11b] [21b]
* [22a+02a] [22a+02a+12a] [22a]
* [02b] [12b] [22b]
*
* COMPUTATION PHASE 1 (b)
 *        Step 0: rank r sends block rb to rank (r+1) and receives block (r-1)b
* from rank (r-1) [with wraparound].
* # 0 1 2
* [00a] [00a+10a] [20a]
* [00b] [00b+10b] [20b]
* [01a] [11a] [11a+21a]
* [01b] [11b] [11b+21b]
* [22a+02a] [12a] [22a]
* [22b+02b] [12b] [22b]
*
 *        Step 1: rank r sends block (r-1)b to rank (r+1) and receives block
* (r-2)b from rank (r-1) [with wraparound].
* # 0 1 2
* [00a] [00a+10a] [00a+10a+20a]
 *        [00b]          [00b+10b]       [00b+10b+20b]
 *        [11a+21a+01a]    [11a]           [11a+21a]
 *        [11b+21b+01b]    [11b]           [11b+21b]
 *        [22a+02a]      [22a+02a+12a]      [22a]
 *        [22b+02b]      [22b+02b+12b]      [22b]
*
*
* DISTRIBUTION PHASE: ring ALLGATHER with ranks shifted by 1 (same as
 *                       in regular ring algorithm).
*
*/
int
ompi_coll_base_allreduce_intra_ring_segmented(const void *sbuf, void *rbuf, int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
uint32_t segsize)
{
int ret, line, rank, size, k, recv_from, send_to;
int early_blockcount, late_blockcount, split_rank;
int segcount, max_segcount, num_phases, phase, block_count, inbi;
size_t typelng;
char *tmpsend = NULL, *tmprecv = NULL, *inbuf[2] = {NULL, NULL};
ptrdiff_t block_offset, max_real_segsize;
ompi_request_t *reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
ptrdiff_t lb, extent, gap;
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:base:allreduce_intra_ring_segmented rank %d, count %d", rank, count));
/* Special case for size == 1 */
if (1 == size) {
if (MPI_IN_PLACE != sbuf) {
ret = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, (char*)sbuf);
if (ret < 0) { line = __LINE__; goto error_hndl; }
}
return MPI_SUCCESS;
}
/* Determine segment count based on the suggested segment size */
ret = ompi_datatype_type_size( dtype, &typelng);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
segcount = count;
COLL_BASE_COMPUTED_SEGCOUNT(segsize, typelng, segcount)
/* Special case for count less than size * segcount - use regular ring */
if (count < (size * segcount)) {
OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:allreduce_ring_segmented rank %d/%d, count %d, switching to regular ring", rank, size, count));
return (ompi_coll_base_allreduce_intra_ring(sbuf, rbuf, count, dtype, op,
comm, module));
}
/* Determine the number of phases of the algorithm */
num_phases = count / (size * segcount);
if ((count % (size * segcount) >= size) &&
(count % (size * segcount) > ((size * segcount) / 2))) {
num_phases++;
}
/* Determine the number of elements per block and corresponding
block sizes.
The blocks are divided into "early" and "late" ones:
blocks 0 .. (split_rank - 1) are "early" and
blocks (split_rank) .. (size - 1) are "late".
Early blocks are at most 1 element larger than the late ones.
       Note that these blocks will be split into num_phases segments,
       of which the largest one will have max_segcount elements.
*/
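    /* Worked example (illustrative): count = 100, size = 5, segcount = 10
       gives num_phases = 2; every rank owns a 20-element block
       (split_rank = 0, early = late = 20) and processes it in two phases
       of max_segcount = 10 elements each. */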
COLL_BASE_COMPUTE_BLOCKCOUNT( count, size, split_rank,
early_blockcount, late_blockcount );
COLL_BASE_COMPUTE_BLOCKCOUNT( early_blockcount, num_phases, inbi,
max_segcount, k);
ret = ompi_datatype_get_extent(dtype, &lb, &extent);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
max_real_segsize = opal_datatype_span(&dtype->super, max_segcount, &gap);
/* Allocate and initialize temporary buffers */
inbuf[0] = (char*)malloc(max_real_segsize);
if (NULL == inbuf[0]) { ret = -1; line = __LINE__; goto error_hndl; }
if (size > 2) {
inbuf[1] = (char*)malloc(max_real_segsize);
if (NULL == inbuf[1]) { ret = -1; line = __LINE__; goto error_hndl; }
}
/* Handle MPI_IN_PLACE */
if (MPI_IN_PLACE != sbuf) {
ret = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, (char*)sbuf);
if (ret < 0) { line = __LINE__; goto error_hndl; }
}
/* Computation loop: for each phase, repeat ring allreduce computation loop */
for (phase = 0; phase < num_phases; phase ++) {
ptrdiff_t phase_offset;
int early_phase_segcount, late_phase_segcount, split_phase, phase_count;
/*
For each of the remote nodes:
- post irecv for block (r-1)
- send block (r)
To do this, first compute block offset and count, and use block offset
to compute phase offset.
- in loop for every step k = 2 .. n
- post irecv for block (r + n - k) % n
- wait on block (r + n - k + 1) % n to arrive
- compute on block (r + n - k + 1) % n
- send block (r + n - k + 1) % n
- wait on block (r + 1)
- compute on block (r + 1)
- send block (r + 1) to rank (r + 1)
       Note that we must be careful when computing the beginning of buffers, and
       that for send operations and computation we must use the exact block size.
*/
send_to = (rank + 1) % size;
recv_from = (rank + size - 1) % size;
inbi = 0;
/* Initialize first receive from the neighbor on the left */
ret = MCA_PML_CALL(irecv(inbuf[inbi], max_segcount, dtype, recv_from,
MCA_COLL_BASE_TAG_ALLREDUCE, comm, &reqs[inbi]));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Send first block (my block) to the neighbor on the right:
- compute my block and phase offset
- send data */
block_offset = ((rank < split_rank)?
((ptrdiff_t)rank * (ptrdiff_t)early_blockcount) :
((ptrdiff_t)rank * (ptrdiff_t)late_blockcount + split_rank));
block_count = ((rank < split_rank)? early_blockcount : late_blockcount);
COLL_BASE_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
early_phase_segcount, late_phase_segcount)
phase_count = ((phase < split_phase)?
(early_phase_segcount) : (late_phase_segcount));
phase_offset = ((phase < split_phase)?
((ptrdiff_t)phase * (ptrdiff_t)early_phase_segcount) :
((ptrdiff_t)phase * (ptrdiff_t)late_phase_segcount + split_phase));
tmpsend = ((char*)rbuf) + (ptrdiff_t)(block_offset + phase_offset) * extent;
ret = MCA_PML_CALL(send(tmpsend, phase_count, dtype, send_to,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
for (k = 2; k < size; k++) {
const int prevblock = (rank + size - k + 1) % size;
inbi = inbi ^ 0x1;
/* Post irecv for the current block */
ret = MCA_PML_CALL(irecv(inbuf[inbi], max_segcount, dtype, recv_from,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
&reqs[inbi]));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Wait on previous block to arrive */
ret = ompi_request_wait(&reqs[inbi ^ 0x1], MPI_STATUS_IGNORE);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Apply operation on previous block: result goes to rbuf
rbuf[prevblock] = inbuf[inbi ^ 0x1] (op) rbuf[prevblock]
*/
block_offset = ((prevblock < split_rank)?
((ptrdiff_t)prevblock * (ptrdiff_t)early_blockcount) :
((ptrdiff_t)prevblock * (ptrdiff_t)late_blockcount + split_rank));
block_count = ((prevblock < split_rank)?
early_blockcount : late_blockcount);
COLL_BASE_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
early_phase_segcount, late_phase_segcount)
phase_count = ((phase < split_phase)?
(early_phase_segcount) : (late_phase_segcount));
phase_offset = ((phase < split_phase)?
((ptrdiff_t)phase * (ptrdiff_t)early_phase_segcount) :
((ptrdiff_t)phase * (ptrdiff_t)late_phase_segcount + split_phase));
tmprecv = ((char*)rbuf) + (ptrdiff_t)(block_offset + phase_offset) * extent;
ompi_op_reduce(op, inbuf[inbi ^ 0x1], tmprecv, phase_count, dtype);
/* send previous block to send_to */
ret = MCA_PML_CALL(send(tmprecv, phase_count, dtype, send_to,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
}
/* Wait on the last block to arrive */
ret = ompi_request_wait(&reqs[inbi], MPI_STATUS_IGNORE);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
/* Apply operation on the last block (from neighbor (rank + 1)
rbuf[rank+1] = inbuf[inbi] (op) rbuf[rank + 1] */
recv_from = (rank + 1) % size;
block_offset = ((recv_from < split_rank)?
((ptrdiff_t)recv_from * (ptrdiff_t)early_blockcount) :
((ptrdiff_t)recv_from * (ptrdiff_t)late_blockcount + split_rank));
block_count = ((recv_from < split_rank)?
early_blockcount : late_blockcount);
COLL_BASE_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
early_phase_segcount, late_phase_segcount)
phase_count = ((phase < split_phase)?
(early_phase_segcount) : (late_phase_segcount));
phase_offset = ((phase < split_phase)?
((ptrdiff_t)phase * (ptrdiff_t)early_phase_segcount) :
((ptrdiff_t)phase * (ptrdiff_t)late_phase_segcount + split_phase));
tmprecv = ((char*)rbuf) + (ptrdiff_t)(block_offset + phase_offset) * extent;
ompi_op_reduce(op, inbuf[inbi], tmprecv, phase_count, dtype);
}
/* Distribution loop - variation of ring allgather */
send_to = (rank + 1) % size;
recv_from = (rank + size - 1) % size;
for (k = 0; k < size - 1; k++) {
const int recv_data_from = (rank + size - k) % size;
const int send_data_from = (rank + 1 + size - k) % size;
const int send_block_offset =
((send_data_from < split_rank)?
((ptrdiff_t)send_data_from * (ptrdiff_t)early_blockcount) :
((ptrdiff_t)send_data_from * (ptrdiff_t)late_blockcount + split_rank));
const int recv_block_offset =
((recv_data_from < split_rank)?
((ptrdiff_t)recv_data_from * (ptrdiff_t)early_blockcount) :
((ptrdiff_t)recv_data_from * (ptrdiff_t)late_blockcount + split_rank));
block_count = ((send_data_from < split_rank)?
early_blockcount : late_blockcount);
tmprecv = (char*)rbuf + (ptrdiff_t)recv_block_offset * extent;
tmpsend = (char*)rbuf + (ptrdiff_t)send_block_offset * extent;
ret = ompi_coll_base_sendrecv(tmpsend, block_count, dtype, send_to,
MCA_COLL_BASE_TAG_ALLREDUCE,
tmprecv, early_blockcount, dtype, recv_from,
MCA_COLL_BASE_TAG_ALLREDUCE,
comm, MPI_STATUS_IGNORE, rank);
if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl;}
}
if (NULL != inbuf[0]) free(inbuf[0]);
if (NULL != inbuf[1]) free(inbuf[1]);
return MPI_SUCCESS;
error_hndl:
OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tRank %d Error occurred %d\n",
__FILE__, line, rank, ret));
ompi_coll_base_free_reqs(reqs, 2);
(void)line; // silence compiler warning
if (NULL != inbuf[0]) free(inbuf[0]);
if (NULL != inbuf[1]) free(inbuf[1]);
return ret;
}
/*
 * Linear functions are copied from the BASIC coll module.
 * They do not segment the message and are simple implementations,
 * but for some small numbers of nodes and/or small data sizes they
 * are just as fast as base/tree-based segmenting operations
 * and as such may be selected by the decision functions.
 * These are copied into this module due to the way we select modules
 * in V1, i.e. in V2 we will handle this differently and so will not
 * have to duplicate code.
* GEF Oct05 after asking Jeff.
*/
/* copied function (with appropriate renaming) starts here */
/*
* allreduce_intra
*
* Function: - allreduce using other MPI collectives
* Accepts: - same as MPI_Allreduce()
* Returns: - MPI_SUCCESS or error code
*/
int
ompi_coll_base_allreduce_intra_basic_linear(const void *sbuf, void *rbuf, int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
int err, rank;
rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:allreduce_intra_basic_linear rank %d", rank));
/* Reduce to 0 and broadcast. */
if (MPI_IN_PLACE == sbuf) {
if (0 == rank) {
err = ompi_coll_base_reduce_intra_basic_linear (MPI_IN_PLACE, rbuf, count, dtype,
op, 0, comm, module);
} else {
err = ompi_coll_base_reduce_intra_basic_linear(rbuf, NULL, count, dtype,
op, 0, comm, module);
}
} else {
err = ompi_coll_base_reduce_intra_basic_linear(sbuf, rbuf, count, dtype,
op, 0, comm, module);
}
if (MPI_SUCCESS != err) {
return err;
}
return ompi_coll_base_bcast_intra_basic_linear(rbuf, count, dtype, 0, comm, module);
}
/*
* ompi_coll_base_allreduce_intra_redscat_allgather
*
* Function: Allreduce using Rabenseifner's algorithm.
* Accepts: Same arguments as MPI_Allreduce
* Returns: MPI_SUCCESS or error code
*
* Description: an implementation of Rabenseifner's allreduce algorithm [1, 2].
* [1] <NAME>, <NAME> and <NAME>.
* Optimization of Collective Communication Operations in MPICH //
* The Int. Journal of High Performance Computing Applications. Vol 19,
* Issue 1, pp. 49--66.
* [2] http://www.hlrs.de/mpi/myreduce.html.
*
* This algorithm is a combination of a reduce-scatter implemented with
 *               recursive vector halving and recursive distance doubling, followed
 *               by an allgather implemented with recursive doubling [1].
*
* Step 1. If the number of processes is not a power of two, reduce it to
* the nearest lower power of two (p' = 2^{\floor{\log_2 p}})
* by removing r = p - p' extra processes as follows. In the first 2r processes
* (ranks 0 to 2r - 1), all the even ranks send the second half of the input
* vector to their right neighbor (rank + 1), and all the odd ranks send
* the first half of the input vector to their left neighbor (rank - 1).
* The even ranks compute the reduction on the first half of the vector and
* the odd ranks compute the reduction on the second half. The odd ranks then
* send the result to their left neighbors (the even ranks). As a result,
* the even ranks among the first 2r processes now contain the reduction with
* the input vector on their right neighbors (the odd ranks). These odd ranks
* do not participate in the rest of the algorithm, which leaves behind
* a power-of-two number of processes. The first r even-ranked processes and
* the last p - 2r processes are now renumbered from 0 to p' - 1.
*
* Step 2. The remaining processes now perform a reduce-scatter by using
* recursive vector halving and recursive distance doubling. The even-ranked
* processes send the second half of their buffer to rank + 1 and the odd-ranked
* processes send the first half of their buffer to rank - 1. All processes
* then compute the reduction between the local buffer and the received buffer.
* In the next log_2(p') - 1 steps, the buffers are recursively halved, and the
* distance is doubled. At the end, each of the p' processes has 1 / p' of the
* total reduction result.
*
* Step 3. An allgather is performed by using recursive vector doubling and
* distance halving. All exchanges are executed in reverse order relative
* to recursive doubling on previous step. If the number of processes is not
* a power of two, the total result vector must be sent to the r processes
* that were removed in the first step.
*
* Limitations:
* count >= 2^{\floor{\log_2 p}}
* commutative operations only
* intra-communicators only
*
* Memory requirements (per process):
* count * typesize + 4 * \log_2(p) * sizeof(int) = O(count)
*/
int ompi_coll_base_allreduce_intra_redscat_allgather(
const void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
int *rindex = NULL, *rcount = NULL, *sindex = NULL, *scount = NULL;
int comm_size = ompi_comm_size(comm);
int rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:base:allreduce_intra_redscat_allgather: rank %d/%d",
rank, comm_size));
/* Find nearest power-of-two less than or equal to comm_size */
int nsteps = opal_hibit(comm_size, comm->c_cube_dim + 1); /* ilog2(comm_size) */
assert(nsteps >= 0);
int nprocs_pof2 = 1 << nsteps; /* flp2(comm_size) */
if (count < nprocs_pof2 || !ompi_op_is_commute(op)) {
OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:base:allreduce_intra_redscat_allgather: rank %d/%d "
"count %d switching to basic linear allreduce",
rank, comm_size, count));
return ompi_coll_base_allreduce_intra_basic_linear(sbuf, rbuf, count, dtype,
op, comm, module);
}
int err = MPI_SUCCESS;
ptrdiff_t lb, extent, dsize, gap = 0;
ompi_datatype_get_extent(dtype, &lb, &extent);
dsize = opal_datatype_span(&dtype->super, count, &gap);
/* Temporary buffer for receiving messages */
char *tmp_buf = NULL;
char *tmp_buf_raw = (char *)malloc(dsize);
if (NULL == tmp_buf_raw)
return OMPI_ERR_OUT_OF_RESOURCE;
tmp_buf = tmp_buf_raw - gap;
if (sbuf != MPI_IN_PLACE) {
err = ompi_datatype_copy_content_same_ddt(dtype, count, (char *)rbuf,
(char *)sbuf);
if (MPI_SUCCESS != err) { goto cleanup_and_return; }
}
/*
* Step 1. Reduce the number of processes to the nearest lower power of two
* p' = 2^{\floor{\log_2 p}} by removing r = p - p' processes.
* 1. In the first 2r processes (ranks 0 to 2r - 1), all the even ranks send
* the second half of the input vector to their right neighbor (rank + 1)
* and all the odd ranks send the first half of the input vector to their
* left neighbor (rank - 1).
* 2. All 2r processes compute the reduction on their half.
* 3. The odd ranks then send the result to their left neighbors
* (the even ranks).
*
* The even ranks (0 to 2r - 1) now contain the reduction with the input
* vector on their right neighbors (the odd ranks). The first r even
* processes and the p - 2r last processes are renumbered from
* 0 to 2^{\floor{\log_2 p}} - 1.
*/
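    /* Worked example (illustrative): for comm_size = 6, nprocs_pof2 = 4 and
     * nprocs_rem = 2.  Ranks 0-3 take part in the pairwise exchange: ranks 1
     * and 3 end up with vrank = -1 and drop out, ranks 0 and 2 become
     * vrank 0 and 1, and ranks 4 and 5 become vrank 2 and 3. */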
int vrank, step, wsize;
int nprocs_rem = comm_size - nprocs_pof2;
if (rank < 2 * nprocs_rem) {
int count_lhalf = count / 2;
int count_rhalf = count - count_lhalf;
if (rank % 2 != 0) {
/*
* Odd process -- exchange with rank - 1
* Send the left half of the input vector to the left neighbor,
* Recv the right half of the input vector from the left neighbor
*/
err = ompi_coll_base_sendrecv(rbuf, count_lhalf, dtype, rank - 1,
MCA_COLL_BASE_TAG_ALLREDUCE,
(char *)tmp_buf + (ptrdiff_t)count_lhalf * extent,
count_rhalf, dtype, rank - 1,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE, rank);
if (MPI_SUCCESS != err) { goto cleanup_and_return; }
/* Reduce on the right half of the buffers (result in rbuf) */
ompi_op_reduce(op, (char *)tmp_buf + (ptrdiff_t)count_lhalf * extent,
(char *)rbuf + count_lhalf * extent, count_rhalf, dtype);
/* Send the right half to the left neighbor */
err = MCA_PML_CALL(send((char *)rbuf + (ptrdiff_t)count_lhalf * extent,
count_rhalf, dtype, rank - 1,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (MPI_SUCCESS != err) { goto cleanup_and_return; }
            /* This process does not participate in the recursive doubling phase */
vrank = -1;
} else {
/*
* Even process -- exchange with rank + 1
* Send the right half of the input vector to the right neighbor,
* Recv the left half of the input vector from the right neighbor
*/
err = ompi_coll_base_sendrecv((char *)rbuf + (ptrdiff_t)count_lhalf * extent,
count_rhalf, dtype, rank + 1,
MCA_COLL_BASE_TAG_ALLREDUCE,
tmp_buf, count_lhalf, dtype, rank + 1,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE, rank);
if (MPI_SUCCESS != err) { goto cleanup_and_return; }
/* Reduce on the right half of the buffers (result in rbuf) */
ompi_op_reduce(op, tmp_buf, rbuf, count_lhalf, dtype);
/* Recv the right half from the right neighbor */
err = MCA_PML_CALL(recv((char *)rbuf + (ptrdiff_t)count_lhalf * extent,
count_rhalf, dtype, rank + 1,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE));
if (MPI_SUCCESS != err) { goto cleanup_and_return; }
vrank = rank / 2;
}
} else { /* rank >= 2 * nprocs_rem */
vrank = rank - nprocs_rem;
}
/*
* Step 2. Reduce-scatter implemented with recursive vector halving and
* recursive distance doubling. We have p' = 2^{\floor{\log_2 p}}
* power-of-two number of processes with new ranks (vrank) and result in rbuf.
*
* The even-ranked processes send the right half of their buffer to rank + 1
* and the odd-ranked processes send the left half of their buffer to
* rank - 1. All processes then compute the reduction between the local
* buffer and the received buffer. In the next \log_2(p') - 1 steps, the
* buffers are recursively halved, and the distance is doubled. At the end,
* each of the p' processes has 1 / p' of the total reduction result.
*/
rindex = malloc(sizeof(*rindex) * nsteps);
sindex = malloc(sizeof(*sindex) * nsteps);
rcount = malloc(sizeof(*rcount) * nsteps);
scount = malloc(sizeof(*scount) * nsteps);
if (NULL == rindex || NULL == sindex || NULL == rcount || NULL == scount) {
err = OMPI_ERR_OUT_OF_RESOURCE;
goto cleanup_and_return;
}
if (vrank != -1) {
step = 0;
wsize = count;
sindex[0] = rindex[0] = 0;
for (int mask = 1; mask < nprocs_pof2; mask <<= 1) {
/*
             * On each iteration: rindex[step] = sindex[step] -- beginning of the
             * current window. The length of the current window is stored in wsize.
*/
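            /* Illustrative window evolution for count = 8, nprocs_pof2 = 4:
             * step 0 works on a window of 8 elements and keeps 4 of them,
             * step 1 works on those 4 and keeps 2, so each of the 4 processes
             * finishes the reduce-scatter owning 2 of the 8 elements. */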
int vdest = vrank ^ mask;
/* Translate vdest virtual rank to real rank */
int dest = (vdest < nprocs_rem) ? vdest * 2 : vdest + nprocs_rem;
if (rank < dest) {
/*
* Recv into the left half of the current window, send the right
* half of the window to the peer (perform reduce on the left
* half of the current window)
*/
rcount[step] = wsize / 2;
scount[step] = wsize - rcount[step];
sindex[step] = rindex[step] + rcount[step];
} else {
/*
* Recv into the right half of the current window, send the left
* half of the window to the peer (perform reduce on the right
* half of the current window)
*/
scount[step] = wsize / 2;
rcount[step] = wsize - scount[step];
rindex[step] = sindex[step] + scount[step];
}
/* Send part of data from the rbuf, recv into the tmp_buf */
err = ompi_coll_base_sendrecv((char *)rbuf + (ptrdiff_t)sindex[step] * extent,
scount[step], dtype, dest,
MCA_COLL_BASE_TAG_ALLREDUCE,
(char *)tmp_buf + (ptrdiff_t)rindex[step] * extent,
rcount[step], dtype, dest,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE, rank);
if (MPI_SUCCESS != err) { goto cleanup_and_return; }
/* Local reduce: rbuf[] = tmp_buf[] <op> rbuf[] */
ompi_op_reduce(op, (char *)tmp_buf + (ptrdiff_t)rindex[step] * extent,
(char *)rbuf + (ptrdiff_t)rindex[step] * extent,
rcount[step], dtype);
/* Move the current window to the received message */
if (step + 1 < nsteps) {
rindex[step + 1] = rindex[step];
sindex[step + 1] = rindex[step];
wsize = rcount[step];
step++;
}
}
/*
* Assertion: each process has 1 / p' of the total reduction result:
* rcount[nsteps - 1] elements in the rbuf[rindex[nsteps - 1], ...].
*/
/*
* Step 3. Allgather by the recursive doubling algorithm.
* Each process has 1 / p' of the total reduction result:
* rcount[nsteps - 1] elements in the rbuf[rindex[nsteps - 1], ...].
* All exchanges are executed in reverse order relative
* to recursive doubling (previous step).
*/
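        /* Illustrative: with nprocs_pof2 = 4 the masks are visited as 2, then 1,
         * i.e. the reverse of the reduce-scatter above, and the owned segment
         * grows at every step until all count elements are present in rbuf. */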
step = nsteps - 1;
for (int mask = nprocs_pof2 >> 1; mask > 0; mask >>= 1) {
int vdest = vrank ^ mask;
/* Translate vdest virtual rank to real rank */
int dest = (vdest < nprocs_rem) ? vdest * 2 : vdest + nprocs_rem;
/*
* Send rcount[step] elements from rbuf[rindex[step]...]
* Recv scount[step] elements to rbuf[sindex[step]...]
*/
err = ompi_coll_base_sendrecv((char *)rbuf + (ptrdiff_t)rindex[step] * extent,
rcount[step], dtype, dest,
MCA_COLL_BASE_TAG_ALLREDUCE,
(char *)rbuf + (ptrdiff_t)sindex[step] * extent,
scount[step], dtype, dest,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE, rank);
if (MPI_SUCCESS != err) { goto cleanup_and_return; }
step--;
}
}
/*
* Step 4. Send total result to excluded odd ranks.
*/
if (rank < 2 * nprocs_rem) {
if (rank % 2 != 0) {
/* Odd process -- recv result from rank - 1 */
err = MCA_PML_CALL(recv(rbuf, count, dtype, rank - 1,
MCA_COLL_BASE_TAG_ALLREDUCE, comm,
MPI_STATUS_IGNORE));
if (OMPI_SUCCESS != err) { goto cleanup_and_return; }
} else {
/* Even process -- send result to rank + 1 */
err = MCA_PML_CALL(send(rbuf, count, dtype, rank + 1,
MCA_COLL_BASE_TAG_ALLREDUCE,
MCA_PML_BASE_SEND_STANDARD, comm));
if (MPI_SUCCESS != err) { goto cleanup_and_return; }
}
}
cleanup_and_return:
if (NULL != tmp_buf_raw)
free(tmp_buf_raw);
if (NULL != rindex)
free(rindex);
if (NULL != sindex)
free(sindex);
if (NULL != rcount)
free(rcount);
if (NULL != scount)
free(scount);
return err;
}
/* copied function (with appropriate renaming) ends here */
| 30,121 |
903 | <filename>exts/jphp-sql-ext/src/main/java/org/develnext/jphp/ext/sql/classes/WrapSqlException.java
package org.develnext.jphp.ext.sql.classes;
import org.develnext.jphp.ext.sql.SqlExtension;
import php.runtime.annotation.Reflection;
import php.runtime.annotation.Reflection.Name;
import php.runtime.env.Environment;
import php.runtime.ext.java.JavaException;
import php.runtime.reflection.ClassEntity;
@Name("SqlException")
@Reflection.Namespace(SqlExtension.NS)
public class WrapSqlException extends JavaException {
public WrapSqlException(Environment env, Throwable throwable) {
super(env, throwable);
}
public WrapSqlException(Environment env, ClassEntity clazz) {
super(env, clazz);
}
}
| 256 |
788 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.usergrid.persistence.index.impl;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.usergrid.persistence.index.*;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListenableFutureTask;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.inject.Inject;
import com.google.inject.Singleton;
/**
* Cache for Es index operations
*/
@Singleton
public class EsIndexCacheImpl implements IndexCache {
private static final Logger logger = LoggerFactory.getLogger( EsEntityIndexImpl.class );
private final ListeningScheduledExecutorService refreshExecutors;
private LoadingCache<String, String[]> aliasIndexCache;
private EsProvider provider;
@Inject
public EsIndexCacheImpl( final EsProvider provider, final IndexFig indexFig ) {
this.refreshExecutors =
MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool( indexFig.getIndexCacheMaxWorkers() ) );
this.provider = provider;
aliasIndexCache = CacheBuilder.newBuilder().maximumSize( 1000 ).refreshAfterWrite( 5, TimeUnit.MINUTES )
.build( new CacheLoader<String, String[]>() {
@Override
public ListenableFuture<String[]> reload( final String key,
String[] oldValue )
throws Exception {
ListenableFutureTask<String[]> task =
ListenableFutureTask.create( new Callable<String[]>() {
public String[] call() {
return load( key );
}
} );
refreshExecutors.execute( task );
return task;
}
@Override
public String[] load( final String aliasName ) {
return getIndexesFromEs(aliasName);
}
} );
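        // Added commentary: with refreshAfterWrite, Guava keeps serving the
        // cached alias list and triggers reload() asynchronously on
        // refreshExecutors, so lookups do not block on Elasticsearch once a
        // value has been cached.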
}
/**
* Get indexes for an alias
*/
@Override
public String[] getIndexes(IndexAlias alias, EntityIndex.AliasType aliasType) {
String[] indexes;
try {
indexes = aliasIndexCache.get( getAliasName( alias, aliasType ) );
}
catch ( ExecutionException ee ) {
            logger.error( "Failed to retrieve indexes", ee );
throw new RuntimeException( ee );
}
return indexes;
}
private String[] getIndexesFromEs(final String aliasName){
final AdminClient adminClient = this.provider.getClient().admin();
//remove write alias, can only have one
ImmutableOpenMap<String, List<AliasMetaData>> aliasMap =
adminClient.indices().getAliases( new GetAliasesRequest( aliasName ) ).actionGet().getAliases();
return aliasMap.keys().toArray( String.class );
}
/**
* Get the name of the alias to use
     * @param alias the read/write alias pair for the index
     * @param aliasType whether the read or the write alias is requested
     * @return the resolved alias name
*/
private String getAliasName( IndexAlias alias, EntityIndex.AliasType aliasType ) {
return aliasType == EntityIndex.AliasType.Read ? alias.getReadAlias() : alias.getWriteAlias();
}
/**
* clean up cache
*/
@Override
public void invalidate(IndexAlias alias) {
aliasIndexCache.invalidate( alias.getWriteAlias() );
aliasIndexCache.invalidate( alias.getReadAlias() );
}
}
| 2,361 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef INCLUDED_BINARYURP_SOURCE_BINARYANY_HXX
#define INCLUDED_BINARYURP_SOURCE_BINARYANY_HXX
#include "sal/config.h"
#include "uno/any2.h"
namespace com { namespace sun { namespace star { namespace uno {
class TypeDescription;
} } } }
namespace binaryurp {
class BinaryAny {
public:
BinaryAny() throw ();
BinaryAny(com::sun::star::uno::TypeDescription const & type, void * value)
throw ();
explicit BinaryAny(uno_Any const & raw) throw ();
// takes over raw.pData (but copies raw.pType); raw must not be passed
// to uno_any_destruct
BinaryAny(BinaryAny const & other) throw ();
~BinaryAny() throw ();
BinaryAny & operator =(BinaryAny const & other) throw ();
uno_Any * get() throw ();
com::sun::star::uno::TypeDescription getType() const throw ();
void * getValue(com::sun::star::uno::TypeDescription const & type) const
throw ();
private:
mutable uno_Any data_;
// mutable so that getValue() can return a non-const void *, as in turn
// required at various places in binary UNO
};
}
#endif
| 615 |
379 | <reponame>joelostblom/dash-docs
# -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
from dash_docs import styles
from dash_docs import tools
from dash_docs import reusable_components as rc
examples = tools.load_examples(__file__)
layout = html.Div(children=[
html.H1('Button Examples and Reference'),
html.H2('Button Basic Example'),
rc.Markdown("An example of a default button without any extra properties \
and `n_clicks` in the callback. `n_clicks` is an integer that represents \
    the number of times the button has been clicked. Note that the original \
value is `None`."),
rc.Syntax(examples['button_basic.py'][0]),
rc.Example(examples['button_basic.py'][1]),
html.Br(),
html.H2(['Determining which Button Changed with ', html.Code('callback_context')]),
rc.Markdown("This example utilizes the `dash.callback_context` property, \
to determine which input was changed."),
rc.Syntax(examples['button_ctx.py'][0]),
rc.Example(examples['button_ctx.py'][1]),
html.Br(),
html.H2('Button Properties'),
rc.ComponentReference('Button', html)
])
| 394 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-qhr8-p6mw-gmf5",
"modified": "2022-05-05T02:48:34Z",
"published": "2022-05-05T02:48:34Z",
"aliases": [
"CVE-2013-0209"
],
"details": "lib/MT/Upgrade.pm in mt-upgrade.cgi in Movable Type 4.2x and 4.3x through 4.38 does not require authentication for requests to database-migration functions, which allows remote attackers to conduct eval injection and SQL injection attacks via crafted parameters, as demonstrated by an eval injection attack against the core_drop_meta_for_table function, leading to execution of arbitrary Perl code.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2013-0209"
},
{
"type": "WEB",
"url": "http://openwall.com/lists/oss-security/2013/01/22/3"
},
{
"type": "WEB",
"url": "http://www.movabletype.org/2013/01/movable_type_438_patch.html"
},
{
"type": "WEB",
"url": "http://www.sec-1.com/blog/?p=402"
},
{
"type": "WEB",
"url": "http://www.sec-1.com/blog/wp-content/uploads/2013/01/movabletype_upgrade_exec.rb_.txt"
}
],
"database_specific": {
"cwe_ids": [
"CWE-287"
],
"severity": "HIGH",
"github_reviewed": false
}
} | 572 |
5,847 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import pathlib
import pkg_resources
from setuptools import find_packages, setup
from build_helpers.build_helpers import (
ANTLRCommand,
BuildPyCommand,
CleanCommand,
Develop,
SDistCommand,
find_version,
)
with pathlib.Path("requirements/requirements.txt").open() as requirements_txt:
install_requires = [
str(requirement)
for requirement in pkg_resources.parse_requirements(requirements_txt)
]
with open("README.md", "r") as fh:
LONG_DESC = fh.read()
setup(
cmdclass={
"antlr": ANTLRCommand,
"clean": CleanCommand,
"sdist": SDistCommand,
"build_py": BuildPyCommand,
"develop": Develop,
},
name="hydra-core",
version=find_version("hydra", "__init__.py"),
author="<NAME>",
author_email="<EMAIL>",
description="A framework for elegantly configuring complex applications",
license="MIT",
long_description=LONG_DESC,
long_description_content_type="text/markdown",
url="https://github.com/facebookresearch/hydra",
keywords="command-line configuration yaml tab-completion",
packages=find_packages(include=["hydra"]),
include_package_data=True,
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
],
install_requires=install_requires,
entry_points={"pytest11": ["hydra_pytest = hydra.extra.pytest_plugin"]},
# Install development dependencies with
# pip install -r requirements/dev.txt -e .
)
| 864 |
10,608 | # coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Corpus for Knowledge-Enhanced Language Model Pre-training (KELM)"""
import csv
import datasets
_DESCRIPTION = """\
Data-To-Text Generation involves converting knowledge graph (KG) triples of the form (subject, relation, object) into
a natural language sentence(s). This dataset consists of English KG data converted into paired natural language text.
The generated corpus consists of ∼18M sentences spanning ∼45M triples with ∼1500 distinct relations.
"""
_CITATION = """\
@misc{agarwal2020large,
title={Large Scale Knowledge Graph Based Synthetic Corpus Generation for Knowledge-Enhanced Language Model Pre-training},
author={<NAME> and <NAME> and <NAME> and <NAME>},
year={2020},
eprint={2010.12688},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DOWNLOAD_URL = "https://storage.googleapis.com/gresearch/kelm-corpus/quadruples-{}.tsv"
_WEBPAGE = "https://github.com/google-research-datasets/KELM-corpus"
class KELM(datasets.GeneratorBasedBuilder):
"""Corpus for Knowledge-Enhanced Language Model Pre-training (KELM)"""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"triple": datasets.Value("string"),
"sentence": datasets.Value("string"),
}
),
homepage=_WEBPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
train_path = dl_manager.download_and_extract(_DOWNLOAD_URL.format("train"))
validation_path = dl_manager.download_and_extract(_DOWNLOAD_URL.format("validation"))
test_path = dl_manager.download_and_extract(_DOWNLOAD_URL.format("test"))
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter="\t", fieldnames=["triple", "sentence"])
for irow, row in enumerate(csv_reader):
yield irow, row
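# Usage sketch (commentary only, not part of the loader): a script like this is
# normally consumed through `datasets.load_dataset`. The path below is a
# placeholder for wherever this file lives, not a published dataset name.
#
#   from datasets import load_dataset
#   kelm = load_dataset("path/to/kelm.py", split="validation")
#   example = kelm[0]
#   print(example["triple"], "->", example["sentence"])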
| 1,136 |
742 | #include <algorithm>
#include <array>
#include <cmath>
#include <tuple>
#include "SDL.h"
#include "ArenaClockUtils.h"
#include "Game.h"
#include "GameState.h"
#include "../Assets/ArenaPaletteName.h"
#include "../Assets/ExeData.h"
#include "../Assets/INFFile.h"
#include "../Assets/MIFFile.h"
#include "../Assets/RMDFile.h"
#include "../Entities/Entity.h"
#include "../Entities/EntityManager.h"
#include "../Entities/Player.h"
#include "../Interface/GameWorldUiView.h"
#include "../Math/Constants.h"
#include "../Media/TextureManager.h"
#include "../Rendering/Renderer.h"
#include "../UI/TextAlignment.h"
#include "../UI/TextBox.h"
#include "../UI/TextRenderUtils.h"
#include "../World/ArenaVoxelUtils.h"
#include "../World/ArenaWeatherUtils.h"
#include "../World/MapType.h"
#include "../World/WeatherUtils.h"
#include "../WorldMap/LocationDefinition.h"
#include "../WorldMap/LocationInstance.h"
#include "../WorldMap/LocationType.h"
#include "../WorldMap/LocationUtils.h"
#include "components/debug/Debug.h"
#include "components/utilities/String.h"
GameState::WorldMapLocationIDs::WorldMapLocationIDs(int provinceID, int locationID)
{
this->provinceID = provinceID;
this->locationID = locationID;
}
void GameState::MapState::init(MapDefinition &&mapDefinition, MapInstance &&mapInstance,
WeatherDefinition &&weatherDef, const std::optional<CoordInt3> &returnCoord)
{
this->definition = std::move(mapDefinition);
this->instance = std::move(mapInstance);
this->weatherDef = std::move(weatherDef);
this->returnCoord = returnCoord;
}
void GameState::MapTransitionState::init(MapState &&mapState,
const std::optional<WorldMapLocationIDs> &worldMapLocationIDs,
std::optional<CitizenUtils::CitizenGenInfo> &&citizenGenInfo, const CoordInt2 &startCoord,
const std::optional<bool> &enteringInteriorFromExterior)
{
this->mapState = std::move(mapState);
this->worldMapLocationIDs = worldMapLocationIDs;
this->citizenGenInfo = std::move(citizenGenInfo);
this->startCoord = startCoord;
this->enteringInteriorFromExterior = enteringInteriorFromExterior;
}
GameState::GameState(Player &&player, const BinaryAssetLibrary &binaryAssetLibrary)
: player(std::move(player))
{
// Most values need to be initialized elsewhere in the program in order to determine
// the world state, etc.
DebugLog("Initializing.");
// Initialize world map definition and instance to default.
this->worldMapDef.init(binaryAssetLibrary);
this->worldMapInst.init(this->worldMapDef);
// @temp: set main quest dungeons visible for testing.
for (int i = 0; i < this->worldMapInst.getProvinceCount(); i++)
{
ProvinceInstance &provinceInst = this->worldMapInst.getProvinceInstance(i);
const int provinceDefIndex = provinceInst.getProvinceDefIndex();
const ProvinceDefinition &provinceDef = this->worldMapDef.getProvinceDef(provinceDefIndex);
for (int j = 0; j < provinceInst.getLocationCount(); j++)
{
LocationInstance &locationInst = provinceInst.getLocationInstance(j);
const int locationDefIndex = locationInst.getLocationDefIndex();
const LocationDefinition &locationDef = provinceDef.getLocationDef(locationDefIndex);
const std::string &locationName = locationInst.getName(locationDef);
const bool isMainQuestDungeon = locationDef.getType() == LocationDefinition::Type::MainQuestDungeon;
const bool isStartDungeon = isMainQuestDungeon &&
(locationDef.getMainQuestDungeonDefinition().type == LocationDefinition::MainQuestDungeonDefinition::Type::Start);
const bool shouldSetVisible = (locationName.size() > 0) &&
isMainQuestDungeon && !isStartDungeon && !locationInst.isVisible();
if (shouldSetVisible)
{
locationInst.toggleVisibility();
}
}
}
// Do initial weather update (to set each value to a valid state).
this->updateWeatherList(binaryAssetLibrary.getExeData());
this->provinceIndex = -1;
this->locationIndex = -1;
this->triggerTextRemainingSeconds = 0.0;
this->actionTextRemainingSeconds = 0.0;
this->effectTextRemainingSeconds = 0.0;
this->isCamping = false;
this->chasmAnimSeconds = 0.0;
}
GameState::~GameState()
{
DebugLog("Closing.");
}
/*bool GameState::tryMakeMapFromLocation(const LocationDefinition &locationDef, int raceID, WeatherType weatherType,
int currentDay, int starCount, bool provinceHasAnimatedLand, const CharacterClassLibrary &charClassLibrary,
const EntityDefinitionLibrary &entityDefLibrary, const BinaryAssetLibrary &binaryAssetLibrary,
const TextAssetLibrary &textAssetLibrary, TextureManager &textureManager, MapState *outMapState)
{
// Decide how to load and instantiate the map.
const LocationDefinition::Type locationType = locationDef.getType();
if (locationType == LocationDefinition::Type::City)
{
const LocationDefinition::CityDefinition &cityDef = locationDef.getCityDefinition();
Buffer<uint8_t> reservedBlocks = [&cityDef]()
{
DebugAssert(cityDef.reservedBlocks != nullptr);
Buffer<uint8_t> buffer(static_cast<int>(cityDef.reservedBlocks->size()));
std::copy(cityDef.reservedBlocks->begin(), cityDef.reservedBlocks->end(), buffer.get());
return buffer;
}();
const std::optional<LocationDefinition::CityDefinition::MainQuestTempleOverride> mainQuestTempleOverride =
[&cityDef]() -> std::optional<LocationDefinition::CityDefinition::MainQuestTempleOverride>
{
if (cityDef.hasMainQuestTempleOverride)
{
return cityDef.mainQuestTempleOverride;
}
else
{
return std::nullopt;
}
}();
MapGeneration::CityGenInfo cityGenInfo;
cityGenInfo.init(std::string(cityDef.mapFilename), std::string(cityDef.typeDisplayName), cityDef.type,
cityDef.citySeed, cityDef.rulerSeed, raceID, cityDef.premade, cityDef.coastal,
cityDef.palaceIsMainQuestDungeon, std::move(reservedBlocks), mainQuestTempleOverride,
cityDef.blockStartPosX, cityDef.blockStartPosY, cityDef.cityBlocksPerSide);
SkyGeneration::ExteriorSkyGenInfo skyGenInfo;
skyGenInfo.init(cityDef.climateType, weatherType, currentDay, starCount, cityDef.citySeed,
cityDef.skySeed, provinceHasAnimatedLand);
MapDefinition mapDefinition;
if (!mapDefinition.initCity(cityGenInfo, skyGenInfo, charClassLibrary, entityDefLibrary,
binaryAssetLibrary, textAssetLibrary, textureManager))
{
DebugLogError("Couldn't init city map for location \"" + locationDef.getName() + "\".");
return false;
}
MapInstance mapInstance;
mapInstance.init(mapDefinition, textureManager);
outMapState->init(std::move(mapDefinition), std::move(mapInstance), std::nullopt);
}
else if (locationType == LocationDefinition::Type::Dungeon)
{
const LocationDefinition::DungeonDefinition &dungeonDef = locationDef.getDungeonDefinition();
MapGeneration::InteriorGenInfo interiorGenInfo;
constexpr bool isArtifactDungeon = false; // @todo: not supported yet.
interiorGenInfo.initDungeon(&dungeonDef, isArtifactDungeon);
MapDefinition mapDefinition;
if (!mapDefinition.initInterior(interiorGenInfo, charClassLibrary, entityDefLibrary,
binaryAssetLibrary, textureManager))
{
DebugLogError("Couldn't init dungeon map for location \"" + locationDef.getName() + "\".");
return false;
}
MapInstance mapInstance;
mapInstance.init(mapDefinition, textureManager);
outMapState->init(std::move(mapDefinition), std::move(mapInstance), std::nullopt);
}
else if (locationType == LocationDefinition::Type::MainQuestDungeon)
{
const LocationDefinition::MainQuestDungeonDefinition &mainQuestDungeonDef =
locationDef.getMainQuestDungeonDefinition();
MapGeneration::InteriorGenInfo interiorGenInfo;
constexpr std::optional<bool> rulerIsMale; // Unused for main quest dungeons.
interiorGenInfo.initPrefab(std::string(mainQuestDungeonDef.mapFilename),
ArenaTypes::InteriorType::Dungeon, rulerIsMale);
MapDefinition mapDefinition;
if (!mapDefinition.initInterior(interiorGenInfo, charClassLibrary, entityDefLibrary,
binaryAssetLibrary, textureManager))
{
DebugLogError("Couldn't init main quest dungeon map for location \"" + locationDef.getName() + "\".");
return false;
}
MapInstance mapInstance;
mapInstance.init(mapDefinition, textureManager);
outMapState->init(std::move(mapDefinition), std::move(mapInstance), std::nullopt);
}
else
{
DebugNotImplementedMsg(std::to_string(static_cast<int>(locationType)));
}
return true;
}
bool GameState::trySetFromWorldMap(int provinceID, int locationID, const std::optional<WeatherType> &overrideWeather,
int currentDay, int starCount, const CharacterClassLibrary &charClassLibrary,
const EntityDefinitionLibrary &entityDefLibrary, const BinaryAssetLibrary &binaryAssetLibrary,
const TextAssetLibrary &textAssetLibrary, TextureManager &textureManager, Renderer &renderer)
{
DebugAssertMsg(this->nextMap == nullptr, "Already have a map to transition to.");
// Get the province and location definitions.
if ((provinceID < 0) || (provinceID >= this->worldMapDef.getProvinceCount()))
{
DebugLogError("Invalid province ID \"" + std::to_string(provinceID) + "\".");
return false;
}
const ProvinceDefinition &provinceDef = this->worldMapDef.getProvinceDef(provinceID);
if ((locationID < 0) || (locationID >= provinceDef.getLocationCount()))
{
DebugLogError("Invalid location ID \"" + std::to_string(locationID) + "\" for province \"" + provinceDef.getName() + "\".");
return false;
}
const LocationDefinition &locationDef = provinceDef.getLocationDef(locationID);
const int raceID = provinceDef.getRaceID();
MapState mapState;
if (!GameState::tryMakeMapFromLocation(locationDef, raceID, weatherType, currentDay, starCount,
provinceDef.hasAnimatedDistantLand(), charClassLibrary, entityDefLibrary, binaryAssetLibrary,
textAssetLibrary, textureManager, &mapState))
{
DebugLogError("Couldn't make map from location \"" + locationDef.getName() + "\" in province \"" + provinceDef.getName() + "\".");
return false;
}
this->clearMaps();
this->maps.emplace(std::move(mapState));
const MapDefinition &activeMapDef = this->getActiveMapDef();
const MapType activeMapType = activeMapDef.getMapType();
MapInstance &activeMapInst = this->getActiveMapInst();
const int activeLevelIndex = activeMapInst.getActiveLevelIndex();
LevelInstance &activeLevelInst = activeMapInst.getLevel(activeLevelIndex);
const WeatherType weatherType = [&overrideWeather, &activeMapDef]()
{
if (overrideWeather.has_value())
{
// Use this when we don't want to randomly generate the weather.
return *overrideWeather;
}
else
{
// Determine weather from the map.
const MapType mapType = activeMapDef.getMapType();
if (mapType == MapType::Interior)
{
// Interiors are always clear.
return WeatherType::Clear;
}
else if ((mapType == MapType::City) || (mapType == MapType::Wilderness))
{
// @todo: generate weather based on the location.
return WeatherType::Clear;
}
else
{
DebugUnhandledReturnMsg(WeatherType, std::to_string(static_cast<int>(mapType)));
}
}
}();
DebugAssert(activeMapDef.getStartPointCount() > 0);
const LevelDouble2 &startPoint = activeMapDef.getStartPoint(0);
const CoordInt2 startCoord = VoxelUtils::levelVoxelToCoord(VoxelUtils::pointToVoxel(startPoint));
const std::optional<CitizenUtils::CitizenGenInfo> citizenGenInfo = [&entityDefLibrary, &textureManager,
&locationDef, raceID, activeMapType]() -> std::optional<CitizenUtils::CitizenGenInfo>
{
if ((activeMapType == MapType::City) || (activeMapType == MapType::Wilderness))
{
DebugAssert(locationDef.getType() == LocationDefinition::Type::City);
const LocationDefinition::CityDefinition &cityDef = locationDef.getCityDefinition();
const ClimateType climateType = cityDef.climateType;
return CitizenUtils::makeCitizenGenInfo(raceID, climateType, entityDefLibrary, textureManager);
}
else
{
return std::nullopt;
}
}();
// Set level active in the renderer.
if (!this->trySetLevelActive(activeLevelInst, activeLevelIndex, weatherType, startCoord, citizenGenInfo,
entityDefLibrary, binaryAssetLibrary, textureManager, renderer))
{
DebugLogError("Couldn't set level active in the renderer for location \"" + locationDef.getName() + "\".");
return false;
}
// Update world map location.
this->provinceIndex = provinceID;
this->locationIndex = locationID;
return true;
}*/
bool GameState::tryPushInterior(const MapGeneration::InteriorGenInfo &interiorGenInfo,
const std::optional<CoordInt3> &returnCoord, const CharacterClassLibrary &charClassLibrary,
const EntityDefinitionLibrary &entityDefLibrary, const BinaryAssetLibrary &binaryAssetLibrary,
TextureManager &textureManager, Renderer &renderer)
{
DebugAssertMsg(this->nextMap == nullptr, "Already have a map to transition to.");
MapDefinition mapDefinition;
if (!mapDefinition.initInterior(interiorGenInfo, charClassLibrary, entityDefLibrary,
binaryAssetLibrary, textureManager))
{
DebugLogError("Couldn't init interior map from generation info.");
return false;
}
constexpr int currentDay = 0; // Doesn't matter for interiors.
MapInstance mapInstance;
mapInstance.init(mapDefinition, currentDay, textureManager);
// Save return voxel to the current exterior (if any).
if (this->maps.size() > 0)
{
MapState &activeMapState = this->maps.top();
activeMapState.returnCoord = returnCoord;
}
DebugAssert(mapDefinition.getStartPointCount() > 0);
const LevelDouble2 &startPoint = mapDefinition.getStartPoint(0);
const CoordInt2 startCoord = VoxelUtils::levelVoxelToCoord(VoxelUtils::pointToVoxel(startPoint));
// Interiors are always clear weather.
Random weatherRandom(this->arenaRandom.getSeed()); // Cosmetic random.
WeatherDefinition weatherDef;
weatherDef.initFromClassic(ArenaTypes::WeatherType::Clear, currentDay, weatherRandom);
MapState mapState;
mapState.init(std::move(mapDefinition), std::move(mapInstance), std::move(weatherDef), std::nullopt);
const std::optional<WorldMapLocationIDs> worldMapLocationIDs; // Doesn't change when pushing an interior.
std::optional<CitizenUtils::CitizenGenInfo> citizenGenInfo; // No citizens in interiors.
constexpr bool enteringInteriorFromExterior = true;
this->nextMap = std::make_unique<MapTransitionState>();
this->nextMap->init(std::move(mapState), worldMapLocationIDs, std::move(citizenGenInfo),
startCoord, enteringInteriorFromExterior);
return true;
}
bool GameState::trySetInterior(const MapGeneration::InteriorGenInfo &interiorGenInfo,
const std::optional<VoxelInt2> &playerStartOffset, const WorldMapLocationIDs &worldMapLocationIDs,
const CharacterClassLibrary &charClassLibrary, const EntityDefinitionLibrary &entityDefLibrary,
const BinaryAssetLibrary &binaryAssetLibrary, TextureManager &textureManager, Renderer &renderer)
{
DebugAssertMsg(this->nextMap == nullptr, "Already have a map to transition to.");
MapDefinition mapDefinition;
if (!mapDefinition.initInterior(interiorGenInfo, charClassLibrary, entityDefLibrary,
binaryAssetLibrary, textureManager))
{
DebugLogError("Couldn't init interior map from generation info.");
return false;
}
constexpr int currentDay = 0; // Doesn't matter for interiors.
MapInstance mapInstance;
mapInstance.init(mapDefinition, currentDay, textureManager);
const CoordInt2 startCoord = [&playerStartOffset, &mapDefinition]()
{
DebugAssert(mapDefinition.getStartPointCount() > 0);
const LevelDouble2 &startPoint = mapDefinition.getStartPoint(0);
const LevelInt2 startVoxel = VoxelUtils::pointToVoxel(startPoint);
const CoordInt2 coord = VoxelUtils::levelVoxelToCoord(startVoxel);
const VoxelInt2 offset = playerStartOffset.has_value() ? *playerStartOffset : VoxelInt2::Zero;
return ChunkUtils::recalculateCoord(coord.chunk, coord.voxel + offset);
}();
// Interiors are always clear weather.
Random weatherRandom(this->arenaRandom.getSeed()); // Cosmetic random.
WeatherDefinition weatherDef;
weatherDef.initFromClassic(ArenaTypes::WeatherType::Clear, currentDay, weatherRandom);
MapState mapState;
mapState.init(std::move(mapDefinition), std::move(mapInstance), std::move(weatherDef), std::nullopt);
std::optional<CitizenUtils::CitizenGenInfo> citizenGenInfo; // No citizens in interiors.
constexpr bool enteringInteriorFromExterior = false; // This method doesn't keep an exterior alive.
this->nextMap = std::make_unique<MapTransitionState>();
this->nextMap->init(std::move(mapState), worldMapLocationIDs, std::move(citizenGenInfo),
startCoord, enteringInteriorFromExterior);
// @todo: hack to make fast travel not crash when iterating stale distant objects in renderer
renderer.clearSky();
return true;
}
bool GameState::trySetCity(const MapGeneration::CityGenInfo &cityGenInfo,
const SkyGeneration::ExteriorSkyGenInfo &skyGenInfo, const std::optional<WeatherDefinition> &overrideWeather,
const std::optional<WorldMapLocationIDs> &newWorldMapLocationIDs,
const CharacterClassLibrary &charClassLibrary, const EntityDefinitionLibrary &entityDefLibrary,
const BinaryAssetLibrary &binaryAssetLibrary, const TextAssetLibrary &textAssetLibrary,
TextureManager &textureManager, Renderer &renderer)
{
DebugAssertMsg(this->nextMap == nullptr, "Already have a map to transition to.");
MapDefinition mapDefinition;
if (!mapDefinition.initCity(cityGenInfo, skyGenInfo, charClassLibrary, entityDefLibrary,
binaryAssetLibrary, textAssetLibrary, textureManager))
{
DebugLogError("Couldn't init city map from generation info.");
return false;
}
MapInstance mapInstance;
mapInstance.init(mapDefinition, skyGenInfo.currentDay, textureManager);
DebugAssert(mapDefinition.getStartPointCount() > 0);
const LevelDouble2 &startPoint = mapDefinition.getStartPoint(0);
const CoordInt2 startCoord = VoxelUtils::levelVoxelToCoord(VoxelUtils::pointToVoxel(startPoint));
const ProvinceDefinition *provinceDefPtr = nullptr;
const LocationDefinition *locationDefPtr = nullptr;
if (newWorldMapLocationIDs.has_value())
{
provinceDefPtr = &this->worldMapDef.getProvinceDef(newWorldMapLocationIDs->provinceID);
locationDefPtr = &provinceDefPtr->getLocationDef(newWorldMapLocationIDs->locationID);
}
else
{
// Use existing world map location (likely a wilderness->city transition).
provinceDefPtr = &this->getProvinceDefinition();
locationDefPtr = &this->getLocationDefinition();
}
const LocationDefinition::CityDefinition &cityDef = locationDefPtr->getCityDefinition();
WeatherDefinition weatherDef = [&overrideWeather, &cityDef]()
{
if (overrideWeather.has_value())
{
// Use this when we don't want to randomly generate the weather.
return WeatherUtils::getFilteredWeather(*overrideWeather, cityDef.climateType);
}
else
{
WeatherDefinition def;
def.initClear(); // @todo: generate the weather for this location.
return def;
}
}();
MapState mapState;
mapState.init(std::move(mapDefinition), std::move(mapInstance), std::move(weatherDef), std::nullopt);
CitizenUtils::CitizenGenInfo citizenGenInfo = CitizenUtils::makeCitizenGenInfo(
provinceDefPtr->getRaceID(), cityDef.climateType, entityDefLibrary, textureManager);
constexpr std::optional<bool> enteringInteriorFromExterior; // Unused for exteriors.
this->nextMap = std::make_unique<MapTransitionState>();
this->nextMap->init(std::move(mapState), newWorldMapLocationIDs, std::move(citizenGenInfo),
startCoord, enteringInteriorFromExterior);
// @todo: hack to make fast travel not crash when iterating stale distant objects in renderer
renderer.clearSky();
return true;
}
bool GameState::trySetWilderness(const MapGeneration::WildGenInfo &wildGenInfo,
const SkyGeneration::ExteriorSkyGenInfo &skyGenInfo, const std::optional<WeatherDefinition> &overrideWeather,
const std::optional<CoordInt3> &startCoord, const std::optional<WorldMapLocationIDs> &newWorldMapLocationIDs,
const CharacterClassLibrary &charClassLibrary, const EntityDefinitionLibrary &entityDefLibrary,
const BinaryAssetLibrary &binaryAssetLibrary, TextureManager &textureManager, Renderer &renderer)
{
DebugAssertMsg(this->nextMap == nullptr, "Already have a map to transition to.");
// @todo: try to get gate position if current active map is for city -- need to have saved it from when the
// gate was clicked in GameWorldPanel.
MapDefinition mapDefinition;
if (!mapDefinition.initWild(wildGenInfo, skyGenInfo, charClassLibrary, entityDefLibrary,
binaryAssetLibrary, textureManager))
{
DebugLogError("Couldn't init wild map from generation info.");
return false;
}
MapInstance mapInstance;
mapInstance.init(mapDefinition, skyGenInfo.currentDay, textureManager);
// Wilderness start point depends on city gate the player is coming out of.
DebugAssert(mapDefinition.getStartPointCount() == 0);
const CoordInt2 actualStartCoord = [&startCoord]()
{
if (startCoord.has_value())
{
return CoordInt2(startCoord->chunk, VoxelInt2(startCoord->voxel.x, startCoord->voxel.z));
}
else
{
// Don't have a city gate reference. Just pick somewhere in the center of the wilderness.
return CoordInt2(
ChunkInt2(ArenaWildUtils::WILD_WIDTH / 2, ArenaWildUtils::WILD_HEIGHT / 2),
VoxelInt2::Zero);
}
}();
const ProvinceDefinition *provinceDefPtr = nullptr;
const LocationDefinition *locationDefPtr = nullptr;
if (newWorldMapLocationIDs.has_value())
{
provinceDefPtr = &this->worldMapDef.getProvinceDef(newWorldMapLocationIDs->provinceID);
locationDefPtr = &provinceDefPtr->getLocationDef(newWorldMapLocationIDs->locationID);
}
else
{
// Use existing world map location (likely a city->wilderness transition).
provinceDefPtr = &this->getProvinceDefinition();
locationDefPtr = &this->getLocationDefinition();
}
const LocationDefinition::CityDefinition &cityDef = locationDefPtr->getCityDefinition();
WeatherDefinition weatherDef = [&overrideWeather, &cityDef]()
{
if (overrideWeather.has_value())
{
// Use this when we don't want to randomly generate the weather.
return WeatherUtils::getFilteredWeather(*overrideWeather, cityDef.climateType);
}
else
{
WeatherDefinition def;
def.initClear(); // @todo: generate the weather for this location.
return def;
}
}();
MapState mapState;
mapState.init(std::move(mapDefinition), std::move(mapInstance), std::move(weatherDef), std::nullopt);
CitizenUtils::CitizenGenInfo citizenGenInfo = CitizenUtils::makeCitizenGenInfo(
provinceDefPtr->getRaceID(), cityDef.climateType, entityDefLibrary, textureManager);
constexpr std::optional<bool> enteringInteriorFromExterior; // Unused for exteriors.
this->nextMap = std::make_unique<MapTransitionState>();
this->nextMap->init(std::move(mapState), newWorldMapLocationIDs, std::move(citizenGenInfo),
actualStartCoord, enteringInteriorFromExterior);
// @todo: hack to make fast travel not crash when iterating stale distant objects in renderer
renderer.clearSky();
return true;
}
bool GameState::tryPopMap(const EntityDefinitionLibrary &entityDefLibrary,
const BinaryAssetLibrary &binaryAssetLibrary, TextureManager &textureManager, Renderer &renderer)
{
if (this->maps.size() == 0)
{
DebugLogError("No map available to pop.");
return false;
}
this->maps.pop();
if (this->maps.size() == 0)
{
DebugLogError("No map available to set active.");
return false;
}
MapState &activeMapState = this->maps.top();
const MapDefinition &activeMapDef = activeMapState.definition;
const MapType activeMapType = activeMapDef.getMapType();
MapInstance &activeMapInst = activeMapState.instance;
const int activeLevelIndex = activeMapInst.getActiveLevelIndex();
LevelInstance &activeLevelInst = activeMapInst.getActiveLevel();
SkyInstance &activeSkyInst = activeMapInst.getActiveSky();
const std::optional<CoordInt3> &returnCoord = activeMapState.returnCoord;
// @todo: need a condition to determine if we need to recalculate the weather (i.e., if the player slept
// in an interior).
WeatherDefinition activeWeatherDef = activeMapState.weatherDef;
const CoordInt2 startCoord = [&activeMapDef, &returnCoord]()
{
// Use the return voxel as the start point if the now-activated map has one.
if (returnCoord.has_value())
{
return CoordInt2(returnCoord->chunk, VoxelInt2(returnCoord->voxel.x, returnCoord->voxel.z));
}
else
{
// Too complex to determine (based on interior/city/wild), so just don't support for now.
DebugUnhandledReturn(CoordInt2);
}
}();
const std::optional<CitizenUtils::CitizenGenInfo> citizenGenInfo = [this, &entityDefLibrary, &textureManager,
activeMapType]() -> std::optional<CitizenUtils::CitizenGenInfo>
{
if ((activeMapType == MapType::City) || (activeMapType == MapType::Wilderness))
{
const ProvinceDefinition &provinceDef = this->getProvinceDefinition();
const LocationDefinition &locationDef = this->getLocationDefinition();
const LocationDefinition::CityDefinition &cityDef = locationDef.getCityDefinition();
return CitizenUtils::makeCitizenGenInfo(provinceDef.getRaceID(), cityDef.climateType,
entityDefLibrary, textureManager);
}
else
{
return std::nullopt;
}
}();
// Set level active in the renderer.
if (!this->trySetLevelActive(activeLevelInst, activeLevelIndex, std::move(activeWeatherDef), startCoord,
citizenGenInfo, entityDefLibrary, binaryAssetLibrary, textureManager, renderer))
{
DebugLogError("Couldn't set level active in the renderer for previously active level.");
return false;
}
if (!this->trySetSkyActive(activeSkyInst, activeLevelIndex, textureManager, renderer))
{
DebugLogError("Couldn't set sky active in the renderer for previously active level.");
return false;
}
return true;
}
Player &GameState::getPlayer()
{
return this->player;
}
const MapDefinition &GameState::getActiveMapDef() const
{
if (this->nextMap != nullptr)
{
return this->nextMap->mapState.definition;
}
else
{
DebugAssert(!this->maps.empty());
const MapState &activeMapState = this->maps.top();
return activeMapState.definition;
}
}
MapInstance &GameState::getActiveMapInst()
{
if (this->nextMap != nullptr)
{
return this->nextMap->mapState.instance;
}
else
{
DebugAssert(!this->maps.empty());
MapState &activeMapState = this->maps.top();
return activeMapState.instance;
}
}
const MapInstance &GameState::getActiveMapInst() const
{
if (this->nextMap != nullptr)
{
return this->nextMap->mapState.instance;
}
else
{
DebugAssert(!this->maps.empty());
const MapState &activeMapState = this->maps.top();
return activeMapState.instance;
}
}
bool GameState::isActiveMapNested() const
{
return this->maps.size() >= 2;
}
WorldMapInstance &GameState::getWorldMapInstance()
{
return this->worldMapInst;
}
const WorldMapDefinition &GameState::getWorldMapDefinition() const
{
return this->worldMapDef;
}
const ProvinceDefinition &GameState::getProvinceDefinition() const
{
const int index = ((this->nextMap != nullptr) && this->nextMap->worldMapLocationIDs.has_value()) ?
this->nextMap->worldMapLocationIDs->provinceID : this->provinceIndex;
return this->worldMapDef.getProvinceDef(index);
}
const LocationDefinition &GameState::getLocationDefinition() const
{
const ProvinceDefinition &provinceDef = this->getProvinceDefinition();
const int index = ((this->nextMap != nullptr) && this->nextMap->worldMapLocationIDs.has_value()) ?
this->nextMap->worldMapLocationIDs->locationID : this->locationIndex;
return provinceDef.getLocationDef(index);
}
ProvinceInstance &GameState::getProvinceInstance()
{
const int index = ((this->nextMap != nullptr) && this->nextMap->worldMapLocationIDs.has_value()) ?
this->nextMap->worldMapLocationIDs->provinceID : this->provinceIndex;
return this->worldMapInst.getProvinceInstance(index);
}
LocationInstance &GameState::getLocationInstance()
{
ProvinceInstance &provinceInst = this->getProvinceInstance();
const int index = ((this->nextMap != nullptr) && this->nextMap->worldMapLocationIDs.has_value()) ?
this->nextMap->worldMapLocationIDs->locationID : this->locationIndex;
return provinceInst.getLocationInstance(index);
}
const ProvinceMapUiModel::TravelData *GameState::getTravelData() const
{
return this->travelData.get();
}
const GameState::WeatherList &GameState::getWeathersArray() const
{
return this->weathers;
}
Date &GameState::getDate()
{
return this->date;
}
Clock &GameState::getClock()
{
return this->clock;
}
ArenaRandom &GameState::getRandom()
{
return this->arenaRandom;
}
double GameState::getDaytimePercent() const
{
return this->clock.getPreciseTotalSeconds() /
static_cast<double>(Clock::SECONDS_IN_A_DAY);
}
double GameState::getChasmAnimPercent() const
{
const double percent = this->chasmAnimSeconds / ArenaVoxelUtils::CHASM_ANIM_SECONDS;
return std::clamp(percent, 0.0, Constants::JustBelowOne);
}
const WeatherDefinition &GameState::getWeatherDefinition() const
{
return this->weatherDef;
}
const WeatherInstance &GameState::getWeatherInstance() const
{
return this->weatherInst;
}
double GameState::getAmbientPercent() const
{
const MapDefinition *activeMapDef = nullptr;
if (this->nextMap != nullptr)
{
activeMapDef = &this->nextMap->mapState.definition;
}
else
{
DebugAssert(!this->maps.empty());
activeMapDef = &this->maps.top().definition;
}
DebugAssert(activeMapDef != nullptr);
const MapType activeMapType = activeMapDef->getMapType();
if (activeMapType == MapType::Interior)
{
// Completely dark indoors (some places might be an exception to this, and those
// would be handled eventually).
return 0.0;
}
else
{
// The ambient light outside depends on the clock time.
const double clockPreciseSeconds = this->clock.getPreciseTotalSeconds();
// Time ranges where the ambient light changes. The start times are inclusive,
// and the end times are exclusive.
const double startBrighteningTime = ArenaClockUtils::AmbientStartBrightening.getPreciseTotalSeconds();
const double endBrighteningTime = ArenaClockUtils::AmbientEndBrightening.getPreciseTotalSeconds();
const double startDimmingTime = ArenaClockUtils::AmbientStartDimming.getPreciseTotalSeconds();
const double endDimmingTime = ArenaClockUtils::AmbientEndDimming.getPreciseTotalSeconds();
// In Arena, the min ambient is 0 and the max ambient is 1, but we're using
// some values here that make testing easier.
constexpr double minAmbient = 0.15;
constexpr double maxAmbient = 1.0;
if ((clockPreciseSeconds >= endBrighteningTime) &&
(clockPreciseSeconds < startDimmingTime))
{
// Daytime ambient.
return maxAmbient;
}
else if ((clockPreciseSeconds >= startBrighteningTime) &&
(clockPreciseSeconds < endBrighteningTime))
{
// Interpolate brightening light (in the morning).
const double timePercent = (clockPreciseSeconds - startBrighteningTime) /
(endBrighteningTime - startBrighteningTime);
return minAmbient + ((maxAmbient - minAmbient) * timePercent);
}
else if ((clockPreciseSeconds >= startDimmingTime) &&
(clockPreciseSeconds < endDimmingTime))
{
// Interpolate dimming light (in the evening).
const double timePercent = (clockPreciseSeconds - startDimmingTime) /
(endDimmingTime - startDimmingTime);
return maxAmbient + ((minAmbient - maxAmbient) * timePercent);
}
else
{
// Night ambient.
return minAmbient;
}
}
}
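// Note on the formula below: -cos(2*pi*t) is -1 at t = 0 (midnight) and +1 at
// t = 0.5 (noon), so the returned ambient swings smoothly between minAmbient and
// maxAmbient around their midpoint over the course of the day.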
double GameState::getBetterAmbientPercent() const
{
const double daytimePercent = this->getDaytimePercent();
const double minAmbient = 0.20;
const double maxAmbient = 0.90;
const double diff = maxAmbient - minAmbient;
const double center = minAmbient + (diff / 2.0);
return center + ((diff / 2.0) * -std::cos(daytimePercent * (2.0 * Constants::Pi)));
}
bool GameState::nightMusicIsActive() const
{
const double clockTime = this->clock.getPreciseTotalSeconds();
const bool beforeDayMusicChange = clockTime < ArenaClockUtils::MusicSwitchToDay.getPreciseTotalSeconds();
const bool afterNightMusicChange = clockTime >= ArenaClockUtils::MusicSwitchToNight.getPreciseTotalSeconds();
return beforeDayMusicChange || afterNightMusicChange;
}
bool GameState::nightLightsAreActive() const
{
const double clockTime = this->clock.getPreciseTotalSeconds();
const bool beforeLamppostDeactivate = clockTime < ArenaClockUtils::LamppostDeactivate.getPreciseTotalSeconds();
const bool afterLamppostActivate = clockTime >= ArenaClockUtils::LamppostActivate.getPreciseTotalSeconds();
return beforeLamppostDeactivate || afterLamppostActivate;
}
std::function<void(Game&)> &GameState::getOnLevelUpVoxelEnter()
{
return this->onLevelUpVoxelEnter;
}
bool GameState::triggerTextIsVisible() const
{
return this->triggerTextRemainingSeconds > 0.0;
}
bool GameState::actionTextIsVisible() const
{
return this->actionTextRemainingSeconds > 0.0;
}
bool GameState::effectTextIsVisible() const
{
return this->effectTextRemainingSeconds > 0.0;
}
void GameState::setIsCamping(bool isCamping)
{
this->isCamping = isCamping;
}
void GameState::setTravelData(std::unique_ptr<ProvinceMapUiModel::TravelData> travelData)
{
this->travelData = std::move(travelData);
}
void GameState::setTriggerTextDuration(const std::string_view &text)
{
this->triggerTextRemainingSeconds = GameWorldUiView::getTriggerTextSeconds(text);
}
void GameState::setActionTextDuration(const std::string_view &text)
{
this->actionTextRemainingSeconds = GameWorldUiView::getActionTextSeconds(text);
}
void GameState::setEffectTextDuration(const std::string_view &text)
{
// @todo
DebugNotImplemented();
}
void GameState::resetTriggerTextDuration()
{
this->triggerTextRemainingSeconds = 0.0;
}
void GameState::resetActionTextDuration()
{
this->actionTextRemainingSeconds = 0.0;
}
void GameState::resetEffectTextDuration()
{
this->effectTextRemainingSeconds = 0.0;
}
void GameState::setTransitionedPlayerPosition(const CoordDouble3 &position)
{
this->player.teleport(position);
this->player.setVelocityToZero();
}
bool GameState::trySetLevelActive(LevelInstance &levelInst, const std::optional<int> &activeLevelIndex,
WeatherDefinition &&weatherDef, const CoordInt2 &startCoord,
const std::optional<CitizenUtils::CitizenGenInfo> &citizenGenInfo,
const EntityDefinitionLibrary &entityDefLibrary, const BinaryAssetLibrary &binaryAssetLibrary,
TextureManager &textureManager, Renderer &renderer)
{
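// Spawn the player horizontally centered in the start voxel; the vertical
// position is the level's ceiling scale plus the player height.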
const VoxelDouble2 startVoxelReal = VoxelUtils::getVoxelCenter(startCoord.voxel);
const CoordDouble3 playerPos(
startCoord.chunk,
VoxelDouble3(startVoxelReal.x, levelInst.getCeilingScale() + Player::HEIGHT, startVoxelReal.y));
this->setTransitionedPlayerPosition(playerPos);
this->weatherDef = std::move(weatherDef);
Random weatherRandom; // Cosmetic random.
this->weatherInst = WeatherInstance(); // Make sure to reset weather instance.
this->weatherInst.init(this->weatherDef, this->clock, binaryAssetLibrary.getExeData(),
weatherRandom, textureManager);
DebugAssert(this->maps.size() > 0);
const MapDefinition &mapDefinition = this->maps.top().definition;
if (!levelInst.trySetActive(this->weatherDef, this->nightLightsAreActive(), activeLevelIndex,
mapDefinition, citizenGenInfo, textureManager, renderer))
{
DebugLogError("Couldn't set level active in the renderer.");
return false;
}
return true;
}
bool GameState::trySetSkyActive(SkyInstance &skyInst, const std::optional<int> &activeLevelIndex,
TextureManager &textureManager, Renderer &renderer)
{
DebugAssert(this->maps.size() > 0);
const MapDefinition &mapDefinition = this->maps.top().definition;
if (!skyInst.trySetActive(activeLevelIndex, mapDefinition, textureManager, renderer))
{
DebugLogError("Couldn't set sky active in renderer.");
return false;
}
return true;
}
bool GameState::tryApplyMapTransition(MapTransitionState &&transitionState,
const EntityDefinitionLibrary &entityDefLibrary, const BinaryAssetLibrary &binaryAssetLibrary,
TextureManager &textureManager, Renderer &renderer)
{
MapState &nextMapState = transitionState.mapState;
WeatherDefinition nextWeatherDef = nextMapState.weatherDef;
// Clear map stack if it's not entering an interior from an exterior.
if (!transitionState.enteringInteriorFromExterior.has_value() ||
!(*transitionState.enteringInteriorFromExterior))
{
this->clearMaps();
}
this->maps.emplace(std::move(nextMapState));
if (transitionState.worldMapLocationIDs.has_value())
{
this->provinceIndex = transitionState.worldMapLocationIDs->provinceID;
this->locationIndex = transitionState.worldMapLocationIDs->locationID;
}
MapInstance &newMapInst = this->maps.top().instance;
const int newLevelInstIndex = newMapInst.getActiveLevelIndex();
LevelInstance &newLevelInst = newMapInst.getActiveLevel();
SkyInstance &newSkyInst = newMapInst.getActiveSky();
if (!this->trySetLevelActive(newLevelInst, newLevelInstIndex, std::move(nextWeatherDef),
transitionState.startCoord, transitionState.citizenGenInfo, entityDefLibrary, binaryAssetLibrary,
textureManager, renderer))
{
DebugLogError("Couldn't set new level active.");
return false;
}
if (!this->trySetSkyActive(newSkyInst, newLevelInstIndex, textureManager, renderer))
{
DebugLogError("Couldn't set new sky active.");
return false;
}
return true;
}
void GameState::clearMaps()
{
while (!this->maps.empty())
{
this->maps.pop();
}
}
void GameState::updateWeatherList(const ExeData &exeData)
{
const int seasonIndex = this->date.getSeason();
for (size_t i = 0; i < this->weathers.size(); i++)
{
static_assert(std::tuple_size<decltype(exeData.locations.climates)>::value ==
std::tuple_size<decltype(this->weathers)>::value);
const int climateIndex = exeData.locations.climates[i];
const int variantIndex = [this]()
{
// 40% for 2, 20% for 1, 20% for 3, 10% for 0, and 10% for 4.
const int val = this->arenaRandom.next() % 100;
if (val >= 60)
{
return 2;
}
else if (val >= 40)
{
return 1;
}
else if (val >= 20)
{
return 3;
}
else if (val >= 10)
{
return 0;
}
else
{
return 4;
}
}();
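// The executable's weather table is indexed per climate in blocks of 20
// (4 seasons x 5 variants) and per season in blocks of 5, hence the offsets below.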
const int weatherTableIndex = (climateIndex * 20) + (seasonIndex * 5) + variantIndex;
this->weathers[i] = static_cast<ArenaTypes::WeatherType>(
exeData.locations.weatherTable.at(weatherTableIndex));
}
}
void GameState::tick(double dt, Game &game)
{
DebugAssert(dt >= 0.0);
// See if there is a pending map transition.
if (this->nextMap != nullptr)
{
if (!this->tryApplyMapTransition(std::move(*this->nextMap), game.getEntityDefinitionLibrary(),
game.getBinaryAssetLibrary(), game.getTextureManager(), game.getRenderer()))
{
DebugLogError("Couldn't apply map transition.");
}
this->nextMap = nullptr;
}
// Tick the game clock.
const int oldHour = this->clock.getHours24();
const double timeScale = GameState::GAME_TIME_SCALE * (this->isCamping ? 250.0 : 1.0);
this->clock.tick(dt * timeScale);
const int newHour = this->clock.getHours24();
// Check if the hour changed.
if (newHour != oldHour)
{
// Update the weather list that's used for selecting the current one.
const auto &exeData = game.getBinaryAssetLibrary().getExeData();
this->updateWeatherList(exeData);
}
// Check if the clock hour looped back around.
if (newHour < oldHour)
{
// Increment the day.
this->date.incrementDay();
}
// Tick chasm animation.
this->chasmAnimSeconds += dt;
if (this->chasmAnimSeconds >= ArenaVoxelUtils::CHASM_ANIM_SECONDS)
{
this->chasmAnimSeconds = std::fmod(this->chasmAnimSeconds, ArenaVoxelUtils::CHASM_ANIM_SECONDS);
}
// Tick weather.
const Renderer &renderer = game.getRenderer();
this->weatherInst.update(dt, this->clock, renderer.getWindowAspect(), game.getRandom(), game.getAudioManager());
// Tick on-screen text messages.
if (this->triggerTextIsVisible())
{
this->triggerTextRemainingSeconds -= dt;
}
if (this->actionTextIsVisible())
{
this->actionTextRemainingSeconds -= dt;
}
if (this->effectTextIsVisible())
{
this->effectTextRemainingSeconds -= dt;
}
}
| 12,782 |
675 | /*
* H.265 video codec.
* Copyright (c) 2013-2014 struktur AG, <NAME> <<EMAIL>>
*
* Authors: <NAME> <<EMAIL>>
*
* This file is part of libde265.
*
* libde265 is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* libde265 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with libde265. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef CODING_OPTIONS_H
#define CODING_OPTIONS_H
#include "libde265/encoder/encoder-types.h"
template <class node> class CodingOption;
template <class node>
class CodingOptions
{
public:
CodingOptions(encoder_context*, node*, context_model_table& tab);
~CodingOptions();
typedef CodingOption<node> Option;
// --- init --- call before object use
CodingOption<node> new_option(bool active=true);
enum RateEstimationMethod
{
Rate_Default, // take default value from encoder_context
Rate_AdaptiveContext,
Rate_FixedContext
};
void start(enum RateEstimationMethod = Rate_Default);
// --- processing ---
// compute RDO cost (D + lambda*R) for all options
void compute_rdo_costs();
// --- end processing --- do not call any function after this one
/* Return the CB with the lowest RDO cost. All other CBs are destroyed.
If the current metadata stored in the image are not from the returned block,
its metadata flags are set to zero.
*/
node* return_best_rdo_node();
private:
struct CodingOptionData
{
node* mNode;
context_model_table context;
bool mOptionActive;
bool computed;
float rdoCost;
};
encoder_context* mECtx;
bool mCBMode;
node* mInputNode;
context_model_table* mContextModelInput;
int mBestRDO;
std::vector<CodingOptionData> mOptions;
CABAC_encoder_estim cabac_adaptive;
CABAC_encoder_estim_constant cabac_constant;
CABAC_encoder_estim* cabac;
friend class CodingOption<node>;
int find_best_rdo_index();
};
template <class node>
class CodingOption
{
public:
CodingOption() {
mParent = nullptr;
mOptionIdx = 0;
}
node* get_node() { return mParent->mOptions[mOptionIdx].mNode; }
void set_node(node* _node) {
if (_node != mParent->mOptions[mOptionIdx].mNode) {
//printf("delete TB %p\n", mParent->mOptions[mOptionIdx].tb);
//delete mParent->mOptions[mOptionIdx].mNode;
}
mParent->mOptions[mOptionIdx].mNode = _node;
}
context_model_table& get_context() { return mParent->mOptions[mOptionIdx].context; }
/** @return True if the option is active.
*/
operator bool() const { return mParent; }
/* When modifying the metadata stored in the image, you have to
encapsulate the modification between these two functions to ensure
that the correct reconstruction will be active after return_best_rdo().
*/
void begin();
void end();
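// Minimal usage sketch (assumed names: `ectx`, `parentNode`, `tryVariant` and the
// node type `enc_node` are placeholders, not declarations from this header):
//
//   CodingOptions<enc_node> options(ectx, parentNode, ctxModels);
//   CodingOption<enc_node> option = options.new_option();
//   options.start();
//   option.begin();
//   option.set_node(tryVariant(option.get_node())); // may modify image metadata
//   option.end();
//   options.compute_rdo_costs();
//   enc_node* best = options.return_best_rdo_node();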
// Manually set RDO costs instead of computing them with compute_rdo_costs.
// Only required when using custom costs.
void set_rdo_cost(float rdo) { mParent->mOptions[mOptionIdx].rdoCost=rdo; }
CABAC_encoder_estim* get_cabac() { return mParent->cabac; }
float get_cabac_rate() const { return mParent->cabac->getRDBits(); }
private:
CodingOption(class CodingOptions<node>* parent, int idx)
: mParent(parent), mOptionIdx(idx) { }
class CodingOptions<node>* mParent;
int mOptionIdx;
friend class CodingOptions<node>;
};
#endif
| 1,296 |
3,285 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import time
from datetime import datetime
import alexnet_model
import benchmark_util
import data_loader
import resnet_model
import vgg_model
from oneflow.compatible import single_client as flow
parser = argparse.ArgumentParser(description="flags for cnn benchmark")
parser.add_argument("--gpu_num_per_node", type=int, default=1, required=False)
parser.add_argument("--node_num", type=int, default=1)
parser.add_argument(
"--node_list",
type=str,
default=None,
required=False,
help="nodes' IP address, split by comma",
)
parser.add_argument(
"--model", type=str, default="vgg16", required=False, help="vgg16 or resnet50"
)
parser.add_argument("--batch_size_per_device", type=int, default=8, required=False)
parser.add_argument("--learning_rate", type=float, default=0.0001, required=False)
parser.add_argument(
"--optimizer", type=str, default="sgd", required=False, help="sgd, adam, momentum"
)
parser.add_argument(
"--weight_l2",
type=float,
default=None,
required=False,
help="weight decay parameter",
)
parser.add_argument(
"--iter_num", type=int, default=10, required=False, help="total iterations to run"
)
parser.add_argument(
"--skip_iter_num",
type=int,
default=0,
required=False,
help="number of iterations to skip, for benchmarking purposes.",
)
parser.add_argument(
"--data_dir", type=str, default=None, required=False, help="dataset directory"
)
parser.add_argument(
"--data_part_num",
type=int,
default=32,
required=False,
help="data part number in dataset",
)
parser.add_argument(
"--gpu_image_decoder",
type=bool,
default=False,
required=False,
help="Whether to use ImageDecoderRandomCropResize.",
)
parser.add_argument(
"--image_size", type=int, default=228, required=False, help="image size"
)
parser.add_argument(
"--loss_print_every_n_iter",
type=int,
default=1,
required=False,
help="print loss every n iteration",
)
parser.add_argument(
"--model_save_every_n_iter",
type=int,
default=200,
required=False,
help="save model every n iteration",
)
parser.add_argument(
"--model_save_dir",
type=str,
default="./output/model_save-{}".format(
str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
),
required=False,
help="model save directory",
)
parser.add_argument(
"--save_last_snapshot",
type=bool,
default=False,
required=False,
help="save model snapshot for last iteration",
)
parser.add_argument(
"--model_load_dir",
type=str,
default=None,
required=False,
help="model load directory",
)
parser.add_argument(
"--log_dir",
type=str,
default="./output",
required=False,
help="log info save directory",
)
parser.add_argument(
"--enable_auto_mixed_precision",
type=bool,
default=False,
required=False,
help="automatically change the float net into mixed precision net",
)
args = parser.parse_args()
model_dict = {
"resnet50": resnet_model.resnet50,
"vgg16": vgg_model.vgg16,
"alexnet": alexnet_model.alexnet,
}
func_config = flow.FunctionConfig()
func_config.default_distribute_strategy(flow.scope.consistent_view())
func_config.default_data_type(flow.float)
func_config.enable_auto_mixed_precision(args.enable_auto_mixed_precision)
if args.weight_l2:
func_config.train.weight_l2(args.weight_l2)
flow.config.gpu_device_num(args.gpu_num_per_node)
def set_up_optimizer(loss, args):
loss_scale_policy = None
if args.enable_auto_mixed_precision:
loss_scale_policy = flow.optimizer.loss_scale.dynamic_loss_scale(
increment_period=2000
)
if args.optimizer == "sgd":
print("Optimizer: SGD")
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate]),
loss_scale_policy=loss_scale_policy,
).minimize(loss)
elif args.optimizer == "momentum":
print("Optimizer: Momentum")
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate]),
momentum=0.9,
loss_scale_policy=loss_scale_policy,
).minimize(loss)
elif args.optimizer == "adam":
print("Optimizer: Adam")
flow.optimizer.Adam(
flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate]),
beta1=0.9,
loss_scale_policy=loss_scale_policy,
).minimize(loss)
@flow.global_function(func_config)
def TrainNet():
total_device_num = args.node_num * args.gpu_num_per_node
batch_size = total_device_num * args.batch_size_per_device
if args.data_dir:
assert os.path.exists(args.data_dir)
print("Loading data from {}".format(args.data_dir))
(labels, images) = data_loader.load_imagenet(
args.data_dir,
args.image_size,
batch_size,
args.data_part_num,
args.gpu_image_decoder,
)
else:
print("Loading synthetic data.")
(labels, images) = data_loader.load_synthetic(args.image_size, batch_size)
logits = model_dict[args.model](images)
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
labels, logits, name="softmax_loss"
)
set_up_optimizer(loss, args)
return loss
def main():
print("=".ljust(66, "="))
print(
"Running {}: num_gpu_per_node = {}, num_nodes = {}.".format(
args.model, args.gpu_num_per_node, args.node_num
)
)
print("=".ljust(66, "="))
for arg in vars(args):
print("{} = {}".format(arg, getattr(args, arg)))
print("-".ljust(66, "-"))
print("Time stamp: {}".format(str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))))
flow.env.log_dir(args.log_dir)
if args.node_num > 1:
nodes = []
for n in args.node_list.strip().split(","):
addr_dict = {}
addr_dict["addr"] = n
nodes.append(addr_dict)
flow.env.machine(nodes)
check_point = flow.train.CheckPoint()
if args.model_load_dir:
assert os.path.isdir(args.model_load_dir)
print("Restoring model from {}.".format(args.model_load_dir))
check_point.load(args.model_load_dir)
else:
print("Init model on demand.")
check_point.init()
total_batch_size = (
args.node_num * args.gpu_num_per_node * args.batch_size_per_device
)
speedometer = benchmark_util.CNNSpeedometer()
start_time = time.time()
for step in range(args.skip_iter_num + args.iter_num):
cb = speedometer.speedometer_cb(
step,
start_time,
total_batch_size,
args.skip_iter_num,
args.iter_num,
args.loss_print_every_n_iter,
)
TrainNet().async_get(cb)
if (step + 1) % args.model_save_every_n_iter == 0:
if not os.path.exists(args.model_save_dir):
os.makedirs(args.model_save_dir)
snapshot_save_path = os.path.join(
args.model_save_dir, "snapshot_%d" % (step + 1)
)
print("Saving model to {}.".format(snapshot_save_path))
check_point.save(snapshot_save_path)
if args.save_last_snapshot:
snapshot_save_path = os.path.join(args.model_save_dir, "last_snapshot")
if not os.path.exists(snapshot_save_path):
os.makedirs(snapshot_save_path)
print("Saving model to {}.".format(snapshot_save_path))
check_point.save(snapshot_save_path)
if __name__ == "__main__":
main()
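# Example invocation (illustrative only; the script filename and data directory are
# placeholders, while every flag shown is defined by the argparse setup above):
#
#   python3 cnn_benchmark.py --model resnet50 --gpu_num_per_node 1 \
#       --batch_size_per_device 8 --data_dir /path/to/ofrecord --iter_num 100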
| 3,495 |