# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import # pylint: disable=wrong-import-position
import contextlib
import os
import six
LOCK_EX = None # Exclusive lock
LOCK_SH = None # Shared lock
LOCK_NB = None # Non-blocking (LockException is raised if resource is locked)
class LockException(Exception):
pass
# pylint: disable=import-error
# pylint: disable=wrong-import-position
if os.name == 'nt':
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0 # the default
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
_OVERLAPPED = pywintypes.OVERLAPPED()
elif os.name == 'posix':
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
# pylint: enable=import-error
# pylint: enable=wrong-import-position
@contextlib.contextmanager
def FileLock(target_file, flags):
""" Lock the target file. Similar to AcquireFileLock but allow user to write:
with FileLock(f, LOCK_EX):
...do stuff on file f without worrying about race condition
Args: see AcquireFileLock's documentation.
"""
AcquireFileLock(target_file, flags)
try:
yield
finally:
ReleaseFileLock(target_file)
def AcquireFileLock(target_file, flags):
""" Lock the target file. Note that if |target_file| is closed, the lock is
automatically released.
Args:
    target_file: file handle of the file to acquire the lock on.
    flags: any of LOCK_EX, LOCK_SH, LOCK_NB, or a bitwise OR
      combination of these flags.
"""
assert flags in (
LOCK_EX, LOCK_SH, LOCK_NB, LOCK_EX | LOCK_NB, LOCK_SH | LOCK_NB)
if os.name == 'nt':
_LockImplWin(target_file, flags)
elif os.name == 'posix':
_LockImplPosix(target_file, flags)
else:
raise NotImplementedError('%s is not supported' % os.name)
def ReleaseFileLock(target_file):
""" Unlock the target file.
Args:
    target_file: file handle of the file to release the lock on.
"""
if os.name == 'nt':
_UnlockImplWin(target_file)
elif os.name == 'posix':
_UnlockImplPosix(target_file)
else:
raise NotImplementedError('%s is not supported' % os.name)
# These implementations are based on
# http://code.activestate.com/recipes/65203/
def _LockImplWin(target_file, flags):
hfile = win32file._get_osfhandle(target_file.fileno())
try:
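    # Lock 0xFFFF0000 bytes starting at offset 0: the -0x10000 argument is
    # reinterpreted as an unsigned 32-bit value, following the recipe above.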
win32file.LockFileEx(hfile, flags, 0, -0x10000, _OVERLAPPED)
except pywintypes.error as exc_value:
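    # Error 33 is ERROR_LOCK_VIOLATION: another process holds the lock.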
if exc_value.args[0] == 33:
      six.raise_from(LockException('Error trying to acquire lock on %s: %s' %
                                   (target_file.name, exc_value.args[2])),
                     exc_value)
raise
def _UnlockImplWin(target_file):
hfile = win32file._get_osfhandle(target_file.fileno())
try:
win32file.UnlockFileEx(hfile, 0, -0x10000, _OVERLAPPED)
except pywintypes.error as exc_value:
if exc_value.args[0] == 158:
# error: (158, 'UnlockFileEx', 'The segment is already unlocked.')
# To match the 'posix' implementation, silently ignore this error
pass
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
def _LockImplPosix(target_file, flags):
try:
fcntl.flock(target_file.fileno(), flags)
except IOError as exc_value:
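    # errno 11 is EAGAIN on Linux; errno 35 is EAGAIN on macOS/BSD.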
if exc_value.args[0] == 11 or exc_value.args[0] == 35:
      six.raise_from(LockException('Error trying to acquire lock on %s: %s' %
                                   (target_file.name, exc_value.args[1])),
                     exc_value)
raise
def _UnlockImplPosix(target_file):
fcntl.flock(target_file.fileno(), fcntl.LOCK_UN)
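# Example usage (an illustrative sketch; 'data.txt' is a hypothetical file):
#
#   with open('data.txt', 'r+') as f:
#     with FileLock(f, LOCK_EX | LOCK_NB):
#       # Exclusive access; LockException is raised immediately if another
#       # process already holds the lock, instead of blocking.
#       f.write('...')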
// RUN: %clang_cc1 -triple x86_64-windows-msvc -emit-llvm %s -o - | FileCheck %s
// CHECK: @constinit = private global [3 x i8*] [i8* blockaddress(@main, %L), i8* null, i8* null]
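// This exercises the GNU address-of-label extension (&&L): taking the address
// of label L inside a compound-literal initializer must fold to a constant
// blockaddress, which the CHECK line above verifies.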
void receivePtrs(void **);
int main() {
L:
receivePtrs((void *[]){ &&L, 0, 0 });
}
// source/modules/soul_core/utilities/soul_ContainerUtilities.h
/*
_____ _____ _____ __
| __| | | | | The SOUL language
|__ | | | | | |__ Copyright (c) 2019 - ROLI Ltd.
|_____|_____|_____|_____|
The code in this file is provided under the terms of the ISC license:
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice and
this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN
NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
namespace soul
{
//==============================================================================
template <typename Vector, typename Type>
inline bool contains (const Vector& v, Type&& i)
{
    return std::find (std::begin (v), std::end (v), i) != std::end (v);
}
template <typename Vector, typename Predicate>
inline bool removeIf (Vector& v, Predicate&& pred)
{
auto oldEnd = std::end (v);
auto newEnd = std::remove_if (std::begin (v), oldEnd, pred);
if (newEnd == oldEnd)
return false;
v.erase (newEnd, oldEnd);
return true;
}
template <typename Vector, typename Predicate>
inline bool removeFirst (Vector& v, Predicate&& pred)
{
auto found = std::find_if (std::begin (v), std::end (v), pred);
if (found == std::end (v))
return false;
v.erase (found);
return true;
}
template <typename Vector, typename ItemType>
inline bool removeItem (Vector& v, ItemType&& itemToRemove)
{
auto found = std::find (std::begin (v), std::end (v), itemToRemove);
if (found == std::end (v))
return false;
v.erase (found);
return true;
}
template <typename Vector>
inline void sortAndRemoveDuplicates (Vector& v)
{
if (v.size() > 1)
{
std::sort (std::begin (v), std::end (v));
v.erase (std::unique (std::begin (v), std::end (v)), std::end (v));
}
}
template <typename Vector1, typename Vector2>
inline void appendVector (Vector1& dest, const Vector2& source)
{
dest.reserve (dest.size() + source.size());
for (auto& i : source)
dest.push_back (i);
}
template<typename Vector, typename Type>
inline bool appendIfNotPresent (Vector& v, Type&& i)
{
if (contains (v, i))
return false;
v.push_back (i);
return true;
}
template <typename Vector1, typename Vector2>
inline void copyVector (Vector1& dest, const Vector2& source)
{
dest.clear();
appendVector (dest, source);
}
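// Note: this "merge" works by appending and then re-sorting, so it also
// removes duplicates and does not require the inputs to be pre-sorted.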
template <typename Vector1, typename Vector2>
inline void mergeSortedVectors (Vector1& dest, const Vector2& source)
{
appendVector (dest, source);
sortAndRemoveDuplicates (dest);
}
template <typename Vector1, typename Vector2>
inline bool intersectVectors (Vector1& target, const Vector2& itemsToRetain)
{
return removeIf (target, [&] (auto& item) { return ! contains (itemsToRetain, item); });
}
template <typename Vector1, typename Vector2>
inline bool removeFromVector (Vector1& target, const Vector2& itemsToRemove)
{
return removeIf (target, [&] (auto& item) { return contains (itemsToRemove, item); });
}
template <typename Vector, typename Position>
inline auto getIteratorForIndex (Vector& vector, Position index)
{
return vector.begin() + static_cast<typename Vector::difference_type> (index);
}
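//==============================================================================
/** Recursively replaces every string inside a value with the result of calling
    the given conversion function on it. Arrays and objects are rebuilt
    element-by-element; all other value types are copied unchanged.
*/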
template <typename ConvertStringToValueFn>
static choc::value::Value replaceStringsWithValues (const choc::value::ValueView& value,
const ConvertStringToValueFn& convertStringToValue)
{
if (value.isString())
return convertStringToValue (value.getString());
if (value.isArray())
{
auto v = choc::value::createEmptyArray();
for (auto i : value)
v.addArrayElement (replaceStringsWithValues (i, convertStringToValue));
return v;
}
if (value.isObject())
{
auto v = choc::value::createObject (value.getObjectClassName());
value.visitObjectMembers ([&] (std::string_view memberName, const choc::value::ValueView& memberValue)
{
v.addMember (memberName, replaceStringsWithValues (memberValue, convertStringToValue));
});
return v;
}
return choc::value::Value (value);
}
//==============================================================================
/** A simple, intrusive single-linked-list.
The main use-case that this was written for is dealing with the list of statements
in a block, where using vectors is tricky because it's common to need to mutate
the list while iterating it.
*/
template <typename Type>
struct LinkedList
{
LinkedList() = default;
LinkedList (const LinkedList&) = default;
LinkedList& operator= (const LinkedList&) = default;
struct Iterator : public std::iterator<std::forward_iterator_tag, Type>
{
Iterator() = default;
Iterator (decltype (nullptr)) {}
Iterator (Type* o) : object (o) {}
Iterator (Type& o) : Iterator (std::addressof (o)) {}
Type* operator*() const { SOUL_ASSERT (object != nullptr); return object; }
Type* operator->() const { SOUL_ASSERT (object != nullptr); return object; }
operator bool() const { return object != nullptr; }
Iterator& operator++() { object = next(); return *this; }
Type* next() const { SOUL_ASSERT (object != nullptr); return object->nextObject; }
bool operator== (decltype (nullptr)) const { return object == nullptr; }
bool operator!= (decltype (nullptr)) const { return object != nullptr; }
bool operator== (Iterator other) const { return object == other.object; }
bool operator!= (Iterator other) const { return object != other.object; }
void removeAllSuccessors()
{
if (object != nullptr)
object->nextObject = nullptr;
}
private:
friend struct LinkedList;
void insertAfter (Type& newObject)
{
newObject.nextObject = next();
object->nextObject = std::addressof (newObject);
}
void replaceNext (Type& newObject)
{
SOUL_ASSERT (object != nullptr && object->nextObject != nullptr);
newObject.nextObject = object->nextObject->nextObject;
object->nextObject = std::addressof (newObject);
}
void removeNext()
{
SOUL_ASSERT (object != nullptr);
if (object->nextObject != nullptr)
object->nextObject = object->nextObject->nextObject;
}
Type* object = nullptr;
};
Iterator begin() const { return Iterator (firstObject); }
static Iterator end() { return {}; }
bool empty() const { return firstObject == nullptr; }
void clear() { firstObject = nullptr; }
Iterator getLast() const
{
if (auto o = firstObject)
{
while (o->nextObject != nullptr)
o = o->nextObject;
return Iterator (o);
}
return {};
}
Iterator getNext (Iterator predecessor) const
{
return predecessor == Iterator() ? begin() : predecessor.next();
}
Iterator getPredecessor (Type& object) const
{
Iterator last;
for (auto i : *this)
{
if (i == std::addressof (object))
return last;
last = i;
}
SOUL_ASSERT_FALSE;
return {};
}
bool contains (Type& object) const
{
for (auto i : *this)
if (i == std::addressof (object))
return true;
return false;
}
void insertFront (Type& newObject)
{
newObject.nextObject = firstObject;
firstObject = std::addressof (newObject);
}
void removeFront()
{
if (! empty())
firstObject = firstObject->nextObject;
}
void replaceFront (Type& newObject)
{
SOUL_ASSERT (firstObject != nullptr);
newObject.nextObject = firstObject->nextObject;
firstObject = std::addressof (newObject);
}
Iterator insertAfter (Iterator predecessor, Type& newObject)
{
if (predecessor == Iterator())
insertFront (newObject);
else
predecessor.insertAfter (newObject);
return Iterator (newObject);
}
void replaceAfter (Iterator predecessor, Type& newObject)
{
if (predecessor == Iterator())
replaceFront (newObject);
else
predecessor.replaceNext (newObject);
}
void removeNext (Iterator predecessor)
{
if (predecessor == Iterator())
removeFront();
else
predecessor.removeNext();
}
void append (Type& newObject)
{
if (auto last = getLast())
last->nextObject = std::addressof (newObject);
else
firstObject = std::addressof (newObject);
}
template <typename Predicate>
void removeMatches (Predicate&& shouldRemove)
{
while (! empty() && shouldRemove (*firstObject))
removeFront();
for (auto i : *this)
while (i->nextObject != nullptr && shouldRemove (*i->nextObject))
removeNext (*i);
}
void remove (Type& item)
{
removeMatches ([&] (Type& i) { return std::addressof (i) == std::addressof (item); });
}
template <typename Predicate>
void replaceMatches (Predicate&& getReplacement)
{
if (! empty())
{
for (;;)
{
if (auto replacement = getReplacement (*firstObject))
replaceFront (*replacement);
else
break;
}
for (auto i : *this)
{
while (i->nextObject != nullptr)
{
if (auto replacement = getReplacement (*i->nextObject))
replaceAfter (*i, *replacement);
else
break;
}
}
}
}
private:
Type* firstObject = nullptr;
};
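//==============================================================================
// Example usage of LinkedList (an illustrative sketch, not part of the library;
// Statement is a hypothetical element type):
//
//   struct Statement { Statement* nextObject = nullptr; int data = 0; };
//
//   LinkedList<Statement> list;
//   Statement a, b, c;
//   list.insertFront (b);   // list: b
//   list.insertFront (a);   // list: a -> b
//   list.append (c);        // list: a -> b -> c (the list never owns elements)
//
//   for (auto i : list)     // iterators dereference to Type*
//       process (i->data);
//
// Because the list is intrusive, the element type must expose a public
// 'nextObject' pointer member for the list and iterator to use.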
} // namespace soul
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from janitor import patterns
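# `patterns` builds a regex-based selector that pivot_longer accepts for the
# `index` and `column_names` arguments.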
@pytest.fixture
def df_checks():
return pd.DataFrame(
[
{"region": "Pacific", "2007": 1039, "2009": 2587},
{"region": "Southwest", "2007": 51, "2009": 176},
{"region": "Rocky Mountains and Plains", "2007": 200, "2009": 338},
]
)
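# df_checks is in wide form: one row per region and one column per year.
# pivot_longer should reshape it into long form (one row per region-year
# pair), as in the df_checks_output fixture below.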
@pytest.fixture
def df_checks_output():
return pd.DataFrame(
{
"region": [
"Pacific",
"Pacific",
"Southwest",
"Southwest",
"Rocky Mountains and Plains",
"Rocky Mountains and Plains",
],
"year": ["2007", "2009", "2007", "2009", "2007", "2009"],
"num_nests": [1039, 2587, 51, 176, 200, 338],
}
)
@pytest.fixture
def test_df():
return pd.DataFrame(
{
"off_loc": ["A", "B", "C", "D", "E", "F"],
"pt_loc": ["G", "H", "I", "J", "K", "L"],
"pt_lat": [
100.07548220000001,
75.191326,
122.65134479999999,
124.13553329999999,
124.13553329999999,
124.01028909999998,
],
"off_lat": [
121.271083,
75.93845266,
135.043791,
134.51128400000002,
134.484374,
137.962195,
],
"pt_long": [
4.472089953,
-144.387785,
-40.45611048,
-46.07156181,
-46.07156181,
-46.01594293,
],
"off_long": [
-7.188632000000001,
-143.2288569,
21.242563,
40.937416999999996,
40.78472,
22.905889000000002,
],
}
)
@pytest.fixture
def df_multi():
"""MultiIndex dataframe fixture."""
return pd.DataFrame(
{
("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"},
("names", "aa"): {0: 67, 1: 80, 2: 64},
("more_names", "aaa"): {0: 56, 1: 90, 2: 50},
}
)
def test_names_to_in_index(df_checks):
"""Raise TypeError if names_to intersects with index."""
with pytest.raises(ValueError):
df_checks.pivot_longer(index="region", names_to="region")
def test_type_index(df_checks):
"""Raise TypeError if wrong type is provided for the `index`."""
with pytest.raises(TypeError):
df_checks.pivot_longer(index=2007)
def test_type_column_names(df_checks):
"""Raise TypeError if wrong type is provided for `column_names`."""
with pytest.raises(TypeError):
df_checks.pivot_longer(column_names=2007)
def test_type_names_to(df_checks):
"""Raise TypeError if wrong type is provided for `names_to`."""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to={2007})
def test_subtype_names_to(df_checks):
"""
Raise TypeError if `names_to` is a sequence
and the wrong type is provided for entries
in `names_to`.
"""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to=[("year",)])
def test_name_sep_wrong_type(test_df):
"""Raise TypeError if the wrong type is provided for `names_sep`."""
with pytest.raises(TypeError):
test_df.pivot_longer(names_to=["set", ".value"], names_sep=["_"])
def test_name_sep_no_names_to(test_df):
"""Raise ValuError if `names_sep` and names_to is None."""
with pytest.raises(ValueError):
test_df.pivot_longer(names_to=None, names_sep="_")
def test_name_pattern_wrong_type(test_df):
"""Raise TypeError if the wrong type provided for `names_pattern`."""
with pytest.raises(TypeError):
test_df.pivot_longer(names_to=["set", ".value"], names_pattern=2007)
def test_name_pattern_no_names_to(test_df):
"""Raise ValueError if `names_pattern` and names_to is None."""
with pytest.raises(ValueError):
test_df.pivot_longer(names_to=None, names_pattern="(.+)_(.+)")
def test_names_pattern_wrong_subtype(test_df):
"""
Raise TypeError if `names_pattern` is a list/tuple
    and an entry of the wrong type is supplied.
"""
with pytest.raises(TypeError):
test_df.pivot_longer(names_to=["off", "pt"], names_pattern=[1, "pt"])
def test_column_level_wrong_type(df_multi):
"""Raise TypeError if wrong type is provided for `column_level`."""
with pytest.raises(TypeError):
df_multi.pivot_longer(index="name", column_level={0})
def test_sort_by_appearance(test_df):
"""Raise error if `sort_by_appearance` is not boolean."""
with pytest.raises(TypeError):
test_df.pivot_longer(
names_to=[".value", "value"],
names_sep="_",
sort_by_appearance="TRUE",
)
def test_ignore_index(test_df):
"""Raise error if `ignore_index` is not boolean."""
with pytest.raises(TypeError):
test_df.pivot_longer(
names_to=[".value", "value"], names_sep="_", ignore_index="TRUE"
)
def test_duplicate_names_to(test_df):
"""Raise error if `names_to` contains duplicates."""
with pytest.raises(ValueError):
test_df.pivot_longer(
names_to=[".value", ".value"], names_pattern="(.+)_(.+)"
)
def test_names_pattern_names_to_unequal_length(df_checks):
"""
Raise ValueError if `names_pattern` is a list/tuple
    and `names_to` has the wrong number of items.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(
names_to=["variable"], names_pattern=["1", "rar"]
)
def test_names_pattern_names_to_dot_value(df_checks):
"""
Raise Error if `names_pattern` is a list/tuple and
`.value` in `names_to`.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(
names_to=["variable", ".value"], names_pattern=["1", "rar"]
)
def test_both_names_sep_and_pattern(df_checks):
"""
Raise ValueError if both `names_sep`
and `names_pattern` is provided.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(
names_to=["rar", "bar"], names_sep="-", names_pattern=r"\\d+"
)
def test_names_pattern_column_MultiIndex(df_multi):
"""Raise ValueError if `names_pattern` and MultiIndex column"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index="name", names_pattern=r"(.+)(.)")
def test_index_tuple_MultiIndex(df_multi):
"""
Raise ValueError if `index` is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index=("name", "a"))
def test_column_names_tuple_MultiIndex(df_multi):
"""
Raise ValueError if `column_names` is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(column_names=("names", "aa"))
def test_column_MultiIndex_names_sep(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_sep is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
column_names=[("names", "aa")],
names_sep="_",
names_to=["names", "others"],
)
def test_column_MultiIndex_names_pattern(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_pattern is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_pattern=r"(.+)(.+)",
names_to=["names", "others"],
)
def test_values_to_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for `values_to`."""
with pytest.raises(TypeError):
df_checks.pivot_longer(values_to=["salvo"])
def test_values_to_exists_in_columns(df_checks):
"""
Raise ValueError if `values_to` already
exists in the dataframe's columns.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="region")
def test_values_to_exists_in_names_to(df_checks):
"""
Raise ValueError if `values_to` is in names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="year", names_to="year")
def test_MultiIndex_column_level(df_multi):
"""Test output from MultiIndex column"""
result = df_multi.pivot_longer(
index="name", column_names="names", column_level=0
)
expected_output = df_multi.melt(
id_vars="name", value_vars="names", col_level=0
)
assert_frame_equal(result, expected_output)
def test_pivot_no_args_passed():
"""Test output if no arguments are passed."""
df_no_args = pd.DataFrame({"name": ["Wilbur", "Petunia", "Gregory"]})
result = df_no_args.pivot_longer()
assert_frame_equal(result, df_no_args.melt())
def test_pivot_index_only_and_sort_by_appearance(df_checks, df_checks_output):
"""
Test output if only `index` is passed and
    `sort_by_appearance` is `True`.
"""
result = df_checks.pivot_longer(
index="region",
names_to="year",
values_to="num_nests",
sort_by_appearance=True,
)
assert_frame_equal(result, df_checks_output)
def test_pivot_index_only(df_checks):
"""Test output if only `index` is passed."""
result = df_checks.pivot_longer(
index="region",
names_to="year",
values_to="num_nests",
)
df_out = pd.DataFrame(
{
"region": [
"Pacific",
"Southwest",
"Rocky Mountains and Plains",
"Pacific",
"Southwest",
"Rocky Mountains and Plains",
],
"year": ["2007", "2007", "2007", "2009", "2009", "2009"],
"num_nests": [1039, 51, 200, 2587, 176, 338],
}
)
assert_frame_equal(result, df_out)
def test_ignore_index_False():
"""
Test dataframe output if `ignore_index` is False,
and `sort_by_appearance` is False.
"""
df_in = pd.DataFrame(
{
"A": {0: "a", 1: "b", 2: "c"},
"B": {0: 1, 1: 3, 2: 5},
"C": {0: 2, 1: 4, 2: 6},
}
)
result = df_in.pivot_longer(
index="A",
column_names=["B", "C"],
ignore_index=False,
sort_by_appearance=False,
)
df_out = pd.DataFrame(
{
"A": ["a", "b", "c", "a", "b", "c"],
"variable": ["B", "B", "B", "C", "C", "C"],
"value": [1, 3, 5, 2, 4, 6],
},
index=pd.Int64Index([0, 1, 2, 0, 1, 2], dtype="int64"),
)
assert_frame_equal(result, df_out)
def test_ignore_index_false_same_length():
"""
Test output when `ignore_index` is False
and the length of the new dataframe is the same
as the length of the original dataframe.
"""
df_in = pd.DataFrame(
{
"name": {
(67, 56): "Wilbur",
(80, 90): "Petunia",
(64, 50): "Gregory",
}
}
)
df_out = pd.DataFrame(
{
"variable": {(64, 50): "name", (67, 56): "name", (80, 90): "name"},
"value": {
(64, 50): "Gregory",
(67, 56): "Wilbur",
(80, 90): "Petunia",
},
}
)
result = df_in.pivot_longer(ignore_index=False)
assert_frame_equal(result, df_out)
def test_pivot_column_only(df_checks, df_checks_output):
"""Test output if only `column_names` is passed."""
result = df_checks.pivot_longer(
column_names=["2007", "2009"],
names_to="year",
values_to="num_nests",
sort_by_appearance=True,
)
assert_frame_equal(result, df_checks_output)
def test_pivot_index_patterns_only_sort_by_appearance(
df_checks, df_checks_output
):
"""
Test output if the `patterns` function is passed to `index`,
and `sort_by_appearance` is `True`.
"""
result = df_checks.pivot_longer(
index=patterns(r"[^\d+]"),
names_to="year",
values_to="num_nests",
sort_by_appearance=True,
)
assert_frame_equal(result, df_checks_output)
def test_pivot_no_index_no_columns():
"""Test output if neither `index`/`columns_names` is passed."""
test = pd.DataFrame(
{"1": ["fur", "lace"], "2": ["car", "plane"], "3": ["nsw", "vic"]}
)
result = test.pivot_longer()
expected_output = pd.DataFrame(
{
"variable": ["1", "1", "2", "2", "3", "3"],
"value": ["fur", "lace", "car", "plane", "nsw", "vic"],
}
)
assert_frame_equal(result, expected_output)
def test_pivot_columns_patterns_only(df_checks, df_checks_output):
"""Test output if the `patterns` function is passed to `column_names`."""
result = df_checks.pivot_longer(
column_names=patterns(r"\d+"),
names_to="year",
values_to="num_nests",
sort_by_appearance=True,
)
assert_frame_equal(result, df_checks_output)
@pytest.fixture
def data():
return pd.DataFrame(
{
"country": ["United States", "Russia", "China"],
"vault_2012_f": [48.132, 46.366, 44.266],
"vault_2012_m": [46.632, 46.866, 48.316],
"vault_2016_f": [46.866, 45.733, 44.332],
"vault_2016_m": [45.865, 46.033, 45.0],
"floor_2012_f": [45.366, 41.599, 40.833],
"floor_2012_m": [45.266, 45.308, 45.133],
"floor_2016_f": [45.999, 42.032, 42.066],
"floor_2016_m": [43.757, 44.766, 43.799],
}
)
def test_length_mismatch(data):
"""
Raise error if `names_to` is a list/tuple,
and its length does not match
the number of extracted columns.
"""
with pytest.raises(ValueError):
data.pivot_longer(names_to=["event", "year"], names_sep="_")
def test_names_sep_pattern_names_to_index(data):
"""
Raise ValueError if names_sep/names_pattern,
'.value' not in names_to
and names_to intersects with index
"""
with pytest.raises(ValueError):
data.pivot_longer(
names_to=["event", "year", "country"],
names_sep="_",
index="country",
)
@pytest.fixture
def start():
return pd.DataFrame(
{
"id": [1, 2, 3],
"M_start_date_1": [201709, 201709, 201709],
"M_end_date_1": [201905, 201905, 201905],
"M_start_date_2": [202004, 202004, 202004],
"M_end_date_2": [202005, 202005, 202005],
"F_start_date_1": [201803, 201803, 201803],
"F_end_date_1": [201904, 201904, 201904],
"F_start_date_2": [201912, 201912, 201912],
"F_end_date_2": [202007, 202007, 202007],
}
)
def test_dot_value_names_to_index(start):
"""
Raise ValueError if names_sep/names_pattern,
'.value' in names_to,
and names_to intersects with index
"""
with pytest.raises(ValueError):
start.pivot_longer(
"id",
names_to=("id", ".value"),
names_pattern="(M|F)_(start|end)_.+",
)
def test_dot_value_names_to_columns_pattern(start):
"""
Raise ValueError if names_pattern,
'.value' in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(ValueError):
start.pivot_longer(
"id",
names_to=("end", ".value"),
names_pattern="(M|F)_(start|end)_.+",
)
def test_dot_value_names_to_columns_sep(start):
"""
Raise ValueError if names_sep,
'.value' in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(ValueError):
start.pivot_longer(
"id", names_to=("start", ".value", "num", "rar"), names_sep="_"
)
def test_empty_mapping_all(test_df):
"""Raise error if `names_pattern` is a regex and returns no matches."""
with pytest.raises(ValueError):
test_df.pivot_longer(
names_to=[".value", "value"], names_pattern=r"(\d+)([A-Z])"
)
def test_empty_mapping_any(test_df):
"""
Raise error if `names_pattern` is a regex
and returns incomplete matches.
"""
with pytest.raises(ValueError):
test_df.pivot_longer(
names_to=[".value", "value"], names_pattern=r"(.+)_(loc)"
)
def test_len_mapping_gt_len_names_to(test_df):
"""
Raise error if `names_pattern` is a regex,
and the number of matches returned
is more than length of `names_to`.
"""
with pytest.raises(ValueError):
test_df.pivot_longer(
names_to=[".value", "value"], names_pattern="(.+)(_)(.+)"
)
@pytest.fixture
def names_pattern_list_df():
return pd.DataFrame(
[
{
"ID": 1,
"DateRange1Start": "1/1/90",
"DateRange1End": "3/1/90",
"Value1": 4.4,
"DateRange2Start": "4/5/91",
"DateRange2End": "6/7/91",
"Value2": 6.2,
"DateRange3Start": "5/5/95",
"DateRange3End": "6/6/96",
"Value3": 3.3,
}
]
)
def test_names_pattern_list_empty_all(names_pattern_list_df):
"""
Raise ValueError if `names_pattern` is a list,
and nothing is returned.
"""
with pytest.raises(ValueError):
names_pattern_list_df.pivot_longer(
index="ID",
names_to=("DateRangeStart", "DateRangeEnd", "Value"),
names_pattern=("^Start", "^End", "Value$"),
)
def test_names_pattern_list_empty_any(names_pattern_list_df):
"""
Raise ValueError if `names_pattern` is a list,
and not all matches are returned.
"""
with pytest.raises(ValueError):
names_pattern_list_df.pivot_longer(
index="ID",
names_to=("DateRangeStart", "DateRangeEnd", "Value"),
names_pattern=("Start", "End", "Value$"),
)
def test_names_pattern_list(names_pattern_list_df):
"""Test output if `names_pattern` is a list."""
result = names_pattern_list_df.pivot_longer(
index="ID",
names_to=("DateRangeStart", "DateRangeEnd", "Value"),
names_pattern=("Start$", "End$", "^Value"),
)
expected_output = pd.DataFrame(
{
"ID": [1, 1, 1],
"DateRangeStart": ["1/1/90", "4/5/91", "5/5/95"],
"DateRangeEnd": ["3/1/90", "6/7/91", "6/6/96"],
"Value": [4.4, 6.2, 3.3],
}
)
assert_frame_equal(result, expected_output)
multiple_values_pattern = [
(
pd.DataFrame(
{
"country": ["United States", "Russia", "China"],
"vault2012": [48.1, 46.4, 44.3],
"floor2012": [45.4, 41.6, 40.8],
"vault2016": [46.9, 45.7, 44.3],
"floor2016": [46.0, 42.0, 42.1],
}
),
pd.DataFrame(
[
{
"country": "United States",
"event": "vault",
"year": "2012",
"score": 48.1,
},
{
"country": "United States",
"event": "floor",
"year": "2012",
"score": 45.4,
},
{
"country": "United States",
"event": "vault",
"year": "2016",
"score": 46.9,
},
{
"country": "United States",
"event": "floor",
"year": "2016",
"score": 46.0,
},
{
"country": "Russia",
"event": "vault",
"year": "2012",
"score": 46.4,
},
{
"country": "Russia",
"event": "floor",
"year": "2012",
"score": 41.6,
},
{
"country": "Russia",
"event": "vault",
"year": "2016",
"score": 45.7,
},
{
"country": "Russia",
"event": "floor",
"year": "2016",
"score": 42.0,
},
{
"country": "China",
"event": "vault",
"year": "2012",
"score": 44.3,
},
{
"country": "China",
"event": "floor",
"year": "2012",
"score": 40.8,
},
{
"country": "China",
"event": "vault",
"year": "2016",
"score": 44.3,
},
{
"country": "China",
"event": "floor",
"year": "2016",
"score": 42.1,
},
]
),
"country",
("event", "year"),
r"([A-Za-z]+)(\d+)",
True,
)
]
# Probably unnecessary, as it is just one item;
# the idea is that more tests will be added for this.
@pytest.mark.parametrize(
"df_in,df_out,index,names_to,names_pattern, sort_by_appearance",
multiple_values_pattern,
)
def test_multiple_values_pattern(
df_in, df_out, index, names_to, names_pattern, sort_by_appearance
):
"""
Test function to extract multiple columns,
using the `names_to` and `names_pattern` arguments.
"""
result = df_in.pivot_longer(
index=index,
names_to=names_to,
names_pattern=names_pattern,
values_to="score",
sort_by_appearance=sort_by_appearance,
)
assert_frame_equal(result, df_out)
multiple_values_sep = [
(
pd.DataFrame(
{
"country": ["United States", "Russia", "China"],
"vault_2012": [48.1, 46.4, 44.3],
"floor_2012": [45.4, 41.6, 40.8],
"vault_2016": [46.9, 45.7, 44.3],
"floor_2016": [46.0, 42.0, 42.1],
}
),
pd.DataFrame(
[
{
"country": "United States",
"event": "vault",
"year": "2012",
"score": 48.1,
},
{
"country": "United States",
"event": "floor",
"year": "2012",
"score": 45.4,
},
{
"country": "United States",
"event": "vault",
"year": "2016",
"score": 46.9,
},
{
"country": "United States",
"event": "floor",
"year": "2016",
"score": 46.0,
},
{
"country": "Russia",
"event": "vault",
"year": "2012",
"score": 46.4,
},
{
"country": "Russia",
"event": "floor",
"year": "2012",
"score": 41.6,
},
{
"country": "Russia",
"event": "vault",
"year": "2016",
"score": 45.7,
},
{
"country": "Russia",
"event": "floor",
"year": "2016",
"score": 42.0,
},
{
"country": "China",
"event": "vault",
"year": "2012",
"score": 44.3,
},
{
"country": "China",
"event": "floor",
"year": "2012",
"score": 40.8,
},
{
"country": "China",
"event": "vault",
"year": "2016",
"score": 44.3,
},
{
"country": "China",
"event": "floor",
"year": "2016",
"score": 42.1,
},
]
),
"country",
("event", "year"),
"_",
True,
),
(
pd.DataFrame(
{
"country": ["United States", "Russia", "China"],
"vault_2012_f": [
48.132,
46.36600000000001,
44.266000000000005,
],
"vault_2012_m": [46.632, 46.86600000000001, 48.316],
"vault_2016_f": [
46.86600000000001,
45.733000000000004,
44.332,
],
"vault_2016_m": [45.865, 46.033, 45.0],
"floor_2012_f": [45.36600000000001, 41.599, 40.833],
"floor_2012_m": [45.266000000000005, 45.308, 45.133],
"floor_2016_f": [45.998999999999995, 42.032, 42.066],
"floor_2016_m": [43.757, 44.766000000000005, 43.799],
}
),
pd.DataFrame(
[
{
"country": "United States",
"event": "vault",
"year": "2012",
"gender": "f",
"score": 48.132,
},
{
"country": "United States",
"event": "vault",
"year": "2012",
"gender": "m",
"score": 46.632,
},
{
"country": "United States",
"event": "vault",
"year": "2016",
"gender": "f",
"score": 46.86600000000001,
},
{
"country": "United States",
"event": "vault",
"year": "2016",
"gender": "m",
"score": 45.865,
},
{
"country": "United States",
"event": "floor",
"year": "2012",
"gender": "f",
"score": 45.36600000000001,
},
{
"country": "United States",
"event": "floor",
"year": "2012",
"gender": "m",
"score": 45.266000000000005,
},
{
"country": "United States",
"event": "floor",
"year": "2016",
"gender": "f",
"score": 45.998999999999995,
},
{
"country": "United States",
"event": "floor",
"year": "2016",
"gender": "m",
"score": 43.757,
},
{
"country": "Russia",
"event": "vault",
"year": "2012",
"gender": "f",
"score": 46.36600000000001,
},
{
"country": "Russia",
"event": "vault",
"year": "2012",
"gender": "m",
"score": 46.86600000000001,
},
{
"country": "Russia",
"event": "vault",
"year": "2016",
"gender": "f",
"score": 45.733000000000004,
},
{
"country": "Russia",
"event": "vault",
"year": "2016",
"gender": "m",
"score": 46.033,
},
{
"country": "Russia",
"event": "floor",
"year": "2012",
"gender": "f",
"score": 41.599,
},
{
"country": "Russia",
"event": "floor",
"year": "2012",
"gender": "m",
"score": 45.308,
},
{
"country": "Russia",
"event": "floor",
"year": "2016",
"gender": "f",
"score": 42.032,
},
{
"country": "Russia",
"event": "floor",
"year": "2016",
"gender": "m",
"score": 44.766000000000005,
},
{
"country": "China",
"event": "vault",
"year": "2012",
"gender": "f",
"score": 44.266000000000005,
},
{
"country": "China",
"event": "vault",
"year": "2012",
"gender": "m",
"score": 48.316,
},
{
"country": "China",
"event": "vault",
"year": "2016",
"gender": "f",
"score": 44.332,
},
{
"country": "China",
"event": "vault",
"year": "2016",
"gender": "m",
"score": 45.0,
},
{
"country": "China",
"event": "floor",
"year": "2012",
"gender": "f",
"score": 40.833,
},
{
"country": "China",
"event": "floor",
"year": "2012",
"gender": "m",
"score": 45.133,
},
{
"country": "China",
"event": "floor",
"year": "2016",
"gender": "f",
"score": 42.066,
},
{
"country": "China",
"event": "floor",
"year": "2016",
"gender": "m",
"score": 43.799,
},
]
),
"country",
("event", "year", "gender"),
"_",
True,
),
]
@pytest.mark.parametrize(
"df_in,df_out,index,names_to,names_sep,sort_by_appearance",
multiple_values_sep,
)
def test_multiple_values_sep(
df_in, df_out, index, names_to, names_sep, sort_by_appearance
):
"""
Test function to extract multiple columns,
using the `names_to` and `names_sep` arguments.
"""
result = df_in.pivot_longer(
index=index,
names_to=names_to,
names_sep=names_sep,
values_to="score",
sort_by_appearance=sort_by_appearance,
)
assert_frame_equal(result, df_out)
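# In the parametrized tests below, ".value" inside `names_to` marks the part of
# each column name that stays as a column header in the reshaped frame, rather
# than becoming a value in a new column (mirroring tidyr's pivot_longer).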
paired_columns_pattern = [
(
pd.DataFrame(
{
"id": [1, 2, 3],
"M_start_date_1": [201709, 201709, 201709],
"M_end_date_1": [201905, 201905, 201905],
"M_start_date_2": [202004, 202004, 202004],
"M_end_date_2": [202005, 202005, 202005],
"F_start_date_1": [201803, 201803, 201803],
"F_end_date_1": [201904, 201904, 201904],
"F_start_date_2": [201912, 201912, 201912],
"F_end_date_2": [202007, 202007, 202007],
}
),
pd.DataFrame(
[
{"id": 1, "cod": "M", "start": 201709, "end": 201905},
{"id": 1, "cod": "M", "start": 202004, "end": 202005},
{"id": 1, "cod": "F", "start": 201803, "end": 201904},
{"id": 1, "cod": "F", "start": 201912, "end": 202007},
{"id": 2, "cod": "M", "start": 201709, "end": 201905},
{"id": 2, "cod": "M", "start": 202004, "end": 202005},
{"id": 2, "cod": "F", "start": 201803, "end": 201904},
{"id": 2, "cod": "F", "start": 201912, "end": 202007},
{"id": 3, "cod": "M", "start": 201709, "end": 201905},
{"id": 3, "cod": "M", "start": 202004, "end": 202005},
{"id": 3, "cod": "F", "start": 201803, "end": 201904},
{"id": 3, "cod": "F", "start": 201912, "end": 202007},
]
),
"id",
("cod", ".value"),
"(M|F)_(start|end)_.+",
True,
),
(
pd.DataFrame(
{
"person_id": [1, 2, 3],
"date1": ["12/31/2007", "11/25/2009", "10/06/2005"],
"val1": [2, 4, 6],
"date2": ["12/31/2017", "11/25/2019", "10/06/2015"],
"val2": [1, 3, 5],
"date3": ["12/31/2027", "11/25/2029", "10/06/2025"],
"val3": [7, 9, 11],
}
),
pd.DataFrame(
[
{"person_id": 1, "value": "1", "date": "12/31/2007", "val": 2},
{"person_id": 1, "value": "2", "date": "12/31/2017", "val": 1},
{"person_id": 1, "value": "3", "date": "12/31/2027", "val": 7},
{"person_id": 2, "value": "1", "date": "11/25/2009", "val": 4},
{"person_id": 2, "value": "2", "date": "11/25/2019", "val": 3},
{"person_id": 2, "value": "3", "date": "11/25/2029", "val": 9},
{"person_id": 3, "value": "1", "date": "10/06/2005", "val": 6},
{"person_id": 3, "value": "2", "date": "10/06/2015", "val": 5},
{
"person_id": 3,
"value": "3",
"date": "10/06/2025",
"val": 11,
},
]
),
patterns("^(?!(date|val))"),
(".value", "value"),
r"([a-z]+)(\d)",
True,
),
(
pd.DataFrame(
[
{
"id": 1,
"a1": "a",
"a2": "b",
"a3": "c",
"A1": "A",
"A2": "B",
"A3": "C",
}
]
),
pd.DataFrame(
{
"id": [1, 1, 1],
"instance": ["1", "2", "3"],
"a": ["a", "b", "c"],
"A": ["A", "B", "C"],
},
),
"id",
(".value", "instance"),
r"(\w)(\d)",
True,
),
(
pd.DataFrame(
{
"A1970": ["a", "b", "c"],
"A1980": ["d", "e", "f"],
"B1970": [2.5, 1.2, 0.7],
"B1980": [3.2, 1.3, 0.1],
"X": [-1.085631, 0.997345, 0.282978],
}
),
pd.DataFrame(
{
"X": [
-1.085631,
-1.085631,
0.997345,
0.997345,
0.282978,
0.282978,
],
"year": ["1970", "1980", "1970", "1980", "1970", "1980"],
"A": ["a", "d", "b", "e", "c", "f"],
"B": [2.5, 3.2, 1.2, 1.3, 0.7, 0.1],
}
),
"X",
(".value", "year"),
"([A-Z])(.+)",
True,
),
(
pd.DataFrame(
{
"id": ["A", "B", "C", "D", "E", "F"],
"f_start": ["p", "i", "i", "p", "p", "i"],
"d_start": [
"2018-01-01",
"2019-04-01",
"2018-06-01",
"2019-12-01",
"2019-02-01",
"2018-04-01",
],
"f_end": ["p", "p", "i", "p", "p", "i"],
"d_end": [
"2018-02-01",
"2020-01-01",
"2019-03-01",
"2020-05-01",
"2019-05-01",
"2018-07-01",
],
}
),
pd.DataFrame(
[
{"id": "A", "status": "start", "f": "p", "d": "2018-01-01"},
{"id": "A", "status": "end", "f": "p", "d": "2018-02-01"},
{"id": "B", "status": "start", "f": "i", "d": "2019-04-01"},
{"id": "B", "status": "end", "f": "p", "d": "2020-01-01"},
{"id": "C", "status": "start", "f": "i", "d": "2018-06-01"},
{"id": "C", "status": "end", "f": "i", "d": "2019-03-01"},
{"id": "D", "status": "start", "f": "p", "d": "2019-12-01"},
{"id": "D", "status": "end", "f": "p", "d": "2020-05-01"},
{"id": "E", "status": "start", "f": "p", "d": "2019-02-01"},
{"id": "E", "status": "end", "f": "p", "d": "2019-05-01"},
{"id": "F", "status": "start", "f": "i", "d": "2018-04-01"},
{"id": "F", "status": "end", "f": "i", "d": "2018-07-01"},
]
),
"id",
(".value", "status"),
"(.*)_(.*)",
True,
),
]
@pytest.mark.parametrize(
"""
df_in,df_out,index,names_to,names_pattern,
sort_by_appearance
""",
paired_columns_pattern,
)
def test_extract_column_names_pattern(
df_in,
df_out,
index,
names_to,
names_pattern,
sort_by_appearance,
):
"""
Test output if `.value` is in the `names_to`
argument and `names_pattern` is used.
"""
result = df_in.pivot_longer(
index=index,
names_to=names_to,
names_pattern=names_pattern,
sort_by_appearance=sort_by_appearance,
)
assert_frame_equal(result, df_out)
paired_columns_sep = [
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.wide_to_long.html
(
pd.DataFrame(
{
"A(weekly)-2010": [0.548814, 0.7151890000000001, 0.602763],
"A(weekly)-2011": [0.544883, 0.423655, 0.645894],
"B(weekly)-2010": [
0.437587,
0.8917729999999999,
0.9636629999999999,
],
"B(weekly)-2011": [0.383442, 0.791725, 0.528895],
"X": [0, 1, 1],
}
),
pd.DataFrame(
{
"X": [0, 0, 1, 1, 1, 1],
"year": ["2010", "2011", "2010", "2011", "2010", "2011"],
"A(weekly)": [
0.548814,
0.544883,
0.7151890000000001,
0.423655,
0.602763,
0.645894,
],
"B(weekly)": [
0.437587,
0.383442,
0.8917729999999999,
0.791725,
0.9636629999999999,
0.528895,
],
}
),
"X",
(".value", "year"),
"-",
True,
),
(
pd.DataFrame(
{
"indexer": [0, 1],
"S_1": [1, 1],
"S_2": [0, 1],
"S_3": ["0", np.nan],
"S_4": ["1", np.nan],
}
),
pd.DataFrame(
{
"indexer": [0, 0, 0, 0, 1, 1, 1, 1],
"num": ["1", "2", "3", "4", "1", "2", "3", "4"],
"S": [1, 0, "0", "1", 1, 1, np.nan, np.nan],
}
),
"indexer",
(".value", "num"),
"_",
True,
),
(
pd.DataFrame(
{
"county": [1001, 1003, 1005],
"area": [275, 394, 312],
"pop_2006": [1037, 2399, 1638],
"pop_2007": [1052, 2424, 1647],
"pop_2008": [1102, 2438, 1660],
}
),
pd.DataFrame(
{
"county": [
1001,
1001,
1001,
1003,
1003,
1003,
1005,
1005,
1005,
],
"area": [275, 275, 275, 394, 394, 394, 312, 312, 312],
"year": [
"2006",
"2007",
"2008",
"2006",
"2007",
"2008",
"2006",
"2007",
"2008",
],
"pop": [1037, 1052, 1102, 2399, 2424, 2438, 1638, 1647, 1660],
}
),
["county", "area"],
(".value", "year"),
"_",
True,
),
(
pd.DataFrame(
{
"family": [1, 2, 3, 4, 5],
"dob_child1": [
"1998-11-26",
"1996-06-22",
"2002-07-11",
"2004-10-10",
"2000-12-05",
],
"dob_child2": [
"2000-01-29",
np.nan,
"2004-04-05",
"2009-08-27",
"2005-02-28",
],
"gender_child1": [1, 2, 2, 1, 2],
"gender_child2": [2.0, np.nan, 2.0, 1.0, 1.0],
}
),
pd.DataFrame(
[
{
"family": 1,
"child": "child1",
"dob": "1998-11-26",
"gender": 1.0,
},
{
"family": 1,
"child": "child2",
"dob": "2000-01-29",
"gender": 2.0,
},
{
"family": 2,
"child": "child1",
"dob": "1996-06-22",
"gender": 2.0,
},
{
"family": 2,
"child": "child2",
"dob": np.nan,
"gender": np.nan,
},
{
"family": 3,
"child": "child1",
"dob": "2002-07-11",
"gender": 2.0,
},
{
"family": 3,
"child": "child2",
"dob": "2004-04-05",
"gender": 2.0,
},
{
"family": 4,
"child": "child1",
"dob": "2004-10-10",
"gender": 1.0,
},
{
"family": 4,
"child": "child2",
"dob": "2009-08-27",
"gender": 1.0,
},
{
"family": 5,
"child": "child1",
"dob": "2000-12-05",
"gender": 2.0,
},
{
"family": 5,
"child": "child2",
"dob": "2005-02-28",
"gender": 1.0,
},
]
),
"family",
(".value", "child"),
"_",
True,
),
(
pd.DataFrame(
{
"dob_child2": [
"2000-01-29",
np.nan,
"2004-04-05",
"2009-08-27",
"2005-02-28",
],
"gender_child1": [1, 2, 2, 1, 2],
"gender_child2": [2.0, np.nan, 2.0, 1.0, 1.0],
"dob_child1": [
"1998-11-26",
"1996-06-22",
"2002-07-11",
"2004-10-10",
"2000-12-05",
],
}
),
pd.DataFrame(
{
"child": [
"child2",
"child1",
"child2",
"child1",
"child2",
"child1",
"child2",
"child1",
"child2",
"child1",
],
"dob": [
"2000-01-29",
"1998-11-26",
np.nan,
"1996-06-22",
"2004-04-05",
"2002-07-11",
"2009-08-27",
"2004-10-10",
"2005-02-28",
"2000-12-05",
],
"gender": [2.0, 1, np.nan, 2, 2.0, 2, 1.0, 1, 1.0, 2],
}
),
None,
(".value", "child"),
"_",
True,
),
(
pd.DataFrame(
[
{
"id": "A",
"Q1r1_pepsi": 1,
"Q1r1_cola": 0,
"Q1r2_pepsi": 1,
"Q1r2_cola": 0,
},
{
"id": "B",
"Q1r1_pepsi": 0,
"Q1r1_cola": 0,
"Q1r2_pepsi": 1,
"Q1r2_cola": 1,
},
{
"id": "C",
"Q1r1_pepsi": 1,
"Q1r1_cola": 1,
"Q1r2_pepsi": 1,
"Q1r2_cola": 1,
},
]
),
pd.DataFrame(
[
{"id": "A", "brand": "pepsi", "Q1r1": 1, "Q1r2": 1},
{"id": "A", "brand": "cola", "Q1r1": 0, "Q1r2": 0},
{"id": "B", "brand": "pepsi", "Q1r1": 0, "Q1r2": 1},
{"id": "B", "brand": "cola", "Q1r1": 0, "Q1r2": 1},
{"id": "C", "brand": "pepsi", "Q1r1": 1, "Q1r2": 1},
{"id": "C", "brand": "cola", "Q1r1": 1, "Q1r2": 1},
]
),
"id",
(".value", "brand"),
"_",
True,
),
(
pd.DataFrame(
{
"event": [1, 2],
"url_1": ["g1", "g3"],
"name_1": ["dc", "nyc"],
"url_2": ["g2", "g4"],
"name_2": ["sf", "la"],
}
),
pd.DataFrame(
{
"event": [1, 1, 2, 2],
"item": ["1", "2", "1", "2"],
"url": ["g1", "g2", "g3", "g4"],
"name": ["dc", "sf", "nyc", "la"],
}
),
"event",
(".value", "item"),
"_",
True,
),
]
@pytest.mark.parametrize(
"""
df_in,df_out,index,names_to,names_sep,
sort_by_appearance
""",
paired_columns_sep,
)
def test_extract_column_names_sep(
df_in,
df_out,
index,
names_to,
names_sep,
sort_by_appearance,
):
"""
Test output if `.value` is in the `names_to` argument
and `names_sep` is used.
"""
result = df_in.pivot_longer(
index=index,
names_to=names_to,
names_sep=names_sep,
sort_by_appearance=sort_by_appearance,
)
assert_frame_equal(result, df_out)
# https://community.rstudio.com/t/pivot-longer-on-multiple-column-sets-pairs/43958/10
paired_columns_no_index_pattern = [
(
pd.DataFrame(
{
"off_loc": ["A", "B", "C", "D", "E", "F"],
"pt_loc": ["G", "H", "I", "J", "K", "L"],
"pt_lat": [
100.07548220000001,
75.191326,
122.65134479999999,
124.13553329999999,
124.13553329999999,
124.01028909999998,
],
"off_lat": [
121.271083,
75.93845266,
135.043791,
134.51128400000002,
134.484374,
137.962195,
],
"pt_long": [
4.472089953,
-144.387785,
-40.45611048,
-46.07156181,
-46.07156181,
-46.01594293,
],
"off_long": [
-7.188632000000001,
-143.2288569,
21.242563,
40.937416999999996,
40.78472,
22.905889000000002,
],
}
),
pd.DataFrame(
{
"set": [
"off",
"pt",
"off",
"pt",
"off",
"pt",
"off",
"pt",
"off",
"pt",
"off",
"pt",
],
"loc": [
"A",
"G",
"B",
"H",
"C",
"I",
"D",
"J",
"E",
"K",
"F",
"L",
],
"lat": [
121.271083,
100.07548220000001,
75.93845266,
75.191326,
135.043791,
122.65134479999999,
134.51128400000002,
124.13553329999999,
134.484374,
124.13553329999999,
137.962195,
124.01028909999998,
],
"long": [
-7.188632000000001,
4.472089953,
-143.2288569,
-144.387785,
21.242563,
-40.45611048,
40.937416999999996,
-46.07156181,
40.78472,
-46.07156181,
22.905889000000002,
-46.01594293,
],
}
),
("set", ".value"),
"(.+)_(.+)",
True,
)
]
@pytest.mark.parametrize(
"""
df_in,df_out,names_to,names_pattern,
sort_by_appearance
""",
paired_columns_no_index_pattern,
)
def test_paired_columns_no_index_pattern(
df_in, df_out, names_to, names_pattern, sort_by_appearance
):
"""
Test function where `.value` is in the `names_to` argument,
names_pattern is used,
and no `index` is supplied.
"""
result = df_in.pivot_longer(
names_to=names_to,
names_pattern=names_pattern,
sort_by_appearance=sort_by_appearance,
)
assert_frame_equal(result, df_out)
names_single_value = [
(
pd.DataFrame(
{
"event": [1, 2],
"url_1": ["g1", "g3"],
"name_1": ["dc", "nyc"],
"url_2": ["g2", "g4"],
"name_2": ["sf", "la"],
}
),
pd.DataFrame(
{
"event": [1, 1, 2, 2],
"url": ["g1", "g2", "g3", "g4"],
"name": ["dc", "sf", "nyc", "la"],
}
),
"event",
"(.+)_.",
True,
True,
),
(
pd.DataFrame(
{
"id": [1, 2, 3],
"x1": [4, 5, 6],
"x2": [5, 6, 7],
"y1": [7, 8, 9],
"y2": [10, 11, 12],
}
),
pd.DataFrame(
{
"id": [1, 1, 2, 2, 3, 3],
"x": [4, 5, 5, 6, 6, 7],
"y": [7, 10, 8, 11, 9, 12],
},
),
"id",
"(.).",
True,
True,
),
(
pd.DataFrame(
{
"x1": [4, 5, 6],
"x2": [5, 6, 7],
"y1": [7, 8, 9],
"y2": [10, 11, 12],
}
),
pd.DataFrame({"x": [4, 5, 5, 6, 6, 7], "y": [7, 10, 8, 11, 9, 12]}),
None,
"(.).",
True,
True,
),
(
pd.DataFrame(
{
"x1": [4, 5, 6],
"x2": [5, 6, 7],
"y1": [7, 8, 9],
"y2": [10, 11, 12],
}
),
pd.DataFrame(
{"x": [4, 5, 5, 6, 6, 7], "y": [7, 10, 8, 11, 9, 12]},
index=[0, 0, 1, 1, 2, 2],
),
None,
"(.).",
False,
True,
),
]
@pytest.mark.parametrize(
"""
df_in,df_out,index, names_pattern,
ignore_index,sort_by_appearance
""",
names_single_value,
)
def test_single_value(
df_in,
df_out,
index,
names_pattern,
ignore_index,
sort_by_appearance,
):
"""Test function where names_to is a string and == `.value`."""
result = df_in.pivot_longer(
index=index,
names_to=".value",
names_pattern=names_pattern,
ignore_index=ignore_index,
sort_by_appearance=sort_by_appearance,
)
assert_frame_equal(result, df_out)
single_column_names_pattern = [
(
pd.DataFrame(
{
"sepal_length": [5.1, 7.0],
"sepal_width": [3.5, 3.2],
"petal_length": [1.4, 4.7],
"petal_width": [0.2, 1.4],
"species": ["setosa", "versicolor"],
}
),
pd.DataFrame(
[
{"species": "setosa", "part": "sepal", "value": 5.1},
{"species": "versicolor", "part": "sepal", "value": 7.0},
{"species": "setosa", "part": "sepal", "value": 3.5},
{"species": "versicolor", "part": "sepal", "value": 3.2},
{"species": "setosa", "part": "petal", "value": 1.4},
{"species": "versicolor", "part": "petal", "value": 4.7},
{"species": "setosa", "part": "petal", "value": 0.2},
{"species": "versicolor", "part": "petal", "value": 1.4},
]
),
"species",
None,
"part",
r"(.*)[_].*",
False,
),
(
pd.DataFrame(
{
"sepal_length": [5.1, 7.0],
"sepal_width": [3.5, 3.2],
"petal_length": [1.4, 4.7],
"petal_width": [0.2, 1.4],
"species": ["setosa", "versicolor"],
"jargon": ["blabla", "blacksheep"],
}
),
pd.DataFrame(
[
{"species": "setosa", "part": "sepal", "value": 5.1},
{"species": "setosa", "part": "sepal", "value": 3.5},
{"species": "setosa", "part": "petal", "value": 1.4},
{"species": "setosa", "part": "petal", "value": 0.2},
{"species": "versicolor", "part": "sepal", "value": 7.0},
{"species": "versicolor", "part": "sepal", "value": 3.2},
{"species": "versicolor", "part": "petal", "value": 4.7},
{"species": "versicolor", "part": "petal", "value": 1.4},
]
),
"species",
patterns("_"),
"part",
r"(.*)[_].*",
True,
),
]
@pytest.mark.parametrize(
"""
df_in,df_out,index,column_names,names_to,
names_pattern,sort_by_appearance
""",
single_column_names_pattern,
)
def test_single_column_names_pattern(
df_in,
df_out,
index,
column_names,
names_to,
names_pattern,
sort_by_appearance,
):
"""
Test output if `names_to` is a string and
`names_pattern` returns a single column.
Also tests when both `index` and `column_names`
are supplied, and only a subset of the dataframe
is transformed.
"""
result = df_in.pivot_longer(
index=index,
column_names=column_names,
names_to=names_to,
names_pattern=names_pattern,
sort_by_appearance=sort_by_appearance,
)
assert_frame_equal(result, df_out)
# Not relevant anymore, but kept:
# the more tests, the merrier.
def test_group_present():
"""Test output if 'group' is in `names_to`."""
df_in = pd.DataFrame(
{
"id": [1, 2, 3],
"x1": [4, 5, 6],
"x2": [5, 6, 7],
"y1": [7, 8, 9],
"y2": [10, 11, 12],
}
)
df_out = pd.DataFrame(
{
"id": [1, 2, 3, 1, 2, 3],
"group": ["1", "1", "1", "2", "2", "2"],
"x": [4, 5, 6, 5, 6, 7],
"y": [7, 8, 9, 10, 11, 12],
}
)
result = df_in.pivot_longer(
index="id",
names_to=[".value", "group"],
names_pattern="(.)(.)",
)
assert_frame_equal(result, df_out)
# copied from :
# https://github.com/pandas-dev/pandas/blob/master/pandas/tests/reshape/test_melt.py
def test_float_suffix_irregular():
"""
Test output for floating suffixes for stubnames;
the suffixes are unique and differ for the stubnames.
"""
df = pd.DataFrame(
{
"treatment_1.1": [1.0, 2.0],
"treatment_2.1": [3.0, 4.0],
"result_1.2": [5.0, 6.0],
"result_1": [0, 9],
"A": ["X1", "X2"],
}
)
expected = pd.DataFrame(
{
"A": ["X1", "X1", "X1", "X1", "X2", "X2", "X2", "X2"],
"colname": ["1.1", "2.1", "1.2", "1", "1.1", "2.1", "1.2", "1"],
"treatment": [1.0, 3.0, np.nan, np.nan, 2.0, 4.0, np.nan, np.nan],
"result": [np.nan, np.nan, 5.0, 0.0, np.nan, np.nan, 6.0, 9.0],
}
)
result = df.pivot_longer(
index="A",
names_to=(".value", "colname"),
names_sep="_",
sort_by_appearance=True,
)
assert_frame_equal(result, expected)
multiple_column_names = [
(
pd.DataFrame(
{
"Sony | TV | Model | value": {
0: "A222",
1: "A234",
2: "A4345",
},
"Sony | TV | Quantity | value": {0: 5, 1: 5, 2: 4},
"Sony | TV | Max-quant | value": {0: 10, 1: 9, 2: 9},
"Panasonic | TV | Model | value": {
0: "T232",
1: "S3424",
2: "X3421",
},
"Panasonic | TV | Quantity | value": {0: 1, 1: 5, 2: 1},
"Panasonic | TV | Max-quant | value": {0: 10, 1: 12, 2: 11},
"Sanyo | Radio | Model | value": {
0: "S111",
1: "S1s1",
2: "S1s2",
},
"Sanyo | Radio | Quantity | value": {0: 4, 1: 2, 2: 4},
"Sanyo | Radio | Max-quant | value": {0: 9, 1: 9, 2: 10},
}
),
pd.DataFrame(
[
{
"Manufacturer": "Sony ",
"Device": " TV ",
" Model ": "A222",
" Quantity ": 5,
" Max-quant ": 10,
},
{
"Manufacturer": "Sony ",
"Device": " TV ",
" Model ": "A234",
" Quantity ": 5,
" Max-quant ": 9,
},
{
"Manufacturer": "Sony ",
"Device": " TV ",
" Model ": "A4345",
" Quantity ": 4,
" Max-quant ": 9,
},
{
"Manufacturer": "Panasonic ",
"Device": " TV ",
" Model ": "T232",
" Quantity ": 1,
" Max-quant ": 10,
},
{
"Manufacturer": "Panasonic ",
"Device": " TV ",
" Model ": "S3424",
" Quantity ": 5,
" Max-quant ": 12,
},
{
"Manufacturer": "Panasonic ",
"Device": " TV ",
" Model ": "X3421",
" Quantity ": 1,
" Max-quant ": 11,
},
{
"Manufacturer": "Sanyo ",
"Device": " Radio ",
" Model ": "S111",
" Quantity ": 4,
" Max-quant ": 9,
},
{
"Manufacturer": "Sanyo ",
"Device": " Radio ",
" Model ": "S1s1",
" Quantity ": 2,
" Max-quant ": 9,
},
{
"Manufacturer": "Sanyo ",
"Device": " Radio ",
" Model ": "S1s2",
" Quantity ": 4,
" Max-quant ": 10,
},
]
),
),
(
pd.DataFrame(
{
"Sony | TV | Model | value": {
0: "A222",
1: "A234",
2: "A4345",
},
"Sony | TV | Quantity | value": {0: 5, 1: 5, 2: 4},
"Sony | TV | Max-quant | value": {0: 10, 1: 9, 2: 9},
"Panasonic | TV | Model | value": {
0: "T232",
1: "S3424",
2: "X3421",
},
"Panasonic | TV | Quantity | value": {0: 1, 1: 5, 2: 1},
"Panasonic | TV | Max-quant | value": {0: 10, 1: 12, 2: 11},
"Sanyo | Radio | Model | value": {
0: "S111",
1: "S1s1",
2: "S1s2",
},
"Sanyo | Radio | Quantity | value": {0: 4, 1: 2, 2: 4},
"Sanyo | radio | Max-quant | value": {0: 9, 1: 9, 2: 10},
}
),
pd.DataFrame(
[
{
"Manufacturer": "Sony ",
"Device": " TV ",
" Model ": "A222",
" Quantity ": 5.0,
" Max-quant ": 10.0,
},
{
"Manufacturer": "Sony ",
"Device": " TV ",
" Model ": "A234",
" Quantity ": 5.0,
" Max-quant ": 9.0,
},
{
"Manufacturer": "Sony ",
"Device": " TV ",
" Model ": "A4345",
" Quantity ": 4.0,
" Max-quant ": 9.0,
},
{
"Manufacturer": "Panasonic ",
"Device": " TV ",
" Model ": "T232",
" Quantity ": 1.0,
" Max-quant ": 10.0,
},
{
"Manufacturer": "Panasonic ",
"Device": " TV ",
" Model ": "S3424",
" Quantity ": 5.0,
" Max-quant ": 12.0,
},
{
"Manufacturer": "Panasonic ",
"Device": " TV ",
" Model ": "X3421",
" Quantity ": 1.0,
" Max-quant ": 11.0,
},
{
"Manufacturer": "Sanyo ",
"Device": " Radio ",
" Model ": "S111",
" Quantity ": 4.0,
" Max-quant ": np.nan,
},
{
"Manufacturer": "Sanyo ",
"Device": " Radio ",
" Model ": "S1s1",
" Quantity ": 2.0,
" Max-quant ": np.nan,
},
{
"Manufacturer": "Sanyo ",
"Device": " Radio ",
" Model ": "S1s2",
" Quantity ": 4.0,
" Max-quant ": np.nan,
},
{
"Manufacturer": "Sanyo ",
"Device": " radio ",
" Model ": np.nan,
" Quantity ": np.nan,
" Max-quant ": 9.0,
},
{
"Manufacturer": "Sanyo ",
"Device": " radio ",
" Model ": np.nan,
" Quantity ": np.nan,
" Max-quant ": 9.0,
},
{
"Manufacturer": "Sanyo ",
"Device": " radio ",
" Model ": np.nan,
" Quantity ": np.nan,
" Max-quant ": 10.0,
},
]
),
),
]
@pytest.mark.parametrize("df_in, df_out", multiple_column_names)
def test_multiple_column_names(df_in, df_out):
"""
    Test output for the scenario where the column name
    encodes more than two paired parts.
"""
result = df_in.pivot_longer(
names_to=("Manufacturer", "Device", ".value"),
names_pattern=r"(.+)\|(.+)\|(.+)\|.*",
)
assert_frame_equal(result, df_out)
def test_names_sep_non_unique_columns():
"""Test output for non_unique columns with names_sep"""
df = pd.DataFrame(
{
"event": [1, 2],
"url_1": ["g1", "g3"],
"name_1": ["dc", "nyc"],
"url_2": ["g2", "g4"],
"name_2": ["sf", "la"],
}
)
df.columns = ["event", "url_1", "name_1", "url_1", "name_2"]
expected = pd.DataFrame(
[
{"event": 1, "num": "1", "url": "g1", "name": "dc"},
{"event": 2, "num": "1", "url": "g3", "name": "nyc"},
{"event": 1, "num": "1", "url": "g2", "name": np.nan},
{"event": 2, "num": "1", "url": "g4", "name": np.nan},
{"event": 1, "num": "2", "url": np.nan, "name": "sf"},
{"event": 2, "num": "2", "url": np.nan, "name": "la"},
]
)
result = df.pivot_longer(
"event", names_to=(".value", "num"), names_sep="_", ignore_index=True
)
assert_frame_equal(result, expected)
def test_names_pattern_sequence_single_unique_column():
"""
Test output if names_pattern is a sequence,
and there is just one unique column.
"""
df = pd.DataFrame(
{
"id": [1, 2, 3],
"x1": [4, 5, 6],
"x2": [5, 6, 7],
}
)
result = df.pivot_longer("id", names_to=["x"], names_pattern=("x",))
expected = pd.DataFrame(
[
{"id": 1, "x": 4},
{"id": 2, "x": 5},
{"id": 3, "x": 6},
{"id": 1, "x": 5},
{"id": 2, "x": 6},
{"id": 3, "x": 7},
]
)
assert_frame_equal(result, expected)
// src/prod/src/Reliability/Failover/common/FailoverUnitMessageBody.cpp
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "Common.Stdafx.h"
using namespace Reliability;
void FailoverUnitMessageBody::WriteTo(Common::TextWriter& w, Common::FormatOptions const& options) const
{
fudesc_.WriteTo(w, options);
}
void FailoverUnitMessageBody::WriteToEtw(uint16 contextSequenceId) const
{
ReliabilityEventSource::Events->FailoverUnitMessageBody(contextSequenceId, fudesc_);
}
package uk.co.flax.luwak.assertions;
import org.assertj.core.api.AbstractAssert;
import org.assertj.core.api.Assertions;
import uk.co.flax.luwak.matchers.HighlightsMatch;
/**
* Copyright (c) 2013 Lemur Consulting Ltd.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class HighlightingMatchHitsAssert extends AbstractAssert<HighlightingMatchHitsAssert, HighlightsMatch> {
final HighlightingMatchAssert parent;
protected HighlightingMatchHitsAssert(HighlightsMatch actual, HighlightingMatchAssert parent) {
super(actual, HighlightingMatchHitsAssert.class);
this.parent = parent;
}
public HighlightingMatchHitsAssert withHitCount(int count) {
Assertions.assertThat(actual.getHitCount()).isEqualTo(count);
return this;
}
public FieldMatchAssert inField(String fieldname) {
Assertions.assertThat(actual.getHits(fieldname).size()).isGreaterThan(0);
return new FieldMatchAssert(this, actual.getHits(fieldname));
}
public HighlightingMatchHitsAssert withErrorMessage(String message) {
Assertions.assertThat(actual.error).isNotNull();
Assertions.assertThat(actual.error.getMessage()).contains(message);
return this;
}
public HighlightingMatchHitsAssert matchesQuery(String queryId, String docId) {
return parent.matchesQuery(queryId, docId);
}
}
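// Illustrative fluent use (hypothetical query/doc ids; the entry point is
// assumed to be obtained from the parent HighlightingMatchAssert):
//   hits.withHitCount(2).matchesQuery("query1", "doc1");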
| 626 |
1,473 | <gh_stars>1000+
/*
* Autopsy Forensic Browser
*
* Copyright 2011-2017 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.keywordsearch;
import com.google.common.base.CharMatcher;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Range;
import com.google.common.collect.RangeMap;
import com.google.common.collect.TreeRangeMap;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.validator.routines.checkdigit.LuhnCheckDigit;
/**
 * Utility class to validate credit card numbers. Validation entails checking
 * that numbers are plausible, but not necessarily 'real'. Validation checks
 * the following properties:
 * <ul>
 * <li> A number can use only one of dashes, spaces, or nothing as a separator
 * character. </li>
 * <li> If a number has a separator character, the digits must be grouped into
 * a valid pattern for the number length.</li>
 * <li> A number must pass the Luhn check.</li>
 * </ul>
 */
final class CreditCardValidator {
private CreditCardValidator() {
}
private static final LuhnCheckDigit CREDIT_CARD_NUM_LUHN_CHECK = new LuhnCheckDigit();
    /**
     * Map from CCN IIN (Issuer Identification Number) ranges to allowed lengths.
     */
static private final RangeMap<Integer, Set<Integer>> allowedLengths = TreeRangeMap.create();
private static final ImmutableSet<Integer> Set12to19 = ImmutableSet.of(12, 13, 14, 15, 16, 17, 18, 19);
private static final ImmutableSet<Integer> Set14to19 = ImmutableSet.of(14, 15, 16, 17, 18, 19);
    private static final ImmutableSet<Integer> Set16to19 = ImmutableSet.of(16, 17, 18, 19);
static {
//amex
allowedLengths.put(Range.closedOpen(34000000, 35000000), ImmutableSet.of(15));
allowedLengths.put(Range.closedOpen(37000000, 38000000), ImmutableSet.of(15));
//visa
allowedLengths.put(Range.closedOpen(40000000, 50000000), Set12to19);
//visa electron
allowedLengths.put(Range.closedOpen(40260000, 40270000), ImmutableSet.of(16));
allowedLengths.put(Range.closedOpen(41750000, 41750100), ImmutableSet.of(16));
allowedLengths.put(Range.closedOpen(44050000, 44060000), ImmutableSet.of(16));
allowedLengths.put(Range.closedOpen(45080000, 45090000), ImmutableSet.of(16));
allowedLengths.put(Range.closedOpen(48440000, 48450000), ImmutableSet.of(16));
allowedLengths.put(Range.closedOpen(49130000, 49140000), ImmutableSet.of(16));
allowedLengths.put(Range.closedOpen(49170000, 49180000), ImmutableSet.of(16));
//China UnionPay
allowedLengths.put(Range.closedOpen(62000000, 63000000), Set16to19);
//MasterCard
allowedLengths.put(Range.closedOpen(51000000, 56000000), ImmutableSet.of(16));
allowedLengths.put(Range.closedOpen(22210000, 27210000), ImmutableSet.of(16));
//Verve, these over lap with discover
allowedLengths.put(Range.closedOpen(50609900, 50619900), ImmutableSet.of(16, 19));
allowedLengths.put(Range.closedOpen(65000200, 65002700), ImmutableSet.of(16, 19));
//Maestro
allowedLengths.put(Range.closedOpen(50000000, 50100000), Set12to19);
allowedLengths.put(Range.closedOpen(56000000, 59000000), Set12to19);
allowedLengths.put(Range.closedOpen(60000000, 70000000), Set12to19);
allowedLengths.put(Range.closedOpen(63900000, 63910000), Set12to19);
allowedLengths.put(Range.closedOpen(67000000, 68000000), Set12to19);
//Diners Club International (processed by discover
allowedLengths.put(Range.closedOpen(30000000, 30600000), Set16to19);
allowedLengths.put(Range.closedOpen(30950000, 30960000), Set16to19);
allowedLengths.put(Range.closedOpen(36000000, 37000000), Set14to19);
allowedLengths.put(Range.closedOpen(38000000, 40000000), Set16to19);
//Diners Club USA & Canada (MasterCard co brand)
allowedLengths.put(Range.closedOpen(54000000, 56000000), Set14to19);
//Discover
allowedLengths.put(Range.closedOpen(60110000, 60120000), Set16to19);
allowedLengths.put(Range.closedOpen(62212600, 62292600), Set16to19);
allowedLengths.put(Range.closedOpen(64400000, 66000000), Set16to19);
//JCB //process by discover
allowedLengths.put(Range.closedOpen(35280000, 35900000), Set16to19);
//Dankort
allowedLengths.put(Range.closedOpen(50190000, 50200000), Set16to19);
//InterPayment
allowedLengths.put(Range.closedOpen(63600000, 63700000), Set16to19);
}
/**
* Does the given string represent a valid credit card number? It must have
* no separators, or only '-', or only ' '. Checks digit grouping for
 * 15, 16, and 19 digit numbers. All other lengths must be contiguous
 * or begin with a group of 4 digits.
 *
 * @param rawCCN The candidate credit card number, possibly containing separators.
*
* @return True if rawCCN represents a valid credit card number.
*/
static public boolean isValidCCN(String rawCCN) {
//check for a valid separator
boolean hasSpace = StringUtils.contains(rawCCN, ' ');
boolean hasDash = StringUtils.contains(rawCCN, '-');
if (hasSpace && hasDash) {
return false; //can only have dashes or spaces, not both.
}
Character separator = null;
if (hasSpace) {
separator = ' ';
} else if (hasDash) {
separator = '-';
}
final String cannonicalCCN;
String[] splitCCN;
if (separator != null) {
            // there is a separator; strip it to get the canonical form of the CCN
cannonicalCCN = CharMatcher.anyOf(separator.toString()).removeFrom(rawCCN);
splitCCN = rawCCN.split(separator.toString());
} else {
            // else use default values
cannonicalCCN = rawCCN;
splitCCN = new String[]{cannonicalCCN};
}
if (false == lengthMatchesBin(cannonicalCCN)) {
return false;
}
// validate digit grouping for 15, 16, and 19 digit cards
switch (cannonicalCCN.length()) {
case 15:
if (false == isValid15DigitGrouping(splitCCN)) {
return false;
}
break;
case 16:
if (false == isValid16DigitGrouping(splitCCN)) {
return false;
}
break;
case 19:
if (false == isValid19DigitGrouping(splitCCN)) {
return false;
}
break;
default:
if (false == isValidOtherDigitGrouping(splitCCN)) {
return false;
}
}
return CREDIT_CARD_NUM_LUHN_CHECK.isValid(cannonicalCCN);
}
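    // Illustrative results (hypothetical inputs; "4111 1111 1111 1111" is the
    // well-known Visa test number, which passes the Luhn check):
    //   isValidCCN("4111 1111 1111 1111") -> true  (one separator, 4-4-4-4 grouping)
    //   isValidCCN("4111-1111 1111 1111") -> false (mixes dashes and spaces)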
static private boolean lengthMatchesBin(String cannonicalCCN) {
String BIN = cannonicalCCN.substring(0, 8);
final Set<Integer> lengthsForBIN = allowedLengths.get(Integer.valueOf(BIN));
return null == lengthsForBIN || lengthsForBIN.contains(cannonicalCCN.length());
}
static private boolean isValidOtherDigitGrouping(String[] splitCCN) {
if (splitCCN.length == 1) {
return true;
} else {
return splitCCN[0].length() == 4;
}
}
static private boolean isValid19DigitGrouping(String[] splitCCN) {
switch (splitCCN.length) {
case 1:
return true;
case 2:
return splitCCN[0].length() == 6
&& splitCCN[1].length() == 13;
case 5:
return splitCCN[0].length() == 4
&& splitCCN[1].length() == 4
&& splitCCN[2].length() == 4
&& splitCCN[3].length() == 4
&& splitCCN[4].length() == 3;
default:
return false;
}
}
static private boolean isValid16DigitGrouping(String[] splitCCN) {
switch (splitCCN.length) {
case 1:
return true;
case 4:
return splitCCN[0].length() == 4
&& splitCCN[1].length() == 4
&& splitCCN[2].length() == 4
&& splitCCN[3].length() == 4;
default:
return false;
}
}
static private boolean isValid15DigitGrouping(String[] splitCCN) {
switch (splitCCN.length) {
case 1:
return true;
case 3:
return (splitCCN[0].length() == 4 && splitCCN[1].length() == 6 && splitCCN[2].length() == 5);
// UATP || ((splitCCN[0].length() == 4 && splitCCN[1].length() == 5 && splitCCN[2].length() == 6));
default:
return false;
}
}
}
| 4,118 |
5,300 | <filename>frameworks/Java/vertx-web/src/main/java/io/vertx/benchmark/model/World.java
package io.vertx.benchmark.model;
import io.vertx.core.json.JsonObject;
/**
* The model for the "world" database table.
*/
public final class World implements Comparable<World> {
private final int id;
private final int randomNumber;
/**
* Constructs a new world object with the given parameters.
*
* @param id the ID of the world
* @param randomNumber the random number of the world
*/
public World(int id, int randomNumber) {
this.id = id;
this.randomNumber = randomNumber;
}
public World(JsonObject doc) {
this.id = doc.getInteger("id");
this.randomNumber = doc.getInteger("randomNumber");
}
public int getId() {
return id;
}
public int getRandomNumber() {
return randomNumber;
}
@Override
public int compareTo(World o) {
return Integer.compare(getId(), o.getId());
}
} | 316 |
1,001 | /*
Copyright 2011 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef STRINGHELPER_H
#define STRINGHELPER_H
#include <string>
namespace StringHelper
{
bool IsNullOrEmpty(const char* psz);
void ToLower(std::string& str);
void Trim(std::string& str);
int ValidateNumberString(const char* psz, int nMinValue, int nMaxValue, int* pnResult);
}
#endif /* STRINGHELPER_H */
| 309 |
743 | <gh_stars>100-1000
import hashlib
import os
import secrets
import string
import subprocess
import sys
import tempfile
from binascii import hexlify, unhexlify
from enum import Enum
from random import randint
from pefile import *
from pathlib import Path
import struct
from enums.Architectures import Arch
class ImageFileMachine(Enum):
IMAGE_FILE_MACHINE_I386 = 332
IMAGE_FILE_MACHINE_IA64 = 512
IMAGE_FILE_MACHINE_AMD64 = 34404
@staticmethod
def from_bytes(_bytes: bytes):
if len(_bytes) > 2:
_bytes = _bytes[:2]
machine = struct.unpack("<H", _bytes)[0]
if machine == ImageFileMachine.IMAGE_FILE_MACHINE_I386.value:
return Arch.x86
elif machine == ImageFileMachine.IMAGE_FILE_MACHINE_IA64.value:
return Arch.x64
elif machine == ImageFileMachine.IMAGE_FILE_MACHINE_AMD64.value:
return Arch.x64
else:
_hex_value = hexlify(struct.pack('H', machine)).decode()
raise ValueError(
f"Unknown architecture.\n"
f" Raw: {struct.pack('H', machine)}\n"
f" Hex: {_hex_value[:2]} {_hex_value[2:]}"
)
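# Quick sanity check of the unpacking above (assumed input): the machine field
# b"\x64\x86" unpacks little-endian to 0x8664 == 34404, i.e. Arch.x64.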
def get_project_root() -> Path:
return Path(__file__).parent.parent
def bin2sh(filename):
if not os.path.isfile(filename):
raise FileNotFoundError("[-] Missing Bin2Sh target file")
utility = os.path.join(get_project_root(), "libs", "public", "adon.exe")
if not os.path.isfile(utility):
raise FileNotFoundError("[-] Missing Bin2Sh utility file")
return subprocess.check_output(f"{utility} \"{filename}\"").decode().strip()
def bin2hex4pe2sh(filename):
if not os.path.isfile(filename):
raise FileNotFoundError("[-] Missing bin2hex (pe2sh) target file")
utility = os.path.join(get_project_root(), "libs", "public", "chunlie.exe")
if not os.path.isfile(utility):
raise FileNotFoundError("[-] Missing bin2hex (pe2sh) utility file")
return unhexlify(subprocess.check_output(f"{utility} \"{filename}\"").decode().strip())
def py_bin2sh(filename):
if not os.path.isfile(filename):
raise FileNotFoundError("[-] Missing PyBin2Sh target file")
content = hexlify(open(filename, "rb").read()).decode()
shellcode = "{" + ",".join([f"0x{content[i:i + 2]}" for i in range(0, len(content), 2)]) + "}"
return shellcode
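# e.g. a two-byte file containing b"\x90\xC3" is rendered as "{0x90,0xc3}"
# (hexlify emits lowercase hex), ready for a C-style unsigned char array.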
def sgn(shellcode, arch="x64"):
architecture = "64" if arch in ["x64", None] else "32"
filename = None
temp_filename = tempfile.NamedTemporaryFile(suffix=".raw",
delete=False,
dir=os.path.join(get_project_root(), "temp")).name
with open(temp_filename, "wb") as temp:
temp.write(shellcode)
if not os.path.isfile(temp_filename):
raise FileNotFoundError("[-] Missing Shikata-Ga-Nai target file")
utility = os.path.join(get_project_root(), "libs", "public", "sgn.exe")
if not os.path.isfile(utility):
raise FileNotFoundError("[-] Missing Shikata-Ga-Nai utility file")
try:
cmd = f"{utility} -safe -a {architecture} \"{temp_filename}\""
# print(cmd)
output = subprocess.check_output(cmd).decode().strip()
for line in output.split("\n"):
if line.find("Outfile:") > - 1:
filename = line.split(": ")[1]
print(f" [*] Encoded filename: {filename}")
shellcode = open(filename, "rb").read()
except subprocess.CalledProcessError:
print("[-] Failed to encode payload with Shikata-Ga-Nai")
if os.path.isfile(temp_filename):
os.unlink(temp_filename)
if filename and os.path.isfile(filename):
os.unlink(filename)
return shellcode
def isDotNet(filename):
try:
pe = PE(filename)
clr_metadata = pe.OPTIONAL_HEADER.DATA_DIRECTORY[14]
return not (clr_metadata.VirtualAddress == 0 and clr_metadata.Size == 0)
except PEFormatError:
return False
def shellcode_signature(shellcode):
if isinstance(shellcode, str):
shellcode = shellcode.encode()
return hashlib.sha1(shellcode).hexdigest()
def file_signature(filename):
_, ext = os.path.splitext(filename)
with open(filename, "rb") as file:
signature = hashlib.sha1(file.read()).hexdigest()
return signature
def choose(choices: list):
    for n, ver in enumerate(choices):
        print(f"  {n}: {ver}")
    choice = -1
    # keep prompting until the index is in range; returning inside the loop
    # would skip validation for negative or out-of-range input
    while not (0 <= choice < len(choices)):
        try:
            choice = int(input("> "))
        except (ValueError, TypeError):
            continue
    return choices[choice]
def mssql_hex(file):
with open(file=file, mode="rb") as dll:
content = dll.read()
return f"0x{hexlify(content).decode()}"
def static_random_ascii_string(min_size=None, max_size=None):
if not min_size:
min_size = 3
if not max_size:
max_size = 10
return ''.join(secrets.choice(string.ascii_letters) for _ in range(randint(min_size, max_size)))
def detect_arch(file):
f = open(file, "rb").read()
if f[:2] != b"MZ":
print("[-] Unknown file format")
sys.exit(1)
else:
header_offset = struct.unpack("<L", f[60:64])[0]
raw = f[header_offset + 4:header_offset + 6]
return ImageFileMachine.from_bytes(raw)
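# Layout assumed above: bytes 0-1 hold the "MZ" DOS magic, bytes 60-63
# (e_lfanew) hold the PE header offset, and the machine field sits 4 bytes
# past that offset, immediately after the "PE\0\0" signature.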
if __name__ == '__main__':
try:
payload = mssql_hex(sys.argv[1])
if len(payload) <= 8000:
print("[+] Success. payload length is under MAX_LENGTH")
else:
print(f"[-] Warning: payload length is above MAX_LENGTH: {len(payload)}")
print(payload)
    except (IndexError, OSError):
print("[-] Error: MSSQL Hexlify needs a file to encode")
| 2,578 |
7,158 | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Itseez Inc or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/datasets/tr_chars.hpp"
#include "opencv2/datasets/util.hpp"
namespace cv
{
namespace datasets
{
using namespace std;
class TR_charsImp CV_FINAL : public TR_chars
{
public:
TR_charsImp() {}
//TR_charsImp(const string &path, int number = 0);
virtual ~TR_charsImp() CV_OVERRIDE {}
virtual void load(const string &path) CV_OVERRIDE;
private:
void loadDatasetSplit(const string &path, int number);
void loadDataset(const string &path);
void parseLine(const string &line, vector<int> &currSet, int number);
inline void convert(vector<int> &from, vector< Ptr<Object> > &to, vector<int> &allLabels, vector<string> &allNames);
inline void parseSet(const string &line, const string &pattern, bool &flag, vector<int> &set, int number);
};
void TR_charsImp::parseLine(const string &line, vector<int> &currSet, int number)
{
vector<string> elems;
split(line, elems, ' ');
if (number >= (int)elems.size())
{
return;
}
unsigned int ind = atoi(elems[number].c_str());
if (ind > 0)
{
currSet.push_back(ind-1);
}
}
inline void TR_charsImp::convert(vector<int> &from, vector< Ptr<Object> > &to, vector<int> &allLabels, vector<string> &allNames)
{
for (vector<int>::iterator it=from.begin(); it!=from.end(); ++it)
{
if (*it>=(int)allNames.size() || *it>=(int)allLabels.size())
{
printf("incorrect index: %u\n", *it);
continue;
}
Ptr<TR_charsObj> curr(new TR_charsObj);
curr->imgName = allNames[*it];
curr->label = allLabels[*it];
to.push_back(curr);
}
}
inline void TR_charsImp::parseSet(const string &line, const string &pattern, bool &flag, vector<int> &set, int number)
{
size_t pos = line.find(pattern);
if (string::npos != pos)
{
flag = true;
string s(line.substr(pos + pattern.length()));
parseLine(s, set, number);
} else
if (flag)
{
parseLine(line, set, number);
}
}
/*TR_charsImp::TR_charsImp(const string &path, int number)
{
loadDataset(path, number);
}*/
void TR_charsImp::load(const string &path)
{
loadDataset(path);
}
void TR_charsImp::loadDataset(const string &path)
{
int number = 0;
do
{
loadDatasetSplit(path, number);
number++;
} while (train.back().size()>0);
train.pop_back(); // remove last empty split
test.pop_back(); // remove last empty split
validation.pop_back(); // remove last empty split
}
void TR_charsImp::loadDatasetSplit(const string &path, int number)
{
train.push_back(vector< Ptr<Object> >());
test.push_back(vector< Ptr<Object> >());
validation.push_back(vector< Ptr<Object> >());
vector<int> allLabels, trainSet, testSet, validationSet;
vector<string> allNames;
ifstream infile((path + "list_English_Img.m").c_str());
string line;
bool labels = false, names = false, isTrain = false, isTest = false, isValidation = false;
while (getline(infile, line))
{
size_t pos = line.find("];");
if (string::npos != pos)
{
labels = false;
names = false;
isTrain = false;
isTest = false;
isValidation = false;
}
string slabels("list.ALLlabels = [");
pos = line.find(slabels);
if (string::npos != pos)
{
labels = true;
string s(line.substr(pos+slabels.length()));
allLabels.push_back(atoi(s.c_str()));
} else
if (labels)
{
allLabels.push_back(atoi(line.c_str()));
}
string snames("list.ALLnames = [");
pos = line.find(snames);
if (string::npos != pos)
{
names = true;
size_t start = pos+snames.length();
string s(line.substr(start+1, line.length()-start-2));
allNames.push_back(s);
} else
if (names)
{
string s(line.substr(1, line.length()-2));
allNames.push_back(s);
}
string trainStr("list.TRNind = [");
parseSet(line, trainStr, isTrain, trainSet, number);
string testStr("list.TSTind = [");
parseSet(line, testStr, isTest, testSet, number);
string validationStr("list.VALind = [");
parseSet(line, validationStr, isValidation, validationSet, number);
/*"list.classlabels = ["
"list.classnames = ["
"list.NUMclasses = 62;"
"list.TXNind = ["*/
}
convert(trainSet, train.back(), allLabels, allNames);
convert(testSet, test.back(), allLabels, allNames);
convert(validationSet, validation.back(), allLabels, allNames);
}
Ptr<TR_chars> TR_chars::create()
{
return Ptr<TR_charsImp>(new TR_charsImp);
}
}
}
| 2,683 |
341 | import numpy as np
from lib.tools_pinyin import *
def get_maxLengthListinList(ls):
length = 0
for l in ls:
if len(l)>length: length = len(l)
return length
def sparse_tuple_from(sequences, dtype=np.int32):
"""
    Create a sparse representation of `sequences`.
Args:
sequences: a list of lists of type dtype where each element is a sequence
Returns:
A tuple with (indices, values, shape)
"""
indices = []
values = []
for n, seq in enumerate(sequences):
indices.extend(zip([n] * len(seq), range(len(seq))))
values.extend(seq)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=dtype)
shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)
return indices, values, shape
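# Minimal sketch: sparse_tuple_from([[1, 2], [3]]) returns
# indices [[0, 0], [0, 1], [1, 0]], values [1, 2, 3], shape [2, 2].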
def sparseTuples2dense(sparseTensor):
pred_dense = -np.ones(sparseTensor[2])
for i in range(len(sparseTensor[0])):
pred_dense[sparseTensor[0][i][0],sparseTensor[0][i][1]] = sparseTensor[1][i]
return pred_dense
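# Round trip of the sketch above: densifying that tuple yields
# [[1., 2.], [3., -1.]], with -1 padding positions no index touched.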
def report_accuracy(decoded_list, test_targets, pyParser):
original_list = sparseTuples2dense(test_targets)
detected_list = sparseTuples2dense(decoded_list)
print("-------------------")
for i in range(len(original_list)):
original_line = []
detected_line = []
for stuff in original_list[i]:
if stuff!=-1:
original_line.append(stuff)
for stuff in detected_list[i]:
if stuff!=-1:
detected_line.append(stuff)
print(i)
print(original_line)
print(detected_line)
print(pyParser.decodeIndices(original_line, useUnderline = True))
print(pyParser.decodeIndices(detected_line, useUnderline = True))
print("-------------------")
| 788 |
760 | /*
* Copyright 2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.codecentric.spring.boot.chaos.monkey.assaults;
import static org.junit.jupiter.api.Assertions.assertThrows;
import de.codecentric.spring.boot.chaos.monkey.component.MetricEventPublisher;
import de.codecentric.spring.boot.chaos.monkey.configuration.AssaultException;
import de.codecentric.spring.boot.chaos.monkey.configuration.AssaultProperties;
import de.codecentric.spring.boot.chaos.monkey.configuration.ChaosMonkeySettings;
import java.io.IOException;
import java.util.Collections;
import org.junit.jupiter.api.Test;
import org.mockito.Mock;
/** @author <NAME> */
class ExceptionAssaultTest {
@Mock MetricEventPublisher metricsMock;
@Test
void throwsRuntimeExceptionWithDefaultAssaultSettings() {
ExceptionAssault exceptionAssault = new ExceptionAssault(getChaosMonkeySettings(), metricsMock);
assertThrows(RuntimeException.class, exceptionAssault::attack);
}
@Test
void throwsRuntimeExceptionWithNullTypeAndNullArgument() {
ChaosMonkeySettings settings = getChaosMonkeySettings();
settings.getAssaultProperties().setException(null);
ExceptionAssault exceptionAssault = new ExceptionAssault(settings, metricsMock);
assertThrows(RuntimeException.class, exceptionAssault::attack);
}
@Test
void throwsDefaultRuntimeExceptionWithNullTypeAndNonNullArgument() {
String exceptionArgumentClassName = "java.lang.String";
String exceptionArgumentValue = "Chaos Monkey - RuntimeException";
ChaosMonkeySettings settings = getChaosMonkeySettings();
settings
.getAssaultProperties()
.setException(
getAssaultException(null, exceptionArgumentClassName, exceptionArgumentValue));
ExceptionAssault exceptionAssault = new ExceptionAssault(settings, metricsMock);
assertThrows(RuntimeException.class, exceptionAssault::attack, exceptionArgumentValue);
}
@Test
void throwsRuntimeExceptionWithNonNullTypeAndNullArgument() {
ChaosMonkeySettings settings = getChaosMonkeySettings();
settings
.getAssaultProperties()
.setException(getAssaultException("java.lang.ArithmeticException", null, null));
ExceptionAssault exceptionAssault = new ExceptionAssault(settings, metricsMock);
assertThrows(ArithmeticException.class, exceptionAssault::attack);
}
@Test
void throwsRuntimeExceptionWithNonnullTypeAndNonNullArgument() {
String exceptionArgumentClassName = "java.lang.String";
String exceptionArgumentValue = "ArithmeticException Test";
ChaosMonkeySettings settings = getChaosMonkeySettings();
settings
.getAssaultProperties()
.setException(
getAssaultException(
"java.lang.ArithmeticException",
exceptionArgumentClassName,
exceptionArgumentValue));
ExceptionAssault exceptionAssault = new ExceptionAssault(settings, metricsMock);
assertThrows(ArithmeticException.class, exceptionAssault::attack, exceptionArgumentValue);
}
@Test
void throwsGeneralException() {
ChaosMonkeySettings settings = getChaosMonkeySettings();
settings
.getAssaultProperties()
.setException(getAssaultException("java.io.IOException", null, null));
ExceptionAssault exceptionAssault = new ExceptionAssault(settings, metricsMock);
assertThrows(IOException.class, exceptionAssault::attack);
}
@Test
void throwsError() {
ChaosMonkeySettings settings = getChaosMonkeySettings();
settings
.getAssaultProperties()
.setException(getAssaultException("java.lang.OutOfMemoryError", null, null));
ExceptionAssault exceptionAssault = new ExceptionAssault(settings, metricsMock);
assertThrows(OutOfMemoryError.class, exceptionAssault::attack);
}
private ChaosMonkeySettings getChaosMonkeySettings() {
ChaosMonkeySettings settings = new ChaosMonkeySettings();
settings.setAssaultProperties(getDefaultAssaultProperties());
return settings;
}
private AssaultProperties getDefaultAssaultProperties() {
AssaultProperties assaultProperties = new AssaultProperties();
assaultProperties.setLevel(5);
assaultProperties.setLatencyRangeStart(1000);
assaultProperties.setLatencyRangeEnd(3000);
assaultProperties.setLatencyActive(true);
assaultProperties.setExceptionsActive(false);
assaultProperties.setException(getAssaultException(null, null, null));
assaultProperties.setKillApplicationActive(false);
assaultProperties.setWatchedCustomServices(null);
return assaultProperties;
}
private AssaultException getAssaultException(
String exceptionClassName, String argumentClass, String argumentValue) {
AssaultException assaultException = new AssaultException();
if (exceptionClassName != null) {
assaultException.setType(exceptionClassName);
}
if (argumentClass != null) {
AssaultException.ExceptionArgument argument = new AssaultException.ExceptionArgument();
argument.setClassName(argumentClass);
argument.setValue(argumentValue);
assaultException.setArguments(Collections.singletonList(argument));
}
return assaultException;
}
}
| 1,746 |
4,054 | // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.feedhandler;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.document.DocumentTypeManager;
import com.yahoo.feedapi.FeedContext;
import com.yahoo.feedapi.MessagePropertyProcessor;
import com.yahoo.feedapi.SharedSender;
import com.yahoo.search.query.ParameterParser;
import java.io.InputStream;
public abstract class VespaFeedHandlerBase {
protected FeedContext context;
private final long defaultTimeoutMillis;
VespaFeedHandlerBase(FeedContext context) {
this(context, context.getPropertyProcessor().getDefaultTimeoutMillis());
}
private VespaFeedHandlerBase(FeedContext context, long defaultTimeoutMillis) {
this.context = context;
this.defaultTimeoutMillis = defaultTimeoutMillis;
}
SharedSender getSharedSender(String route) {
return context.getSharedSender(route);
}
MessagePropertyProcessor getPropertyProcessor() {
return context.getPropertyProcessor();
}
    /**
     * @param request Request object to get the POST data stream from
     * @return The request's POST data stream, returned unchanged.
     */
InputStream getRequestInputStream(HttpRequest request) {
return request.getData();
}
protected DocumentTypeManager getDocumentTypeManager() {
return context.getDocumentTypeManager();
}
protected long getTimeoutMillis(HttpRequest request) {
return ParameterParser.asMilliSeconds(request.getProperty("timeout"), defaultTimeoutMillis);
}
}
| 557 |
4,184 | package se.citerus.dddsample.acceptance.pages;
import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.support.ui.Select;
import static junit.framework.TestCase.assertTrue;
public class CargoDestinationPage {
private final WebDriver driver;
public CargoDestinationPage(WebDriver driver) {
this.driver = driver;
WebElement cargoDestinationHeader = driver.findElement(By.cssSelector("table caption"));
assertTrue(cargoDestinationHeader.getText().startsWith("Change destination for cargo "));
}
public CargoDetailsPage selectDestinationTo(String destination) {
WebElement destinationPicker = driver.findElement(By.name("unlocode"));
Select select = new Select(destinationPicker);
select.selectByVisibleText(destination);
destinationPicker.submit();
return new CargoDetailsPage(driver);
}
}
| 320 |
777 | <filename>base/allocator/allocator_shim_win.h
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_WIN_H_
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_WIN_H_
#include <stddef.h>
namespace base {
namespace allocator {
// Used to indicate that the shim is actually in place.
extern bool g_is_win_shim_layer_initialized;
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_WIN_H_
| 192 |
1,338 | /*
* Copyright (C) 2006-2008 <NAME> <<EMAIL>>. All rights reserved.
* Copyright (C) 2008 <NAME> <<EMAIL>>. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
#ifndef __VIDEO_ADD_ON_H
#define __VIDEO_ADD_ON_H
#include <MediaAddOn.h>
class VideoWindowAddOn : public BMediaAddOn
{
public:
VideoWindowAddOn(image_id);
~VideoWindowAddOn();
bool WantsAutoStart();
int32 CountFlavors();
status_t GetFlavorAt(int32, const flavor_info**);
BMediaNode* InstantiateNodeFor(const flavor_info*, BMessage*, status_t*);
private:
flavor_info fInfo;
media_format fInputFormat;
};
extern "C" BMediaAddOn *make_media_addon(image_id id);
#endif
| 286 |
13,006 | <reponame>mjlorenzo305/deeplearning4j
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver119 on 10.11.2017.
//
#include "testlayers.h"
#include <ops/declarable/CustomOperations.h>
#include <array/NDArray.h>
#include <legacy/NativeOps.h>
#include <helpers/BitwiseUtils.h>
using namespace sd;
using namespace sd::graph;
class BitwiseUtilsTests : public testing::Test {
public:
};
// obviously, this test will fail on big-endian machines, but who cares
TEST_F(BitwiseUtilsTests, Test_Runtime_Endianess_1) {
bool isBE = BitwiseUtils::isBE();
ASSERT_FALSE(isBE);
}
TEST_F(BitwiseUtilsTests, Test_ValueBit_1) {
int idx = BitwiseUtils::valueBit(1);
ASSERT_EQ(0, idx);
}
TEST_F(BitwiseUtilsTests, Test_ValueBit_2) {
int idx = BitwiseUtils::valueBit(2);
ASSERT_EQ(1, idx);
}
TEST_F(BitwiseUtilsTests, Test_ValueBits_1) {
std::vector<int> expected({1, 1});
while (expected.size() < 32)
expected.push_back(0);
std::vector<int> result = BitwiseUtils::valueBits(3);
ASSERT_EQ(32, result.size());
ASSERT_EQ(expected, result);
}
TEST_F(BitwiseUtilsTests, Test_ValueBits_2) {
int value = 48;
int flipped = BitwiseUtils::flip_bits(value);
ASSERT_NE(value, flipped);
auto o = BitwiseUtils::valueBits(value);
auto f = BitwiseUtils::valueBits(flipped);
for (int e = 0; e < o.size(); e++)
ASSERT_NE(o.at(e), f.at(e));
} | 757 |
2,829 | /* Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <iostream>
#include <string>
#include <memory>
#include "gtest/gtest.h"
#include "euler/core/index/hash_sample_index.h"
#include "euler/common/env.h"
using std::string;
using std::vector;
using std::cout;
using std::endl;
using std::shared_ptr;
namespace euler {
TEST(HashSampleIndexTest, AddItem) {
HashSampleIndex<int32_t, string> hsIndex("hash");
vector<int32_t> ids = {0, 1, 2, 3, 4};
vector<float> weights = {2.0, 4.0, 8.0, 8.0, 16.0};
ASSERT_TRUE(hsIndex.AddItem("name", ids, weights));
ASSERT_TRUE(hsIndex.AddItem("age", ids, weights));
ASSERT_FALSE(hsIndex.AddItem("name", ids, weights));
}
TEST(HashSampleIndexTest, Merge) {
HashSampleIndex<int32_t, string> hsIndex("hash");
{
vector<int32_t> ids = {0, 1, 2};
vector<float> weights = {2.0, 4.0, 8.0};
ASSERT_TRUE(hsIndex.AddItem("name", ids, weights));
}
{
vector<int32_t> ids = {4, 5, 6};
vector<float> weights = {1.0, 1.0, 2.0};
ASSERT_TRUE(hsIndex.AddItem("age", ids, weights));
}
HashSampleIndex<int32_t, string> newIndex("hash");
{
vector<int32_t> ids = {3, 4, 5};
vector<float> weights = {2.0, 2.0, 1.0};
ASSERT_TRUE(newIndex.AddItem("name", ids, weights));
}
{
vector<int32_t> ids = {24, 25, 26};
vector<float> weights = {2.0, 2.0, 4.0};
ASSERT_TRUE(newIndex.AddItem("xxx", ids, weights));
}
ASSERT_TRUE(hsIndex.Merge(newIndex));
ASSERT_EQ(hsIndex.GetKeys().size(), 3);
{
auto r = hsIndex.Search(EQ, "name");
auto v = r->Sample(300000);
vector<int32_t> cnts(6);
for (auto& i : v) {
cnts[i.first] += 1;
}
ASSERT_TRUE(cnts[1]*1.0/cnts[0] > 1.9 && cnts[1]*1.0/cnts[0] < 2.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[1] > 1.9 && cnts[2]*1.0/cnts[1] < 2.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[3] > 3.9 && cnts[2]*1.0/cnts[3] < 4.1);
ASSERT_TRUE(cnts[3]*1.0/cnts[4] > 0.9 && cnts[3]*1.0/cnts[4] < 1.1);
ASSERT_TRUE(cnts[4]*1.0/cnts[5] > 1.9 && cnts[4]*1.0/cnts[5] < 2.1);
}
{
auto r = hsIndex.Search(EQ, "xxx")->Sample(100000);
vector<int32_t> cnts(3);
for (auto& i : r) {
cnts[i.first - 24] += 1;
}
ASSERT_TRUE(cnts[1]*1.0/cnts[0] > 0.9 && cnts[1]*1.0/cnts[0] < 1.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[1] > 1.9 && cnts[2]*1.0/cnts[1] < 2.1);
}
}
TEST(HashSampleIndexTest, MergeVec) {
HashSampleIndex<int32_t, string> hsIndex("hash");
{
vector<int32_t> ids = {0, 1, 2};
vector<float> weights = {2.0, 1.0, 4.0};
ASSERT_TRUE(hsIndex.AddItem("name1", ids, weights));
}
{
vector<int32_t> ids = {4, 5, 6};
vector<float> weights = {1.0, 1.0, 2.0};
ASSERT_TRUE(hsIndex.AddItem("name2", ids, weights));
}
HashSampleIndex<int32_t, string>* index2 =
new HashSampleIndex<int32_t, string>("hash");
{
vector<int32_t> ids = {3, 4, 5};
vector<float> weights = {2.0, 2.0, 1.0};
ASSERT_TRUE(index2->AddItem("name1", ids, weights));
}
HashSampleIndex<int32_t, string>* index3 =
new HashSampleIndex<int32_t, string>("hash");
{
vector<int32_t> ids = {7, 8};
vector<float> weights = {2.0, 4.0};
ASSERT_TRUE(index3->AddItem("name3", ids, weights));
}
vector<shared_ptr<SampleIndex>> vh;
vh.push_back(shared_ptr<SampleIndex>(index2));
vh.push_back(shared_ptr<SampleIndex>(index3));
ASSERT_TRUE(hsIndex.Merge(vh));
ASSERT_EQ(hsIndex.GetKeys().size(), 3);
{
auto v = hsIndex.Search(EQ, "name1")->Sample(300000);
vector<int32_t> cnts(9);
for (auto& i : v) {
cnts[i.first] += 1;
}
cout << "distribution should be 2:1:4:2:2:1:0:0:0" << endl;
cout << "distribution:" << endl;
for (auto i : cnts) {
cout << i << " ";
}
cout << endl;
ASSERT_TRUE(cnts[0]*1.0/cnts[1] > 1.9 && cnts[0]*1.0/cnts[1] < 2.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[1] > 3.9 && cnts[2]*1.0/cnts[1] < 4.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[3] > 1.9 && cnts[2]*1.0/cnts[3] < 2.1);
ASSERT_TRUE(cnts[3]*1.0/cnts[4] > 0.9 && cnts[3]*1.0/cnts[4] < 1.1);
ASSERT_TRUE(cnts[4]*1.0/cnts[5] > 1.9 && cnts[4]*1.0/cnts[5] < 2.1);
ASSERT_EQ(cnts[6], 0);
ASSERT_EQ(cnts[7], 0);
ASSERT_EQ(cnts[8], 0);
}
{
auto v = hsIndex.Search(EQ, "name3")->GetSortedIds();
ASSERT_EQ(v.size(), 2);
ASSERT_EQ(v[0], 7);
ASSERT_EQ(v[1], 8);
}
{
auto v = hsIndex.Search(NOT_EQ, "name1")->GetSortedIds();
ASSERT_EQ(v.size(), 5);
ASSERT_EQ(v[0], 4);
ASSERT_EQ(v[4], 8);
}
}
TEST(HashSampleIndexTest, SearchSample) {
HashSampleIndex<int32_t, string> hsIndex("hash");
vector<int32_t> ids = {0, 1, 2, 3, 4};
vector<float> weights = {2.0, 4.0, 8.0, 8.0, 16.0};
hsIndex.AddItem("name", ids, weights);
auto v = hsIndex.Search(EQ, "name")->Sample(100000);
vector<int32_t> cnts(5);
for (auto& i : v) {
cnts[i.first] += 1;
ASSERT_EQ(i.second, weights[i.first]);
}
ASSERT_TRUE(cnts[1]*1.0/cnts[0] > 1.9 && cnts[1]*1.0/cnts[0] < 2.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[1] > 1.9 && cnts[2]*1.0/cnts[1] < 2.1);
ASSERT_TRUE(cnts[3]*1.0/cnts[2] > 0.9 && cnts[3]*1.0/cnts[2] < 1.1);
ASSERT_TRUE(cnts[4]*1.0/cnts[3] > 1.9 && cnts[4]*1.0/cnts[3] < 2.1);
auto empty = hsIndex.Search(EQ, "age")->Sample(10000);
ASSERT_EQ(empty.size(), 0);
}
TEST(HashSampleIndexTest, Search) {
HashSampleIndex<int32_t, string> hsIndex("hash");
vector<int32_t> ids = {0, 1, 2, 3, 4};
vector<float> weights = {2.0, 4.0, 8.0, 8.0, 16.0};
hsIndex.AddItem("name", ids, weights);
auto v = hsIndex.Search(EQ, "name")->GetIds();
ASSERT_EQ(v.size(), 5);
ASSERT_EQ(v[0], 0);
auto empty = hsIndex.Search(EQ, "age")->GetIds();
ASSERT_EQ(empty.size(), 0);
HashSampleIndex<int32_t, int32_t> hsIndex2("hash");
hsIndex2.AddItem(12, ids, weights);
auto v2 = hsIndex2.Search(EQ, "12")->GetIds();
ASSERT_EQ(v2.size(), 5);
ASSERT_EQ(v2[0], 0);
}
TEST(HashSampleIndexTest, NOTEQ) {
HashSampleIndex<int32_t, string> hsIndex("hash");
{
vector<int32_t> ids = {0, 1, 2};
vector<float> weights = {2.0, 2.0, 4.0};
hsIndex.AddItem("name", ids, weights);
}
{
vector<int32_t> ids = {3, 4};
vector<float> weights = {2.0, 4.0};
hsIndex.AddItem("name2", ids, weights);
}
{
vector<int32_t> ids = {5, 6};
vector<float> weights = {4.0, 4.0};
hsIndex.AddItem("name3", ids, weights);
}
{
auto v = hsIndex.Search(NOT_EQ, "name")->GetIds();
ASSERT_EQ(v.size(), 4);
}
{
auto v = hsIndex.Search(NOT_EQ, "name2")->GetSortedIds();
ASSERT_EQ(v.size(), 5);
}
{
auto v = hsIndex.Search(NOT_EQ, "name")->Sample(100000);
vector<int32_t> cnts(7);
for (auto& i : v) {
cnts[i.first] += 1;
}
ASSERT_EQ(cnts[0], 0);
ASSERT_EQ(cnts[1], 0);
ASSERT_EQ(cnts[2], 0);
ASSERT_TRUE(cnts[4]*1.0/cnts[3] > 1.9 && cnts[4]*1.0/cnts[3] < 2.1);
ASSERT_TRUE(cnts[5]*1.0/cnts[4] > 0.9 && cnts[5]*1.0/cnts[4] < 1.1);
ASSERT_TRUE(cnts[6]*1.0/cnts[5] > 0.9 && cnts[6]*1.0/cnts[5] < 1.1);
}
{
auto v = hsIndex.Search(NOT_EQ, "name3")->Sample(100000);
vector<int32_t> cnts(7);
for (auto& i : v) {
cnts[i.first] += 1;
}
ASSERT_EQ(cnts[5], 0);
ASSERT_EQ(cnts[6], 0);
ASSERT_TRUE(cnts[1]*1.0/cnts[0] > 0.9 && cnts[1]*1.0/cnts[0] < 1.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[1] > 1.9 && cnts[2]*1.0/cnts[1] < 2.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[3] > 1.9 && cnts[2]*1.0/cnts[3] < 2.1);
ASSERT_TRUE(cnts[4]*1.0/cnts[3] > 1.9 && cnts[4]*1.0/cnts[3] < 2.1);
}
{
auto v = hsIndex.Search(NOT_EQ, "name");
v = v->Intersection(hsIndex.Search(EQ, "name2"));
auto r = v->GetSortedIds();
ASSERT_EQ(r.size(), 2);
ASSERT_EQ(r[0], 3);
ASSERT_EQ(r[1], 4);
}
{
auto v = hsIndex.Search(NOT_EQ, "name");
v = v->Intersection(hsIndex.Search(NOT_EQ, "name2"));
auto r = v->GetSortedIds();
ASSERT_EQ(r.size(), 2);
ASSERT_EQ(r[0], 5);
ASSERT_EQ(r[1], 6);
auto v3 = v->Sample(100000);
vector<int32_t> cnts(7);
for (auto& i : v3) {
cnts[i.first] += 1;
}
ASSERT_EQ(cnts[0], 0);
ASSERT_EQ(cnts[1], 0);
ASSERT_EQ(cnts[2], 0);
ASSERT_EQ(cnts[3], 0);
ASSERT_EQ(cnts[4], 0);
ASSERT_TRUE(cnts[6]*1.0/cnts[5] > 0.9 && cnts[6]*1.0/cnts[5] < 1.1);
}
}
TEST(HashSampleIndexTest, IN) {
HashSampleIndex<int32_t, string> hsIndex("hash");
{
vector<int32_t> ids = {0, 1, 2};
vector<float> weights = {2.0, 2.0, 4.0};
hsIndex.AddItem("name", ids, weights);
}
{
vector<int32_t> ids = {3, 4};
vector<float> weights = {2.0, 4.0};
hsIndex.AddItem("name2", ids, weights);
}
{
vector<int32_t> ids = {5, 6};
vector<float> weights = {4.0, 4.0};
hsIndex.AddItem("name3", ids, weights);
}
{
auto v = hsIndex.Search(IN, "name")->GetIds();
ASSERT_EQ(v.size(), 3);
}
{
auto v = hsIndex.Search(IN, "name2")->GetSortedIds();
ASSERT_EQ(v.size(), 2);
}
{
auto v = hsIndex.Search(IN, "name::name2")->Sample(100000);
vector<int32_t> cnts(7);
for (auto& i : v) {
cnts[i.first] += 1;
}
ASSERT_EQ(cnts[5], 0);
ASSERT_EQ(cnts[6], 0);
ASSERT_TRUE(cnts[1]*1.0/cnts[0] > 0.9 && cnts[1]*1.0/cnts[0] < 1.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[1] > 1.9 && cnts[2]*1.0/cnts[1] < 2.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[3] > 1.9 && cnts[2]*1.0/cnts[3] < 2.1);
ASSERT_TRUE(cnts[4]*1.0/cnts[3] > 1.9 && cnts[4]*1.0/cnts[3] < 2.1);
}
{
auto v = hsIndex.Search(IN, "name3::name2")->Sample(100000);
vector<int32_t> cnts(7);
for (auto& i : v) {
cnts[i.first] += 1;
}
ASSERT_EQ(cnts[0], 0);
ASSERT_EQ(cnts[1], 0);
ASSERT_EQ(cnts[2], 0);
ASSERT_TRUE(cnts[4]*1.0/cnts[3] > 1.9 && cnts[4]*1.0/cnts[3] < 2.1);
ASSERT_TRUE(cnts[5]*1.0/cnts[4] > 0.9 && cnts[5]*1.0/cnts[4] < 1.1);
ASSERT_TRUE(cnts[6]*1.0/cnts[5] > 0.9 && cnts[6]*1.0/cnts[5] < 1.1);
}
}
TEST(HashSampleIndexTest, NOT_IN) {
HashSampleIndex<int32_t, int32_t> hsIndex("hash");
{
vector<int32_t> ids = {0, 1, 2};
vector<float> weights = {2.0, 2.0, 4.0};
hsIndex.AddItem(100, ids, weights);
}
{
vector<int32_t> ids = {3, 4};
vector<float> weights = {2.0, 4.0};
hsIndex.AddItem(200, ids, weights);
}
{
vector<int32_t> ids = {5, 6};
vector<float> weights = {4.0, 4.0};
hsIndex.AddItem(300, ids, weights);
}
{
auto v = hsIndex.Search(NOT_IN, "100")->GetIds();
ASSERT_EQ(v.size(), 4);
}
{
auto v = hsIndex.Search(NOT_IN, "200")->GetSortedIds();
ASSERT_EQ(v.size(), 5);
}
{
auto v = hsIndex.Search(NOT_IN, "100::200")->Sample(100000);
vector<int32_t> cnts(7);
for (auto& i : v) {
cnts[i.first] += 1;
}
ASSERT_EQ(cnts[0], 0);
ASSERT_EQ(cnts[1], 0);
ASSERT_EQ(cnts[2], 0);
ASSERT_EQ(cnts[3], 0);
ASSERT_EQ(cnts[4], 0);
ASSERT_TRUE(cnts[6]*1.0/cnts[5] > 0.9 && cnts[6]*1.0/cnts[5] < 1.1);
}
{
auto v = hsIndex.Search(NOT_IN, "100::200::300")->Sample(100000);
vector<int32_t> cnts(7);
for (auto& i : v) {
cnts[i.first] += 1;
}
for (auto& i : cnts) {
ASSERT_EQ(i, 0);
}
}
}
TEST(HashSampleIndexTest, DeserializeFileIO) {
std::string filename = "hash_sample_test.dat";
HashSampleIndex<int32_t, string> hsIndex("hash");
vector<int32_t> ids = {0, 1, 2};
vector<float> weights = {2.0, 4.0, 8.0};
hsIndex.AddItem("name", ids, weights);
hsIndex.AddItem("name2", ids, weights);
{
std::unique_ptr<FileIO> writer;
ASSERT_TRUE(Env::Default()->NewFileIO(filename, false, &writer).ok());
ASSERT_TRUE(hsIndex.Serialize(writer.get()));
}
HashSampleIndex<int32_t, string> hsIndex2("hash");
{
std::unique_ptr<FileIO> reader;
ASSERT_TRUE(Env::Default()->NewFileIO(filename, true, &reader).ok());
ASSERT_TRUE(hsIndex2.Deserialize(reader.get()));
}
if (system("rm -f hash_sample_test.dat") < 0) {
return;
}
auto v = hsIndex2.Search(EQ, "name")->Sample(100000);
vector<int32_t> cnts(3);
for (auto& i : v) {
cnts[i.first] += 1;
ASSERT_EQ(i.second, weights[i.first]);
}
ASSERT_TRUE(cnts[1]*1.0/cnts[0] > 1.9 && cnts[1]*1.0/cnts[0] < 2.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[1] > 1.9 && cnts[2]*1.0/cnts[1] < 2.1);
}
TEST(HashSampleIndexTest, Union) {
HashSampleIndex<int32_t, string> hsIndex("hash");
{
vector<int32_t> ids = {0, 1, 2, 3, 4};
vector<float> weights = {2.0, 4.0, 8.0, 8.0, 16.0};
hsIndex.AddItem("name", ids, weights);
}
{
vector<int32_t> ids = {5, 6, 7};
vector<float> weights = {4.0, 8.0, 8.0};
hsIndex.AddItem("name2", ids, weights);
}
{
vector<int32_t> ids = {9, 10};
vector<float> weights = {4.0, 4.0};
hsIndex.AddItem("name3", ids, weights);
}
{
auto v = hsIndex.Search(NOT_EQ, "name");
v = v->Union(hsIndex.Search(EQ, "name2"));
auto vv = v->GetSortedIds();
ASSERT_EQ(vv.size(), 5);
ASSERT_EQ(vv[0], 5);
ASSERT_EQ(vv[4], 10);
auto r = v->Sample(100000);
vector<int32_t> cnts(11);
for (auto& i : r) {
cnts[i.first] += 1;
}
ASSERT_TRUE(cnts[6]*1.0/cnts[5] > 1.9 && cnts[6]*1.0/cnts[5] < 2.1);
ASSERT_TRUE(cnts[7]*1.0/cnts[6] > 0.9 && cnts[7]*1.0/cnts[6] < 1.1);
ASSERT_TRUE(cnts[7]*1.0/cnts[9] > 1.9 && cnts[7]*1.0/cnts[9] < 2.1);
ASSERT_TRUE(cnts[10]*1.0/cnts[9] > 0.9 && cnts[10]*1.0/cnts[9] < 1.1);
}
{
auto v = hsIndex.Search(EQ, "name");
v = v->Union(hsIndex.Search(EQ, "name2"));
auto vv = v->GetSortedIds();
ASSERT_EQ(vv.size(), 8);
ASSERT_EQ(vv[0], 0);
ASSERT_EQ(vv[7], 7);
auto r = v->Sample(1000000);
vector<int32_t> cnts(8);
for (auto& i : r) {
cnts[i.first] += 1;
}
ASSERT_TRUE(cnts[1]*1.0/cnts[0] > 1.9 && cnts[1]*1.0/cnts[0] < 2.1);
ASSERT_TRUE(cnts[2]*1.0/cnts[1] > 1.9 && cnts[2]*1.0/cnts[1] < 2.1);
ASSERT_TRUE(cnts[3]*1.0/cnts[2] > 0.9 && cnts[3]*1.0/cnts[2] < 1.1);
ASSERT_TRUE(cnts[4]*1.0/cnts[3] > 1.9 && cnts[4]*1.0/cnts[3] < 2.1);
ASSERT_TRUE(cnts[4]*1.0/cnts[5] > 3.9 && cnts[4]*1.0/cnts[5] < 4.1);
ASSERT_TRUE(cnts[6]*1.0/cnts[5] > 1.9 && cnts[6]*1.0/cnts[5] < 2.1);
ASSERT_TRUE(cnts[7]*1.0/cnts[6] > 0.9 && cnts[7]*1.0/cnts[6] < 1.1);
}
}
} // namespace euler
| 7,971 |
5,169 | <reponame>Gantios/Specs<filename>Specs/7/b/4/YXMPersonalLibs/1.0.6/YXMPersonalLibs.podspec.json
{
"name": "YXMPersonalLibs",
"version": "1.0.6",
"summary": "Libs for personal use.",
"description": "Libs for personal use.",
"homepage": "https://github.com/YouXianMing/YXMPersonalLibs",
"license": "MIT",
"authors": {
"YouXianMing": "<EMAIL>"
},
"platforms": {
"ios": "10.0"
},
"source": {
"git": "https://github.com/YouXianMing/YXMPersonalLibs.git",
"tag": "1.0.6"
},
"source_files": "YXMPersonalLibs/YXMPersonalLibs/UtilitiesLibsHeaders.h",
"public_header_files": "YXMPersonalLibs/YXMPersonalLibs/UtilitiesLibsHeaders.h",
"requires_arc": true,
"subspecs": [
{
"name": "UIButton+Category",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/UIButton+Category/*.{h,m}"
},
{
"name": "UIView+Category",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/UIView+Category/*.{h,m}"
},
{
"name": "NSString+Category",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/NSString+Category/*.{h,m}"
},
{
"name": "IrregularElementsCreator",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/IrregularElementsCreator/*.{h,m,gif}"
},
{
"name": "BaseEncodeObject",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/BaseEncodeObject/*.{h,m}"
},
{
"name": "JSONKit",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/JSONKit/*.{h,m}"
},
{
"name": "MD5",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/MD5/*.{h,m}"
},
{
"name": "GCD",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/GCD/*.{h,m}"
},
{
"name": "DateFormatter",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/DateFormatter/*.{h,m}"
},
{
"name": "DeepLinkModel",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/DeepLinkModel/*.{h,m}"
},
{
"name": "DeviceInfo",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/DeviceInfo/*.{h,m}"
},
{
"name": "HexColors",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/HexColors/*.{h,m}"
},
{
"name": "FileManager",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/FileManager/*.{h,m}"
},
{
"name": "DocumentManager",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/DocumentManager/*.{h,m}"
},
{
"name": "RegexManager",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/RegexManager/*.{h,m}"
},
{
"name": "MacroDefinitions",
"source_files": "YXMPersonalLibs/YXMPersonalLibs/MacroDefinitions/*.{h}"
}
]
}
| 1,326 |
679 | <reponame>Grosskopf/openoffice<filename>main/comphelper/source/property/propertycontainer.cxx
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_comphelper.hxx"
#include <comphelper/propertycontainer.hxx>
#include <comphelper/property.hxx>
#include <cppuhelper/typeprovider.hxx>
#include <osl/diagnose.h>
#include <uno/data.h>
#include <com/sun/star/uno/genfunc.h>
#include <com/sun/star/beans/PropertyAttribute.hpp>
#include <rtl/ustrbuf.hxx>
#include <algorithm>
//.........................................................................
namespace comphelper
{
//.........................................................................
using namespace ::com::sun::star::uno;
using namespace ::com::sun::star::lang;
using namespace ::com::sun::star::beans;
//==========================================================================
//= OPropertyContainer
//==========================================================================
//--------------------------------------------------------------------------
OPropertyContainer::OPropertyContainer(::cppu::OBroadcastHelper& _rBHelper)
:OPropertyContainer_Base(_rBHelper)
{
}
// -------------------------------------------------------------------------
OPropertyContainer::~OPropertyContainer()
{
}
//--------------------------------------------------------------------------
Sequence< Type > SAL_CALL OPropertyContainer::getTypes() throw (RuntimeException)
{
// just the types from our one and only base class
::cppu::OTypeCollection aTypes(
::getCppuType( static_cast< Reference< XPropertySet >* >(NULL)),
::getCppuType( static_cast< Reference< XFastPropertySet >* >(NULL)),
::getCppuType( static_cast< Reference< XMultiPropertySet >* >(NULL))
);
return aTypes.getTypes();
}
//--------------------------------------------------------------------------
void SAL_CALL OPropertyContainer::setFastPropertyValue( sal_Int32 nHandle, const Any& rValue ) throw ( UnknownPropertyException, PropertyVetoException, IllegalArgumentException, WrappedTargetException, RuntimeException)
{
OPropertyContainer_Base::setFastPropertyValue( nHandle, rValue );
}
//--------------------------------------------------------------------------
sal_Bool OPropertyContainer::convertFastPropertyValue(
Any& _rConvertedValue, Any& _rOldValue, sal_Int32 _nHandle, const Any& _rValue ) throw (IllegalArgumentException)
{
return OPropertyContainerHelper::convertFastPropertyValue( _rConvertedValue, _rOldValue, _nHandle, _rValue );
}
//--------------------------------------------------------------------------
void OPropertyContainer::setFastPropertyValue_NoBroadcast(sal_Int32 _nHandle, const Any& _rValue) throw (Exception)
{
OPropertyContainerHelper::setFastPropertyValue( _nHandle, _rValue );
}
//--------------------------------------------------------------------------
void OPropertyContainer::getFastPropertyValue(Any& _rValue, sal_Int32 _nHandle) const
{
OPropertyContainerHelper::getFastPropertyValue( _rValue, _nHandle );
}
//.........................................................................
} // namespace comphelper
//.........................................................................
| 1,002 |
841 | <filename>testsuite/integration-tests/src/test/java/org/jboss/resteasy/test/validation/cdi/resource/CDIValidationCoreSubResource.java
package org.jboss.resteasy.test.validation.cdi.resource;
import jakarta.validation.constraints.Min;
import jakarta.ws.rs.GET;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.PathParam;
import jakarta.ws.rs.Produces;
import jakarta.ws.rs.core.MediaType;
public class CDIValidationCoreSubResource {
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("{subparam}")
@Min(17)
public int submethod(@Min(13) @PathParam("subparam") int subparam) {
return subparam;
}
}
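// Illustrative behaviour (hypothetical request): GET <resource>/18 returns 18;
// a path value of 15 satisfies the @Min(13) parameter constraint but violates
// the @Min(17) return-value constraint, producing a validation error.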
| 239 |
2,508 |
extern zend_class_entry *protodir_connectionexception_ce;
ZEPHIR_INIT_CLASS(ProtoDir_ConnectionException);
PHP_METHOD(ProtoDir_ConnectionException, getRequest);
ZEND_BEGIN_ARG_INFO_EX(arginfo_protodir_connectionexception_getrequest, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEPHIR_INIT_FUNCS(protodir_connectionexception_method_entry) {
#if PHP_VERSION_ID >= 80000
PHP_ME(ProtoDir_ConnectionException, getRequest, arginfo_protodir_connectionexception_getrequest, ZEND_ACC_PUBLIC)
#else
PHP_ME(ProtoDir_ConnectionException, getRequest, NULL, ZEND_ACC_PUBLIC)
#endif
PHP_FE_END
};
| 223 |
4,339 | <reponame>geertjanw/ignite
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.mvcc;
import java.util.concurrent.CyclicBarrier;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.configuration.TransactionConfiguration;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
import org.apache.ignite.internal.util.typedef.X;
import org.apache.ignite.testframework.GridTestUtils;
import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
import org.apache.ignite.transactions.Transaction;
import org.junit.After;
import org.junit.Test;
import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
/** */
public class MvccDeadlockDetectionConfigTest extends GridCommonAbstractTest {
/** */
private boolean deadlockDetectionEnabled;
/** */
@After
public void stopCluster() {
stopAllGrids();
}
/** {@inheritDoc} */
@Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
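        // These tests assume a deadlockTimeout of 0 turns MVCC deadlock detection
        // off, while any positive value (1 ms here) turns it on.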
int timeout = deadlockDetectionEnabled ? 1 : 0;
return super.getConfiguration(igniteInstanceName)
.setTransactionConfiguration(new TransactionConfiguration().setDeadlockTimeout(timeout));
}
/** */
@Test
public void deadlockDetectionDisabled() throws Exception {
deadlockDetectionEnabled = false;
Ignite ign = startGrid();
IgniteCache<Object, Object> cache = ign.createCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
.setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
CyclicBarrier b = new CyclicBarrier(2);
int txTimeout = 3_000;
IgniteInternalFuture<?> futA = GridTestUtils.runAsync(() -> {
try (Transaction tx = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
cache.put(1, 'a');
b.await();
cache.put(2, 'a');
}
return null;
});
IgniteInternalFuture<?> futB = GridTestUtils.runAsync(() -> {
try (Transaction tx = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
cache.put(2, 'b');
b.await();
cache.put(1, 'b');
}
return null;
});
IgniteCheckedException e = awaitCompletion(futA, futB);
assertTrue(e.toString(), e.hasCause(IgniteTxTimeoutCheckedException.class));
}
/** */
@Test
public void deadlockDetectionEnabled() throws Exception {
deadlockDetectionEnabled = true;
Ignite ign = startGrid();
IgniteCache<Object, Object> cache = ign.createCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
.setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
CyclicBarrier b = new CyclicBarrier(2);
IgniteInternalFuture<?> futA = GridTestUtils.runAsync(() -> {
try (Transaction tx = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
cache.put(1, 'a');
b.await();
cache.put(2, 'a');
}
return null;
});
IgniteInternalFuture<?> futB = GridTestUtils.runAsync(() -> {
try (Transaction tx = ign.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
cache.put(2, 'b');
b.await();
cache.put(1, 'b');
}
return null;
});
IgniteCheckedException e = awaitCompletion(futA, futB);
assertTrue(e.toString(), X.hasCause(e, "Deadlock", IgniteTxRollbackCheckedException.class));
}
/** */
private IgniteCheckedException awaitCompletion(IgniteInternalFuture<?> fut1, IgniteInternalFuture<?> fut2) {
IgniteCheckedException e = null;
try {
fut1.get(10_000);
}
catch (IgniteCheckedException e1) {
e = e1;
}
try {
fut2.get(10_000);
}
catch (IgniteCheckedException e1) {
e = e1;
}
return e;
}
}
| 2,138 |
3,384 | <filename>XcodeClasses/Xcode6.0/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator8.0.sdk/System/Library/PrivateFrameworks/WirelessDiagnostics.h<gh_stars>1000+
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>.
//
#pragma mark Blocks
typedef void (^CDUnknownBlockType)(void); // return type and parameters are unknown
#pragma mark Named Structures
struct AWDServerConnection {
struct shared_ptr<awd::ServerConnectionInfo> _field1;
};
struct ServerConnectionInfo;
struct shared_ptr<awd::ServerConnectionInfo> {
struct ServerConnectionInfo *_field1;
struct __shared_weak_count *_field2;
};
#pragma mark -
//
// File: /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/PrivateFrameworks/WirelessDiagnostics.framework/WirelessDiagnostics
// UUID: 43E35C83-78BD-3E6E-A365-13CECFA84C56
//
// Arch: x86_64
// Current version: 649.0.0
// Compatibility version: 1.0.0
// Source version: 649.0.0.0.0
// Minimum iOS version: 8.0.0
// SDK version: 8.0.0
//
// Objective-C Garbage Collection: Unsupported
//
@interface AWDMetricContainer : NSObject
{
PBCodable *_metric;
unsigned int _metricId;
}
@property(readonly, nonatomic) unsigned int metricId; // @synthesize metricId=_metricId;
@property(retain, nonatomic) PBCodable *metric; // @synthesize metric=_metric;
- (void)dealloc;
- (id)initWithMetricId:(unsigned int)arg1;
@end
@interface AWDMetricManager : NSObject
{
}
+ (void)postMetricWithId:(unsigned long long)arg1 object:(id)arg2;
+ (void)postMetricWithId:(unsigned long long)arg1 numberValue:(id)arg2;
+ (void)postMetricWithId:(unsigned long long)arg1 stringValue:(id)arg2;
+ (void)postMetricWithId:(unsigned long long)arg1 unsignedIntegerValue:(unsigned long long)arg2;
+ (void)postMetricWithId:(unsigned long long)arg1 integerValue:(long long)arg2;
+ (void)postMetricWithId:(unsigned long long)arg1 boolValue:(_Bool)arg2;
+ (void)postMetricWithId:(unsigned long long)arg1;
@end
@interface AWDServerConnection : NSObject
{
struct AWDServerConnection *fServerConnection;
}
- (void)flushToQueue:(struct dispatch_queue_s *)arg1 block:(CDUnknownBlockType)arg2;
- (unsigned long long)getAWDTimestamp;
- (_Bool)registerConfigChangeCallback:(CDUnknownBlockType)arg1;
- (_Bool)registerQueriableMetricCallback:(CDUnknownBlockType)arg1 forIdentifier:(unsigned int)arg2;
- (_Bool)registerQueriableMetric:(unsigned int)arg1 callback:(CDUnknownBlockType)arg2;
- (_Bool)submitMetric:(id)arg1;
- (id)newMetricContainerWithIdentifier:(unsigned int)arg1;
- (void)dealloc;
- (id)initWithComponentId:(unsigned int)arg1 andBlockOnConfiguration:(_Bool)arg2;
- (id)initWithComponentId:(unsigned int)arg1;
@end
| 1,077 |
2,338 | //===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <strstream>
// class strstream
// : public basic_iostream<char>
// {
// public:
// // Types
// typedef char char_type;
// typedef char_traits<char>::int_type int_type;
// typedef char_traits<char>::pos_type pos_type;
// typedef char_traits<char>::off_type off_type;
#include <strstream>
#include <type_traits>
#include "test_macros.h"
int main(int, char**)
{
static_assert((std::is_base_of<std::iostream, std::strstream>::value), "");
static_assert((std::is_same<std::strstream::char_type, char>::value), "");
static_assert((std::is_same<std::strstream::int_type, std::char_traits<char>::int_type>::value), "");
static_assert((std::is_same<std::strstream::pos_type, std::char_traits<char>::pos_type>::value), "");
static_assert((std::is_same<std::strstream::off_type, std::char_traits<char>::off_type>::value), "");
return 0;
}
| 466 |
1,738 | /*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// AZ
#include <AzCore/Serialization/SerializeContext.h>
#include <AzCore/std/smart_ptr/make_shared.h>
// Graph Model
#include <GraphModel/Model/Module/InputOutputNodes.h>
#include <GraphModel/Model/Graph.h>
#include <GraphModel/Model/Slot.h>
#include <GraphModel/Model/DataType.h>
namespace GraphModel
{
//////////////////////////////////////////////////////////////////////////////
// BaseInputOutputNode
void BaseInputOutputNode::Reflect(AZ::ReflectContext* context)
{
AZ::SerializeContext* serializeContext = azrtti_cast<AZ::SerializeContext*>(context);
if (serializeContext)
{
serializeContext->Class<BaseInputOutputNode, Node>()
->Version(0)
->Field("m_dataType", &BaseInputOutputNode::m_dataType)
;
}
}
BaseInputOutputNode::BaseInputOutputNode(GraphPtr graph, DataTypePtr dataType)
: Node(graph)
{
// Copy because m_dataType has to be non-const for use with SerializeContext, and dataType is const
m_dataType = AZStd::make_shared<DataType>(*dataType);
}
const char* BaseInputOutputNode::GetTitle() const
{
return m_title.c_str();
}
GraphModel::DataTypePtr BaseInputOutputNode::GetNodeDataType() const
{
return m_dataType;
}
AZStd::string BaseInputOutputNode::GetName() const
{
return GetSlot("name")->GetValue<AZStd::string>();
}
AZStd::string BaseInputOutputNode::GetDisplayName() const
{
return GetSlot("displayName")->GetValue<AZStd::string>();
}
AZStd::string BaseInputOutputNode::GetDescription() const
{
return GetSlot("description")->GetValue<AZStd::string>();
}
void BaseInputOutputNode::RegisterCommonSlots(AZStd::string_view directionName)
{
GraphModel::DataTypePtr stringDataType = GetGraphContext()->GetDataType<AZStd::string>();
RegisterSlot(GraphModel::SlotDefinition::CreateProperty("name", "Name", stringDataType, stringDataType->GetDefaultValue(),
AZStd::string::format("The official name for this %s", directionName.data())));
RegisterSlot(GraphModel::SlotDefinition::CreateProperty("displayName", "Display Name", stringDataType, stringDataType->GetDefaultValue(),
AZStd::string::format("The name for this %s, displayed to the user. Will use the above Name if left blank.", directionName.data())));
RegisterSlot(GraphModel::SlotDefinition::CreateProperty("description", "Description", stringDataType, stringDataType->GetDefaultValue(),
AZStd::string::format("A description of this %s, used for tooltips", directionName.data())));
}
//////////////////////////////////////////////////////////////////////////////
// GraphInputNode
void GraphInputNode::Reflect(AZ::ReflectContext* context)
{
AZ::SerializeContext* serializeContext = azrtti_cast<AZ::SerializeContext*>(context);
if (serializeContext)
{
serializeContext->Class<GraphInputNode, BaseInputOutputNode>()
->Version(0)
;
}
}
GraphInputNode::GraphInputNode(GraphModel::GraphPtr graph, DataTypePtr dataType)
: BaseInputOutputNode(graph, dataType)
{
m_title = m_dataType->GetDisplayName() + " Input";
RegisterSlots();
CreateSlotData();
}
void GraphInputNode::PostLoadSetup(GraphPtr graph, NodeId id)
{
m_title = m_dataType->GetDisplayName() + " Input";
Node::PostLoadSetup(graph, id);
}
AZStd::any GraphInputNode::GetDefaultValue() const
{
return GetSlot("defaultValue")->GetValue();
}
void GraphInputNode::RegisterSlots()
{
// Register just a single output slot for the data that is input through this node
RegisterSlot(GraphModel::SlotDefinition::CreateOutputData("value", "Value", m_dataType, "An external value provided as input to this graph"));
// Register meta-data properties
RegisterCommonSlots("input");
RegisterSlot(GraphModel::SlotDefinition::CreateProperty("defaultValue", "Default Value", m_dataType, m_dataType->GetDefaultValue(),
"The default value for this input when no data is provided externally"));
}
//////////////////////////////////////////////////////////////////////////////
// GraphOutputNode
void GraphOutputNode::Reflect(AZ::ReflectContext* context)
{
AZ::SerializeContext* serializeContext = azrtti_cast<AZ::SerializeContext*>(context);
if (serializeContext)
{
serializeContext->Class<GraphOutputNode, BaseInputOutputNode>()
->Version(0)
;
}
}
GraphOutputNode::GraphOutputNode(GraphModel::GraphPtr graph, DataTypePtr dataType)
: BaseInputOutputNode(graph, dataType)
{
m_title = m_dataType->GetDisplayName() + " Output";
RegisterSlots();
CreateSlotData();
}
void GraphOutputNode::PostLoadSetup(GraphPtr graph, NodeId id)
{
m_title = m_dataType->GetDisplayName() + " Output";
Node::PostLoadSetup(graph, id);
}
void GraphOutputNode::RegisterSlots()
{
// Register just a single input slot for the data that is output through this node
RegisterSlot(GraphModel::SlotDefinition::CreateInputData("value", "Value", m_dataType, m_dataType->GetDefaultValue(), "A value output by this graph for external use"));
// Register meta-data properties
RegisterCommonSlots("output");
}
}
| 2,254 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/prerender/prerender_handle_impl.h"
namespace content {
PrerenderHandleImpl::PrerenderHandleImpl(
base::WeakPtr<PrerenderHostRegistry> prerender_host_registry,
int frame_tree_node_id,
const GURL& prerendering_url)
: prerender_host_registry_(std::move(prerender_host_registry)),
frame_tree_node_id_(frame_tree_node_id),
prerendering_url_(prerendering_url) {
DCHECK(!prerendering_url_.is_empty());
}
PrerenderHandleImpl::~PrerenderHandleImpl() {
// TODO(https://crbug.com/1166085): Use proper FinalStatus after the
// specification of Prerender2 metrics is finalized.
if (prerender_host_registry_) {
prerender_host_registry_->CancelHost(
frame_tree_node_id_, PrerenderHost::FinalStatus::kDestroyed);
}
}
GURL PrerenderHandleImpl::GetInitialPrerenderingUrl() {
return prerendering_url_;
}
} // namespace content
| 363 |
1,523 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import errno
import getpass
import glob
import logging
import math
import os
import shutil
import subprocess
import sys
import tarfile
import time
import tempfile
from collections import namedtuple
from contextlib import closing
from struct import Struct
from threading import Timer
# This script automates the collection of the following diagnostics from a host
# running an Impala service daemon (catalogd/statestored/impalad). Following diagnostics
# are supported.
#
# 1. Native core dump (+ shared libs)
# 2. GDB/Java thread dump (pstack + jstack)
# 3. Java heap dump (jmap)
# 4. Minidumps (using breakpad)
# 5. Profiles
#
# Dependencies:
# 1. gdb package should be installed to collect native thread stacks/coredump. The binary
# location is picked up from the system path. In case of pstacks, the script falls back
# to the breakpad minidumps if the 'pstack' binary is not in the system path.
# 2. jstack/jmap from a JRE/JDK. The default location is picked up from the system path
# but can be overridden with --java_home PATH_TO_JAVA_HOME.
# 3. Minidumps are collected by sending a SIGUSR1 signal to the Impala process. Impala
# versions without full breakpad support (<= release 2.6) will reliably crash if
# we attempt to do that since those versions do not have the corresponding signal
# handler. Hence it is suggested to run this script only on releases 2.7 and later.
# 4. python >= 2.6
#
# Usage: python collect_diagnostics.py --help
#
# Few example usages:
#
# Collect 3 jstacks, pstacks from an impalad process 3s apart.
# python collect_diagnostics.py --pid $(pidof impalad) --stacks 3 3
#
# Collect core dump and a Java heapdump from the catalogd process
# python collect_diagnostics.py --pid $(pidof impalad) --jmap --gcore
#
# Collect 5 breakpad minidumps from a statestored process 5s apart.
# python collect_diagnostics.py --pid $(pidof statestored) --minidumps 5 5
# --minidumps_dir /var/log/impala-minidumps
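#
# Collect recent query profiles (the directory path below is illustrative),
# subject to the default uncompressed size limit:
# python collect_diagnostics.py --pid $(pidof impalad) --profiles_dir
#     /var/log/impalad/profiles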
#
#
class Command(object):
"""Wrapper around subprocess.Popen() that is canceled after a configurable timeout."""
def __init__(self, cmd, timeout=30):
self.cmd = cmd
self.timeout = timeout
self.child_killed_by_timeout = False
def run(self, cmd_stdin=None, cmd_stdout=subprocess.PIPE):
"""Runs the command 'cmd' by setting the appropriate stdin/out. The command is killed
    if it hits a timeout (controlled by self.timeout)."""
cmd_string = " ".join(self.cmd)
logging.info("Starting command %s with a timeout of %s"
% (cmd_string, str(self.timeout)))
self.child = subprocess.Popen(self.cmd, stdin=cmd_stdin, stdout=cmd_stdout)
timer = Timer(self.timeout, self.kill_child)
try:
timer.start()
# self.stdout is set to None if cmd_stdout is anything other than PIPE. The actual
# stdout is written to the file corresponding to cmd_stdout.
self.stdout = self.child.communicate()[0]
if self.child.returncode == 0:
logging.info("Command finished successfully: " + cmd_string)
else:
cmd_status = "timed out" if self.child_killed_by_timeout else "failed"
logging.error("Command %s: %s" % (cmd_status, cmd_string))
return self.child.returncode
finally:
timer.cancel()
return -1
def kill_child(self):
"""Kills the running command (self.child)."""
self.child_killed_by_timeout = True
self.child.kill()
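# Illustrative use of Command (the arguments are hypothetical):
#   Command(["pgrep", "impalad"], timeout=5).run()
# runs the process, kills it if it outlives the timeout (setting
# child_killed_by_timeout), and returns the child's exit code.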
class ImpalaDiagnosticsHandler(object):
IMPALA_PROCESSES = ["impalad", "catalogd", "statestored"]
OUTPUT_DIRS_TO_CREATE = ["stacks", "gcores", "jmaps", "profiles",
"shared_libs", "minidumps"]
MINIDUMP_HEADER = namedtuple("MDRawHeader", "signature version stream_count \
stream_directory_rva checksum time_date_stamp flags")
def __init__(self, args):
"""Initializes the state by setting the paths of required executables."""
self.args = args
if args.pid <= 0:
return
self.script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
# Name of the Impala process for which diagnostics should be collected.
self.target_process_name = self.get_target_process_name()
self.minidump_search_path = os.path.join(self.args.minidumps_dir,
self.target_process_name)
self.java_home = self.get_java_home_from_env()
if not self.java_home and args.java_home:
self.java_home = os.path.abspath(args.java_home)
self.jstack_cmd = os.path.join(self.java_home, "bin/jstack")
self.java_cmd = os.path.join(self.java_home, "bin/java")
self.jmap_cmd = os.path.join(self.java_home, "bin/jmap")
self.gdb_cmd = self.get_command_from_path("gdb")
self.gcore_cmd = self.get_command_from_path("gcore")
self.pstack_cmd = self.get_command_from_path("pstack")
def create_output_dir_structure(self):
"""Creates the skeleton directory structure for the diagnostics output collection."""
self.collection_root_dir = tempfile.mkdtemp(prefix="impala-diagnostics-%s" %
datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-"),
dir=os.path.abspath(self.args.output_dir))
for dirname in self.OUTPUT_DIRS_TO_CREATE:
os.mkdir(os.path.join(self.collection_root_dir, dirname))
def get_command_from_path(self, cmd):
"""Returns the path to a given command executable, if one exists in the
system PATH."""
for path in os.environ["PATH"].split(os.pathsep):
cmd_path = os.path.join(path, cmd)
if os.access(cmd_path, os.X_OK):
return cmd_path
return ""
def get_target_process_name(self):
"""Returns the process name of the target process for which diagnostics
should be collected."""
try:
return open("/proc/%s/comm" % self.args.pid).read().strip()
except Exception:
logging.exception("Failed to get target process name.")
return ""
def get_num_child_proc(self, name):
"""Returns number of processes with the given name and target Impala pid
as parent."""
    # Not all pgrep versions support the -c parameter, so fetch the stdout and
# count the number of items in the list.
cmd = Command(["pgrep", "-P", str(self.args.pid), name])
cmd.run()
return len(cmd.stdout.split("\n")) - 1
def get_java_home_from_env(self):
"""Returns JAVA_HOME set in the env of the target process."""
try:
envs = open("/proc/%s/environ" % self.args.pid).read().split("\0")
for s in envs:
k, v = s.split("=", 1)
if k == "JAVA_HOME":
return v
except Exception:
logging.exception("Failed to determine JAVA_HOME from proc env.")
return ""
def get_free_disk_space_gbs(self, path):
"""Returns free disk space (in GBs) of the partition hosting the given path."""
s = os.statvfs(path)
return (s.f_bsize * s.f_bavail)/(1024.0 * 1024.0 * 1024.0)
def get_minidump_create_timestamp(self, minidump_path):
"""Returns the unix timestamp of the minidump create time. It is extracted from
the minidump header."""
# Read the minidump's header to extract the create time stamp. More information about
# the mindump header format can be found here: https://goo.gl/uxKZVe
#
# typedef struct {
# uint32_t signature;
# uint32_t version;
# uint32_t stream_count;
# MDRVA stream_directory_rva; /* A |stream_count|-sized array of
# * MDRawDirectory structures. */
# uint32_t checksum; /* Can be 0. In fact, that's all that's
# * been found in minidump files. */
# uint32_t time_date_stamp; /* time_t */
# uint64_t flags;
# } MDRawHeader; /* MINIDUMP_HEADER */
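    # The format "IIIiIIQ" mirrors the layout above: three uint32s (signature,
    # version, stream_count), one int32 (stream_directory_rva), two uint32s
    # (checksum, time_date_stamp) and one uint64 (flags).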
s = Struct("IIIiIIQ")
data = open(minidump_path, "rb").read(s.size)
header = self.MINIDUMP_HEADER(*s.unpack_from(data))
return header.time_date_stamp
def wait_for_minidump(self):
"""Minidump collection is async after sending the SIGUSR1 signal. So this method
waits till it is written to the disk. Since minidump forks off a new process from
    the parent Impala process, we need to wait till the forked process exits.
Returns after 30s to prevent infinite waiting. Should be called after sending the
SIGUSR1 signal to the Impala process."""
MAX_WAIT_TIME_S = 30
start_time = time.time()
while time.time() < start_time + MAX_WAIT_TIME_S:
# Sleep for a bit to ensure that the process fork to write minidump has started.
# Otherwise the subsequent check on the process count could pass even when the
# fork didn't succeed. This sleep reduces the likelihood of such race.
time.sleep(1)
if self.get_num_child_proc(self.target_process_name) == 0:
break
return
def validate_args(self):
"""Returns True if self.args are valid, false otherwise"""
if self.args.pid <= 0:
logging.critical("Invalid PID provided.")
return False
if self.target_process_name not in self.IMPALA_PROCESSES:
logging.critical("No valid Impala process with the given PID %s" % str(self.args.pid))
return False
if not self.java_home:
logging.critical("JAVA_HOME could not be inferred from process env.\
Please specify --java_home.")
return False
if self.args.jmap and not os.path.exists(self.jmap_cmd):
logging.critical("jmap binary not found, required to collect a Java heap dump.")
return False
if self.args.gcore and not os.path.exists(self.gcore_cmd):
logging.critical("gcore binary not found, required to collect a core dump.")
return False
if self.args.profiles_dir and not os.path.isdir(self.args.profiles_dir):
logging.critical("No valid profiles directory at path: %s" % self.args.profiles_dir)
return False
return True
def collect_thread_stacks(self):
"""Collects jstack/jstack-m/pstack for the given pid in that order. pstack collection
falls back to minidumps if pstack binary is missing from the system path. Minidumps
are collected by sending a SIGUSR1 to the Impala process and then archiving the
contents of the minidump directory. The number of times stacks are collected and the
sleep time between the collections are controlled by --stacks argument."""
stacks_count, stacks_interval_secs = self.args.stacks
if stacks_count <= 0 or stacks_interval_secs < 0:
return
# Skip jstack collection if the jstack binary does not exist.
skip_jstacks = not os.path.exists(self.jstack_cmd)
if skip_jstacks:
logging.info("Skipping jstack collection since jstack binary couldn't be located.")
# Fallback to breakpad minidump collection if pstack binaries are missing.
fallback_to_minidump = False
if not self.pstack_cmd:
# Fall back to collecting a minidump if pstack is not installed.
if not os.path.exists(self.minidump_search_path):
logging.info("Skipping pstacks since pstack binary couldn't be located. Provide "
+ "--minidumps_dir for collecting minidumps instead.")
# At this point, we can't proceed since we have nothing to collect.
if skip_jstacks:
return
else:
        fallback_to_minidump = True
logging.info("Collecting breakpad minidumps since pstack/gdb binaries are " +
"missing.")
stacks_dir = os.path.join(self.collection_root_dir, "stacks")
# Populate the commands to run in 'cmds_to_run' depending on what kinds of thread
# stacks to collect. Each entry is a tuple of form
# (Command, stdout_prefix, is_minidump). 'is_minidump' tells whether the command
# is trying to trigger a minidump collection.
cmds_to_run = []
if not skip_jstacks:
cmd_args = [self.jstack_cmd, str(self.args.pid)]
cmds_to_run.append((Command(cmd_args, self.args.timeout), "jstack", False))
# Collect mixed-mode jstack, contains native stack frames.
cmd_args_mixed_mode = [self.jstack_cmd, "-m", str(self.args.pid)]
cmds_to_run.append(
(Command(cmd_args_mixed_mode, self.args.timeout), "jstack-m", False))
if fallback_to_minidump:
cmd_args = ["kill", "-SIGUSR1", str(self.args.pid)]
cmds_to_run.append((Command(cmd_args, self.args.timeout), None, True))
elif self.pstack_cmd:
cmd_args = [self.pstack_cmd, str(self.args.pid)]
cmds_to_run.append((Command(cmd_args, self.args.timeout), "pstack", False))
collection_start_ts = time.time()
for i in xrange(stacks_count):
for cmd, file_prefix, is_minidump in cmds_to_run:
if file_prefix:
stdout_file = os.path.join(stacks_dir, file_prefix + "-" + str(i) + ".txt")
with open(stdout_file, "w") as output:
cmd.run(cmd_stdout=output)
else:
cmd.run()
        # In case of minidump collection, wait for it to be written.
if is_minidump:
self.wait_for_minidump()
time.sleep(stacks_interval_secs)
# Copy minidumps if required.
if fallback_to_minidump:
minidump_out_dir = os.path.join(self.collection_root_dir, "minidumps")
      self.copy_minidumps(minidump_out_dir, collection_start_ts)
def collect_minidumps(self):
"""Collects minidumps on the Impala process based on argument --minidumps. The
minidumps are collected by sending a SIGUSR1 signal to the Impala process and then
the resulting minidumps are copied to the target directory."""
minidump_count, minidump_interval_secs = self.args.minidumps
if minidump_count <= 0 or minidump_interval_secs < 0:
return
# Impala process writes a minidump when it encounters a SIGUSR1.
cmd_args = ["kill", "-SIGUSR1", str(self.args.pid)]
cmd = Command(cmd_args, self.args.timeout)
collection_start_ts = time.time()
for i in xrange(minidump_count):
cmd.run()
self.wait_for_minidump()
time.sleep(minidump_interval_secs)
out_dir = os.path.join(self.collection_root_dir, "minidumps")
    self.copy_minidumps(out_dir, collection_start_ts)
def copy_minidumps(self, target, start_ts):
"""Copies mindumps with create time >= start_ts to 'target' directory."""
logging.info("Copying minidumps from %s to %s with ctime >= %s"
% (self.minidump_search_path, target, start_ts))
for filename in glob.glob(os.path.join(self.minidump_search_path, "*.dmp")):
try:
minidump_ctime = self.get_minidump_create_timestamp(filename)
if minidump_ctime >= math.floor(start_ts):
shutil.copy2(filename, target)
else:
logging.info("Ignored mindump: %s ctime: %s" % (filename, minidump_ctime))
except Exception:
logging.exception("Error processing minidump at path: %s. Skipping it." % filename)
def collect_java_heapdump(self):
"""Generates the Java heap dump of the Impala process using the 'jmap' command."""
if not self.args.jmap:
return
jmap_dir = os.path.join(self.collection_root_dir, "jmaps")
out_file = os.path.join(jmap_dir, self.target_process_name + "_heap.bin")
# jmap command requires it to be run as the process owner.
# Command: jmap -dump:format=b,file=<outfile> <pid>
cmd_args = [self.jmap_cmd, "-dump:format=b,file=" + out_file, str(self.args.pid)]
Command(cmd_args, self.args.timeout).run()
def collect_native_coredump(self):
"""Generates the core dump of the Impala process using the 'gcore' command"""
if not self.args.gcore:
return
# Command: gcore -o <outfile> <pid>
gcore_dir = os.path.join(self.collection_root_dir, "gcores")
out_file_name = self.target_process_name + "-" +\
datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + ".core"
out_file = os.path.join(gcore_dir, out_file_name)
cmd_args = [self.gcore_cmd, "-o", out_file, str(self.args.pid)]
Command(cmd_args, self.args.timeout).run()
def collect_query_profiles(self):
"""Collects Impala query profiles from --profiles_dir. Enforces an uncompressed limit
of --profiles_max_size_limit bytes on the copied profile logs."""
if not self.args.profiles_dir:
return
out_dir = os.path.join(self.collection_root_dir, "profiles")
# Hardcoded in Impala
    PROFILE_LOG_FILE_PATTERN = "impala_profile_log_1.1-*"
logging.info("Collecting profile data, limiting size to %f GB" %
(self.args.profiles_max_size_limit/(1024 * 1024 * 1024)))
profiles_path = os.path.join(self.args.profiles_dir, PROFILE_LOG_FILE_PATTERN)
# Sort the profiles by creation time and copy the most recent ones in that order.
sorted_profiles =\
sorted(glob.iglob(profiles_path), key=os.path.getctime, reverse=True)
profile_size_included_so_far = 0
for profile_path in sorted_profiles:
try:
file_size = os.path.getsize(profile_path)
if file_size == 0:
continue
if profile_size_included_so_far + file_size > self.args.profiles_max_size_limit:
# Copying the whole file violates profiles_max_size_limit. Copy a part of it.
# Profile logs are newline delimited with a single profile per line.
num_bytes_to_copy =\
self.args.profiles_max_size_limit - profile_size_included_so_far
file_name = os.path.basename(profile_path)
copied_bytes = 0
with open(profile_path, "rb") as in_file:
with open(os.path.join(out_dir, file_name), "wb") as out_file:
for line in in_file.readlines():
if copied_bytes + len(line) > num_bytes_to_copy:
break
out_file.write(line)
copied_bytes += len(line)
return
profile_size_included_so_far += file_size
shutil.copy2(profile_path, out_dir)
      except Exception:
logging.exception("Encountered an error while collecting profile %s. Skipping it."
% profile_path)
def collect_shared_libs(self):
"""Collects shared libraries loaded by the target Impala process."""
# Shared libs are collected if either of core dump or minidumps are enabled.
if not (self.args.gcore or self.args.minidumps_dir):
return
# If gdb binary is missing, we cannot extract the shared library list
if not self.gdb_cmd:
logging.info("'gdb' executable missing. Skipping shared library collection.")
return
out_dir = os.path.join(self.collection_root_dir, "shared_libs")
script_path = os.path.join(self.script_dir, "collect_shared_libs.sh")
cmd_args = [script_path, self.gdb_cmd, str(self.args.pid), out_dir]
Command(cmd_args, self.args.timeout).run()
def archive_diagnostics(self):
"""Creates a gztar of the collected diagnostics and cleans up the original
directory. Returns True if successful, False otherwise."""
try:
# tarfile does not support context managers in python 2.6. We use closing() to work
# around that.
with closing(tarfile.open(self.collection_root_dir + '.tar.gz', mode='w:gz')) as\
archive:
        # collection_root_dir is an absolute path. There is no point in preserving its
# entire directory structure in the archive, so set the arcname accordingly.
archive.add(self.collection_root_dir,
arcname=os.path.basename(self.collection_root_dir))
return True
except Exception:
logging.exception("Encountered an exception archiving diagnostics, cleaning up.")
return False
finally:
self.cleanup()
def cleanup(self):
"""Cleans up the directory to which diagnostics were written."""
shutil.rmtree(self.collection_root_dir, ignore_errors=True)
def get_diagnostics(self):
"""Calls all collect_*() methods to collect diagnostics. Returns True if no errors
were encountered during diagnostics collection, False otherwise."""
if not self.validate_args():
return False
logging.info("Using JAVA_HOME: %s" % self.java_home)
self.create_output_dir_structure()
logging.info("Free disk space: %.2fGB" %
self.get_free_disk_space_gbs(self.collection_root_dir))
os.chdir(self.args.output_dir)
collection_methods = [self.collect_shared_libs, self.collect_query_profiles,
self.collect_native_coredump, self.collect_java_heapdump, self.collect_minidumps,
self.collect_thread_stacks]
exception_encountered = False
for method in collection_methods:
try:
method()
except IOError as e:
if e.errno == errno.ENOSPC:
# Clean up and abort if we are low on disk space. Other IOErrors are logged and
# ignored.
logging.exception("Disk space low, aborting.")
self.cleanup()
return False
logging.exception("Encountered an IOError calling: %s" % method.__name__)
exception_encountered = True
except Exception:
exception_encountered = True
logging.exception("Encountered an exception calling: %s" % method.__name__)
if exception_encountered:
logging.error("Encountered an exception collecting diagnostics. Final output " +
"could be partial.\n")
# Archive the directory, even if it is partial.
archive_path = self.collection_root_dir + ".tar.gz"
logging.info("Archiving diagnostics to path: %s" % archive_path)
if self.archive_diagnostics():
logging.info("Diagnostics collected at path: %s" % archive_path)
return not exception_encountered
def get_args_parser():
"""Creates the argument parser and adds the flags"""
parser = argparse.ArgumentParser(
description="Impala diagnostics collection",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--pid", required=True, action="store", dest="pid", type=int,
default=0, help="PID of the Impala process for which to collect diagnostics.")
parser.add_argument("--java_home", action="store", dest="java_home", default="",
help="If not set, it is set to the JAVA_HOME from the pid's environment.")
parser.add_argument("--timeout", action="store", dest="timeout", default=300,
type=int, help="Timeout (in seconds) for each of the diagnostics commands")
parser.add_argument("--stacks", action="store", dest="stacks", nargs=2, type=int,
default=[0, 0], metavar=("COUNT", "INTERVAL (in seconds)"),
help="Collect jstack, mixed-mode jstack and pstacks of the Impala process.\
Breakpad minidumps are collected in case of missing pstack binaries.")
parser.add_argument("--jmap", action="store_true", dest="jmap", default=False,
help="Collect heap dump of the Java process")
parser.add_argument("--gcore", action="store_true", dest="gcore", default=False,
help="Collect the native core dump using gdb. Requires gdb to be installed.")
parser.add_argument("--minidumps", action="store", dest="minidumps", type=int,
nargs=2, default=[0, 0], metavar=("COUNT", "INTERVAL (in seconds)"),
help="Collect breakpad minidumps for the Impala process. Requires --minidumps_dir\
be set.")
parser.add_argument("--minidumps_dir", action="store", dest="minidumps_dir", default="",
help="Path of the directory to which Impala process' minidumps are written. Looks\
for minidumps in this path's subdirectory that is named after the target process\
name.")
parser.add_argument("--profiles_dir", action="store", dest="profiles_dir", default="",
help="Path of the profiles directory to be included in the diagnostics output.")
parser.add_argument("--profiles_max_size_limit", action="store",
dest="profiles_max_size_limit", default=3 * 1024 * 1024 * 1024, type=float,
help="Uncompressed limit (in Bytes) on profile logs collected from --profiles_dir.")
parser.add_argument("--output_dir", action="store", dest="output_dir",
default = tempfile.gettempdir(), help="Output directory that contains the final "
"diagnostics data. Defaults to %s" % tempfile.gettempdir())
return parser
if __name__ == "__main__":
parser = get_args_parser()
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, datefmt="%Y-%m-%d %H:%M:%S",
format="%(asctime)s %(levelname)-8s %(message)s")
diagnostics_handler = ImpalaDiagnosticsHandler(parser.parse_args())
logging.info("Running as user: %s" % getpass.getuser())
logging.info("Input args: %s" % " ".join(sys.argv))
sys.exit(0 if diagnostics_handler.get_diagnostics() else 1)
| 9,343 |
310 | {
"name": "Z400",
"description": "A PC tower computer.",
"url": "https://www.amazon.com/HP-Z400-Workstation-Processor-Operating/dp/B006LAN2CG"
} | 59 |
11,356 | <reponame>Bpowers4/turicreate
#define BOOST_TEST_MODULE
#include <boost/test/unit_test.hpp>
#include <core/util/test_macros.hpp>
#include <core/storage/query_engine/execution/execution_node.hpp>
#include <core/storage/query_engine/operators/sarray_source.hpp>
#include <core/storage/query_engine/operators/logical_filter.hpp>
#include <core/storage/sframe_data/sarray.hpp>
#include <core/storage/sframe_data/algorithm.hpp>
#include "check_node.hpp"
using namespace turi;
using namespace turi::query_eval;
struct logical_filter_test {
public:
void test_filter_empty_array() {
auto data_sa = std::make_shared<sarray<flexible_type>>();
data_sa->open_for_write();
data_sa->close();
auto filter_sa = std::make_shared<sarray<flexible_type>>();
filter_sa->open_for_write();
filter_sa->close();
std::vector<flexible_type> expected;
auto node = make_node(op_sarray_source(data_sa), op_sarray_source(filter_sa));
check_node(node, expected);
}
void test_filter_none() {
auto data_sa = get_data_sarray();
std::vector<flexible_type> filter(data_sa->size(), 0);
auto filter_sa = std::make_shared<sarray<flexible_type>>();
filter_sa->open_for_write();
turi::copy(filter.begin(), filter.end(), *filter_sa);
filter_sa->close();
std::vector<flexible_type> expected;
auto node = make_node(op_sarray_source(data_sa), op_sarray_source(filter_sa));
check_node(node, expected);
}
void test_filter_all() {
auto data_sa = get_data_sarray();
std::vector<flexible_type> filter(data_sa->size(), 1);
auto filter_sa = std::make_shared<sarray<flexible_type>>();
filter_sa->open_for_write();
turi::copy(filter.begin(), filter.end(), *filter_sa);
filter_sa->close();
std::vector<flexible_type> expected;
data_sa->get_reader()->read_rows(0, data_sa->size(), expected);
auto node = make_node(op_sarray_source(data_sa), op_sarray_source(filter_sa));
check_node(node, expected);
}
void test_filter_even() {
auto data_sa = get_data_sarray();
std::vector<flexible_type> filter;
for (size_t i = 0; i < data_sa->size(); i++) {
if (i % 2 == 0) {
filter.push_back(0);
} else {
filter.push_back(1);
}
}
auto filter_sa = std::make_shared<sarray<flexible_type>>();
filter_sa->open_for_write();
turi::copy(filter.begin(), filter.end(), *filter_sa);
filter_sa->close();
std::vector<flexible_type> data;
data_sa->get_reader()->read_rows(0, data_sa->size(), data);
std::vector<flexible_type> expected;
    for (size_t i = 0; i < data.size(); ++i) {
if (filter[i]) {
expected.push_back(data[i]);
}
}
auto node = make_node(op_sarray_source(data_sa), op_sarray_source(filter_sa));
check_node(node, expected);
}
std::shared_ptr<sarray<flexible_type>> get_data_sarray() {
std::vector<flexible_type> data{0,1,2,3,4,5};
auto sa = std::make_shared<sarray<flexible_type>>();
sa->open_for_write();
turi::copy(data.begin(), data.end(), *sa);
sa->close();
return sa;
}
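  // Builds the operator graph under test: two sarray_source nodes feeding an
  // op_logical_filter node (left input = data, right input = boolean mask).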
template <typename Source>
std::shared_ptr<execution_node> make_node(const Source& source_left, const Source& source_right) {
auto left_node = std::make_shared<execution_node>(std::make_shared<Source>(source_left));
auto right_node = std::make_shared<execution_node>(std::make_shared<Source>(source_right));
auto node = std::make_shared<execution_node>(std::make_shared<op_logical_filter>(),
std::vector<std::shared_ptr<execution_node>>({left_node, right_node}));
return node;
}
};
BOOST_FIXTURE_TEST_SUITE(_logical_filter_test, logical_filter_test)
BOOST_AUTO_TEST_CASE(test_filter_empty_array) {
logical_filter_test::test_filter_empty_array();
}
BOOST_AUTO_TEST_CASE(test_filter_none) {
logical_filter_test::test_filter_none();
}
BOOST_AUTO_TEST_CASE(test_filter_all) {
logical_filter_test::test_filter_all();
}
BOOST_AUTO_TEST_CASE(test_filter_even) {
logical_filter_test::test_filter_even();
}
BOOST_AUTO_TEST_SUITE_END()
| 1,685 |
5,169 | {
"name": "MIRToolTip",
"version": "0.1.1",
"summary": "A convenience cocoa pod that extends view controller class that displays a tooltip.",
"description": "\"A convenience cocoa pod that extends view controller class for presenting customizable tooltip messages.\"",
"homepage": "https://github.com/mir-taqi/MIRToolTip",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"mir-taqi": "<EMAIL>"
},
"source": {
"git": "https://github.com/mir-taqi/MIRToolTip.git",
"tag": "0.1.1"
},
"platforms": {
"ios": "8.0"
},
"source_files": [
"MIRToolTip/Classes/*.h",
"MIRToolTip/Classes/*.m"
],
"frameworks": "UIKit"
}
| 272 |
581 | <reponame>dokime7/flask-jwt
"""
Flask-JWT
=========
Flask-JWT is a Flask extension that adds basic JSON Web Token features to any application.
Resources
---------
* `Documentation <http://packages.python.org/Flask-JWT/>`_
* `Issue Tracker <https://github.com/mattupstate/flask-jwt/issues>`_
* `Source <https://github.com/mattupstate/flask-jwt>`_
* `Development Version
<https://github.com/mattupstate/flask-jwt/raw/develop#egg=Flask-JWT-dev>`_
"""
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def get_requirements(suffix=''):
with open('requirements%s.txt' % suffix) as f:
rv = f.read().splitlines()
return rv
def get_long_description():
with open('README.rst') as f:
rv = f.read()
return rv
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = [
'-xrs',
'--cov', 'flask_jwt',
'--cov-report', 'term-missing',
'--pep8',
'--flakes',
'--clearcache',
'tests'
]
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='Flask-JWT',
version='0.3.2',
url='https://github.com/mattupstate/flask-jwt',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='JWT token authentication for Flask apps',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=get_requirements(),
tests_require=get_requirements('-dev'),
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 920 |
575 | <gh_stars>100-1000
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_NG_MATHML_NG_MATH_ROW_LAYOUT_ALGORITHM_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_NG_MATHML_NG_MATH_ROW_LAYOUT_ALGORITHM_H_
#include "third_party/blink/renderer/core/layout/ng/ng_block_break_token.h"
#include "third_party/blink/renderer/core/layout/ng/ng_block_node.h"
#include "third_party/blink/renderer/core/layout/ng/ng_box_fragment_builder.h"
#include "third_party/blink/renderer/core/layout/ng/ng_layout_algorithm.h"
namespace blink {
class LayoutUnit;
class CORE_EXPORT NGMathRowLayoutAlgorithm
: public NGLayoutAlgorithm<NGBlockNode,
NGBoxFragmentBuilder,
NGBlockBreakToken> {
public:
explicit NGMathRowLayoutAlgorithm(const NGLayoutAlgorithmParams& params);
struct ChildWithOffsetAndMargins {
DISALLOW_NEW();
ChildWithOffsetAndMargins(const NGBlockNode& child,
const NGBoxStrut& margins,
LogicalOffset offset,
scoped_refptr<const NGPhysicalFragment> fragment)
: child(child),
margins(margins),
offset(offset),
fragment(std::move(fragment)) {}
NGBlockNode child;
NGBoxStrut margins;
LogicalOffset offset;
scoped_refptr<const NGPhysicalFragment> fragment;
};
typedef Vector<ChildWithOffsetAndMargins, 4> ChildrenVector;
private:
scoped_refptr<const NGLayoutResult> Layout() final;
MinMaxSizesResult ComputeMinMaxSizes(
const MinMaxSizesFloatInput&) const final;
void LayoutRowItems(ChildrenVector*,
LayoutUnit* max_row_block_baseline,
LogicalSize* row_total_size);
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_NG_MATHML_NG_MATH_ROW_LAYOUT_ALGORITHM_H_
| 897 |
14,668 | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FUCHSIA_CDM_FUCHSIA_DECRYPTOR_H_
#define MEDIA_FUCHSIA_CDM_FUCHSIA_DECRYPTOR_H_
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/task/single_thread_task_runner.h"
#include "base/thread_annotations.h"
#include "media/base/decryptor.h"
#include "media/fuchsia/cdm/fuchsia_stream_decryptor.h"
namespace media {
class FuchsiaCdmContext;
class FuchsiaDecryptor : public Decryptor {
public:
// Caller should make sure |cdm| lives longer than this class.
explicit FuchsiaDecryptor(FuchsiaCdmContext* cdm_context);
FuchsiaDecryptor(const FuchsiaDecryptor&) = delete;
FuchsiaDecryptor& operator=(const FuchsiaDecryptor&) = delete;
~FuchsiaDecryptor() override;
// media::Decryptor implementation:
void Decrypt(StreamType stream_type,
scoped_refptr<DecoderBuffer> encrypted,
DecryptCB decrypt_cb) override;
void CancelDecrypt(StreamType stream_type) override;
void InitializeAudioDecoder(const AudioDecoderConfig& config,
DecoderInitCB init_cb) override;
void InitializeVideoDecoder(const VideoDecoderConfig& config,
DecoderInitCB init_cb) override;
void DecryptAndDecodeAudio(scoped_refptr<DecoderBuffer> encrypted,
AudioDecodeCB audio_decode_cb) override;
void DecryptAndDecodeVideo(scoped_refptr<DecoderBuffer> encrypted,
VideoDecodeCB video_decode_cb) override;
void ResetDecoder(StreamType stream_type) override;
void DeinitializeDecoder(StreamType stream_type) override;
bool CanAlwaysDecrypt() override;
private:
FuchsiaCdmContext* const cdm_context_;
// TaskRunner for the thread on which |audio_decryptor_| was created.
scoped_refptr<base::SingleThreadTaskRunner> audio_decryptor_task_runner_;
};
} // namespace media
#endif // MEDIA_FUCHSIA_CDM_FUCHSIA_DECRYPTOR_H_
| 799 |
1,085 | <filename>sabot/kernel/src/main/java/com/dremio/exec/store/sys/CacheManagerStatsProvider.java<gh_stars>1000+
/*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.exec.store.sys;
import java.util.List;
import org.rocksdb.RocksIterator;
import com.dremio.exec.work.CacheManagerDatasetInfo;
import com.dremio.exec.work.CacheManagerFilesInfo;
import com.dremio.exec.work.CacheManagerMountPointInfo;
import com.dremio.exec.work.CacheManagerStoragePluginInfo;
/**
 * This interface provides a way to get the data for the system tables below:
 * sys."cache_manager_mount_points", sys."cache_manager_storage_plugins",
 * sys."cache_manager_datasets" and sys."cache_manager_files"
 *
 * Due to the lazy instantiation of the cache manager, the handle to the cache manager
 * instance is buried in the CacheFileSystemCreator object. The sabot context does not
 * have a handle to the cache manager instance, but it does have a handle to the file
 * system creator. At system table iterator creation we only have a handle to the sabot
 * context, so the file system creator implements the interface below to expose the
 * cache manager statistics.
*/
public interface CacheManagerStatsProvider {
/**
* Retrieves the statistics about the mount points configured for the executor node.
* @return
*/
List<CacheManagerMountPointInfo> getMountPointStats();
/**
* Retrieves the statistics about the storage plugins cached by cache manager.
* @return
*/
List<CacheManagerStoragePluginInfo> getStoragePluginStats();
/**
* Gets the iterator for the datasets information in the cache manager.
* @return
*/
RocksIterator getDatasetIterator();
/**
* Retrieves the statistics about the datasets cached in the executor node.
* @param dsIterator
* @return
*/
List<CacheManagerDatasetInfo> getDatasetStats(RocksIterator dsIterator);
/**
* Gets the iterator for the cached files in the executor node.
* @return
*/
RocksIterator getCachedFilesIterator();
/**
   * Retrieves the statistics about the cached files in the executor node.
* @param fileIterator
* @return
*/
List<CacheManagerFilesInfo> getCachedFilesStats(RocksIterator fileIterator);
}
| 776 |
461 | #include "common.h"
#include <amdev.h>
// Implementations of the device operations (serial, events, display info, framebuffer)
size_t serial_write(const void *buf, size_t offset, size_t len) {
for(int i=0;i<len;i++)
_putc(((char *)buf)[i]);
return len;
}
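// X-macro: _KEYS(NAME) expands NAME(key) once per key, so the designated
// initializers below fill keyname[] with "KEY" strings indexed by _KEY_* codes.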
#define NAME(key) \
[_KEY_##key] = #key,
static const char *keyname[256] __attribute__((used)) = {
[_KEY_NONE] = "NONE",
_KEYS(NAME)
};
size_t events_read(void *buf, size_t offset, size_t len) {
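  // read_key() packs a key-down flag in bit 15; the low bits are the _KEY_*
  // code. _KEY_NONE means no pending key, in which case uptime is reported.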
int key=read_key();
int down=0;
if(key&0x8000){
key^=0x8000;
down=1;
}
if(key!=_KEY_NONE){
len=sprintf(buf,"%s %s\n",down ?"kd":"ku",keyname[key]);
}
else{
int time=uptime();
len=sprintf(buf,"t %d\n",time);
}
return len;
}
static char dispinfo[128] __attribute__((used)) = {};
size_t dispinfo_read(void *buf, size_t offset, size_t len) {
  len=sprintf(buf,"%s",dispinfo+offset); // "%s" avoids treating dispinfo as a format string
return len;
}
size_t fb_write(const void *buf, size_t offset, size_t len) {
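  // offset is a byte offset into the 32-bit-per-pixel framebuffer; map it to
  // pixel coordinates and draw len/4 pixels as a one-pixel-tall rectangle.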
int x=(offset/4)%screen_width();
int y=(offset/4)/screen_width();
draw_rect((uint32_t*)buf, x, y, len / 4, 1);
return len;
}
size_t fbsync_write(const void *buf, size_t offset, size_t len) {
draw_sync();
return len;
}
void init_device() {
Log("Initializing devices...");
_ioe_init();
sprintf(dispinfo,"WIDTH:%d\nHEIGHT:%d\n",screen_width(),screen_height());
}
| 565 |
892 | <reponame>github/advisory-database
{
"schema_version": "1.2.0",
"id": "GHSA-29m2-93j9-hrcp",
"modified": "2022-05-13T01:53:16Z",
"published": "2022-05-13T01:53:16Z",
"aliases": [
"CVE-2018-6977"
],
"details": "VMware ESXi (6.7, 6.5, 6.0), Workstation (15.x and 14.x) and Fusion (11.x and 10.x) contain a denial-of-service vulnerability due to an infinite loop in a 3D-rendering shader. Successfully exploiting this issue may allow an attacker with normal user privileges in the guest to make the VM unresponsive, and in some cases, possibly result other VMs on the host or the host itself becoming unresponsive.",
"severity": [
{
"type": "CVSS_V3",
"score": "CVSS:3.0/AV:L/AC:L/PR:L/UI:N/S:C/C:N/I:N/A:H"
}
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2018-6977"
},
{
"type": "WEB",
"url": "https://www.vmware.com/security/advisories/VMSA-2018-0025.html"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/105549"
},
{
"type": "WEB",
"url": "http://www.securitytracker.com/id/1041821"
},
{
"type": "WEB",
"url": "http://www.securitytracker.com/id/1041822"
}
],
"database_specific": {
"cwe_ids": [
"CWE-835"
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 652 |
345 | <gh_stars>100-1000
/*
* Copyright 2016 HuntBugs contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package one.util.huntbugs.testdata;
import java.io.File;
import java.nio.file.FileSystems;
import one.util.huntbugs.registry.anno.AssertNoWarning;
import one.util.huntbugs.registry.anno.AssertWarning;
/**
* @author <NAME>
*
*/
public class TestRegexProblems {
@AssertWarning("RegexBadSyntax")
public void testBadRegex(String test) {
if(test.matches("***")) {
System.out.println("Matches");
}
}
@AssertWarning("RegexUnintended")
public void testPipe(String test) {
for(String part : test.split("|")) {
System.out.println(part);
}
}
@AssertWarning("RegexUnintended")
public void testDot(String test) {
for(String part : test.split(".")) {
System.out.println(part);
}
}
@AssertNoWarning("RegexUnintended")
public String testDotReplace(String test) {
return test.replaceAll(".", " ");
}
@AssertWarning("RegexUnintended")
public String testDotReplaceFirst(String test) {
return test.replaceFirst(".", " ");
}
@AssertWarning("RegexFileSeparator")
public String[] testFileSeparator(String test) {
return test.split(File.separator);
}
@AssertWarning("RegexFileSeparator")
public String[] testFileSeparator2(String test) {
return test.split(FileSystems.getDefault().getSeparator());
}
}
| 754 |
6,989 | from warnings import warn
warn("IPython.utils.localinterfaces has moved to jupyter_client.localinterfaces")
from jupyter_client.localinterfaces import *
| 46 |
32,544 | package com.baeldung.map.entry;
import java.util.HashMap;
import java.util.Map;
public class MapEntryEfficiencyExample {
public static void main(String[] args) {
MapEntryEfficiencyExample mapEntryEfficiencyExample = new MapEntryEfficiencyExample();
Map<String, String> map = new HashMap<>();
map.put("<NAME>", "Clean Code");
map.put("<NAME>", "Effective Java");
System.out.println("Iterating Using Map.KeySet - 2 operations");
mapEntryEfficiencyExample.usingKeySet(map);
System.out.println("Iterating Using Map.Entry - 1 operation");
mapEntryEfficiencyExample.usingEntrySet(map);
}
public void usingKeySet(Map<String, String> bookMap) {
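        // Two map operations per element: iterating keySet() plus a get(key) lookup.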
for (String key : bookMap.keySet()) {
System.out.println("key: " + key + " value: " + bookMap.get(key));
}
}
public void usingEntrySet(Map<String, String> bookMap) {
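        // A single traversal: each Map.Entry already carries both key and value.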
for (Map.Entry<String, String> book: bookMap.entrySet()) {
System.out.println("key: " + book.getKey() + " value: " + book.getValue());
}
}
}
| 428 |
651 | <gh_stars>100-1000
/* -----------------------------------------------------------------------
ffi.c - Copyright (c) 2015 <NAME> <<EMAIL>>
2015 <NAME> <<EMAIL>>
2018 <NAME> <<EMAIL>>
Based on MIPS N32/64 port
RISC-V Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
#include <ffi.h>
#include <ffi_common.h>
#include <stdlib.h>
#include <stdint.h>
#if __riscv_float_abi_double
#define ABI_FLEN 64
#define ABI_FLOAT double
#elif __riscv_float_abi_single
#define ABI_FLEN 32
#define ABI_FLOAT float
#endif
#define NARGREG 8
#define STKALIGN 16
#define MAXCOPYARG (2 * sizeof(double))
typedef struct call_context
{
#if ABI_FLEN
ABI_FLOAT fa[8];
#endif
size_t a[8];
/* used by the assembly code to in-place construct its own stack frame */
char frame[16];
} call_context;
typedef struct call_builder
{
call_context *aregs;
int used_integer;
int used_float;
size_t *used_stack;
} call_builder;
/* integer (not pointer) less than ABI XLEN */
/* FFI_TYPE_INT does not appear to be used */
#if __SIZEOF_POINTER__ == 8
#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64)
#else
#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32)
#endif
#if ABI_FLEN
typedef struct {
char as_elements, type1, offset2, type2;
} float_struct_info;
#if ABI_FLEN >= 64
#define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE)
#else
#define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT)
#endif
static ffi_type **flatten_struct(ffi_type *in, ffi_type **out, ffi_type **out_end) {
int i;
if (out == out_end) return out;
if (in->type != FFI_TYPE_STRUCT) {
*(out++) = in;
} else {
for (i = 0; in->elements[i]; i++)
out = flatten_struct(in->elements[i], out, out_end);
}
return out;
}
/* Structs with at most two fields after flattening, one of which is of
floating point type, are passed in multiple registers if sufficient
registers are available. */
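/* Illustrative cases under the hardware floating-point convention, assuming
   enough argument registers are free:
     struct { float f; }         -> one FPR (fa*)
     struct { double d; int i; } -> one FPR plus one GPR (fa*, a*), ABI_FLEN >= 64
     struct { float a, b; }      -> two FPRs
   Otherwise the struct falls back to the integer calling convention. */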
static float_struct_info struct_passed_as_elements(call_builder *cb, ffi_type *top) {
float_struct_info ret = {0, 0, 0, 0};
ffi_type *fields[3];
int num_floats, num_ints;
int num_fields = flatten_struct(top, fields, fields + 3) - fields;
if (num_fields == 1) {
if (IS_FLOAT(fields[0]->type)) {
ret.as_elements = 1;
ret.type1 = fields[0]->type;
}
} else if (num_fields == 2) {
num_floats = IS_FLOAT(fields[0]->type) + IS_FLOAT(fields[1]->type);
num_ints = IS_INT(fields[0]->type) + IS_INT(fields[1]->type);
if (num_floats == 0 || num_floats + num_ints != 2)
return ret;
if (cb->used_float + num_floats > NARGREG || cb->used_integer + (2 - num_floats) > NARGREG)
return ret;
if (!IS_FLOAT(fields[0]->type) && !IS_FLOAT(fields[1]->type))
return ret;
ret.type1 = fields[0]->type;
ret.type2 = fields[1]->type;
ret.offset2 = FFI_ALIGN(fields[0]->size, fields[1]->alignment);
ret.as_elements = 1;
}
return ret;
}
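/* Illustrative examples (not part of the port; assuming ABI_FLEN == 64 and
   both argument register classes free):
     struct { float f; }          -> as_elements, type1 = FFI_TYPE_FLOAT
     struct { double d; int i; }  -> as_elements, type1 = FFI_TYPE_DOUBLE,
                                     type2 = FFI_TYPE_SINT32, offset2 = 8
     struct { float a, b, c; }    -> not as_elements (three flattened fields)
     struct { int a; long b; }    -> not as_elements (no float member)       */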
#endif
/* allocates a single register, float register, or XLEN-sized stack slot to a datum */
static void marshal_atom(call_builder *cb, int type, void *data) {
size_t value = 0;
switch (type) {
case FFI_TYPE_UINT8: value = *(uint8_t *)data; break;
case FFI_TYPE_SINT8: value = *(int8_t *)data; break;
case FFI_TYPE_UINT16: value = *(uint16_t *)data; break;
case FFI_TYPE_SINT16: value = *(int16_t *)data; break;
/* 32-bit quantities are always sign-extended in the ABI */
case FFI_TYPE_UINT32: value = *(int32_t *)data; break;
case FFI_TYPE_SINT32: value = *(int32_t *)data; break;
#if __SIZEOF_POINTER__ == 8
case FFI_TYPE_UINT64: value = *(uint64_t *)data; break;
case FFI_TYPE_SINT64: value = *(int64_t *)data; break;
#endif
case FFI_TYPE_POINTER: value = *(size_t *)data; break;
/* float values may be recoded in an implementation-defined way
by hardware conforming to 2.1 or earlier, so use asm to
reinterpret floats as doubles */
#if ABI_FLEN >= 32
case FFI_TYPE_FLOAT:
asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(float *)data));
return;
#endif
#if ABI_FLEN >= 64
case FFI_TYPE_DOUBLE:
asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(double *)data));
return;
#endif
default: FFI_ASSERT(0); break;
}
if (cb->used_integer == NARGREG) {
*cb->used_stack++ = value;
} else {
cb->aregs->a[cb->used_integer++] = value;
}
}
static void unmarshal_atom(call_builder *cb, int type, void *data) {
size_t value;
switch (type) {
#if ABI_FLEN >= 32
case FFI_TYPE_FLOAT:
asm("" : "=f"(*(float *)data) : "0"(cb->aregs->fa[cb->used_float++]));
return;
#endif
#if ABI_FLEN >= 64
case FFI_TYPE_DOUBLE:
asm("" : "=f"(*(double *)data) : "0"(cb->aregs->fa[cb->used_float++]));
return;
#endif
}
if (cb->used_integer == NARGREG) {
value = *cb->used_stack++;
} else {
value = cb->aregs->a[cb->used_integer++];
}
switch (type) {
case FFI_TYPE_UINT8: *(uint8_t *)data = value; break;
case FFI_TYPE_SINT8: *(uint8_t *)data = value; break;
case FFI_TYPE_UINT16: *(uint16_t *)data = value; break;
case FFI_TYPE_SINT16: *(uint16_t *)data = value; break;
case FFI_TYPE_UINT32: *(uint32_t *)data = value; break;
case FFI_TYPE_SINT32: *(uint32_t *)data = value; break;
#if __SIZEOF_POINTER__ == 8
case FFI_TYPE_UINT64: *(uint64_t *)data = value; break;
case FFI_TYPE_SINT64: *(uint64_t *)data = value; break;
#endif
case FFI_TYPE_POINTER: *(size_t *)data = value; break;
default: FFI_ASSERT(0); break;
}
}
/* adds an argument to a call, or a not by reference return value */
static void marshal(call_builder *cb, ffi_type *type, int var, void *data) {
size_t realign[2];
#if ABI_FLEN
if (!var && type->type == FFI_TYPE_STRUCT) {
float_struct_info fsi = struct_passed_as_elements(cb, type);
if (fsi.as_elements) {
marshal_atom(cb, fsi.type1, data);
if (fsi.offset2)
marshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2);
return;
}
}
if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) {
marshal_atom(cb, type->type, data);
return;
}
#endif
if (type->size > 2 * __SIZEOF_POINTER__) {
/* pass by reference */
marshal_atom(cb, FFI_TYPE_POINTER, &data);
} else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) {
marshal_atom(cb, type->type, data);
} else {
/* overlong integers, soft-float floats, and structs without special
float handling are treated identically from this point on */
/* variadics are aligned even in registers */
if (type->alignment > __SIZEOF_POINTER__) {
if (var)
cb->used_integer = FFI_ALIGN(cb->used_integer, 2);
cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__);
}
memcpy(realign, data, type->size);
if (type->size > 0)
marshal_atom(cb, FFI_TYPE_POINTER, realign);
if (type->size > __SIZEOF_POINTER__)
marshal_atom(cb, FFI_TYPE_POINTER, realign + 1);
}
}
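/* Example (illustrative, RV64 where __SIZEOF_POINTER__ == 8): a 24-byte
   struct exceeds 2 * XLEN = 16 bytes and is passed by reference, while a
   16-byte struct is copied through the realign buffer into two registers
   or stack slots. */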
/* for arguments passed by reference returns the pointer, otherwise the arg is copied (up to MAXCOPYARG bytes) */
static void *unmarshal(call_builder *cb, ffi_type *type, int var, void *data) {
size_t realign[2];
void *pointer;
#if ABI_FLEN
if (!var && type->type == FFI_TYPE_STRUCT) {
float_struct_info fsi = struct_passed_as_elements(cb, type);
if (fsi.as_elements) {
unmarshal_atom(cb, fsi.type1, data);
if (fsi.offset2)
unmarshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2);
return data;
}
}
if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) {
unmarshal_atom(cb, type->type, data);
return data;
}
#endif
if (type->size > 2 * __SIZEOF_POINTER__) {
/* pass by reference */
unmarshal_atom(cb, FFI_TYPE_POINTER, (char*)&pointer);
return pointer;
} else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) {
unmarshal_atom(cb, type->type, data);
return data;
} else {
/* overlong integers, soft-float floats, and structs without special
float handling are treated identically from this point on */
/* variadics are aligned even in registers */
if (type->alignment > __SIZEOF_POINTER__) {
if (var)
cb->used_integer = FFI_ALIGN(cb->used_integer, 2);
cb->used_stack = (size_t *)FFI_ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__);
}
if (type->size > 0)
unmarshal_atom(cb, FFI_TYPE_POINTER, realign);
if (type->size > __SIZEOF_POINTER__)
unmarshal_atom(cb, FFI_TYPE_POINTER, realign + 1);
memcpy(data, realign, type->size);
return data;
}
}
static int passed_by_ref(call_builder *cb, ffi_type *type, int var) {
#if ABI_FLEN
if (!var && type->type == FFI_TYPE_STRUCT) {
float_struct_info fsi = struct_passed_as_elements(cb, type);
if (fsi.as_elements) return 0;
}
#endif
return type->size > 2 * __SIZEOF_POINTER__;
}
/* Perform machine dependent cif processing */
ffi_status ffi_prep_cif_machdep(ffi_cif *cif) {
cif->riscv_nfixedargs = cif->nargs;
return FFI_OK;
}
/* Perform machine dependent cif processing when we have a variadic function */
ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs) {
cif->riscv_nfixedargs = nfixedargs;
return FFI_OK;
}
/* Low level routine for calling functions */
extern void ffi_call_asm (void *stack, struct call_context *regs,
void (*fn) (void), void *closure) FFI_HIDDEN;
static void
ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue,
void *closure)
{
/* this is a conservative estimate, assuming a complex return value and
that all remaining arguments are long long / __int128 */
size_t arg_bytes = cif->nargs <= 3 ? 0 :
FFI_ALIGN(2 * sizeof(size_t) * (cif->nargs - 3), STKALIGN);
size_t rval_bytes = 0;
if (rvalue == NULL && cif->rtype->size > 2*__SIZEOF_POINTER__)
rval_bytes = FFI_ALIGN(cif->rtype->size, STKALIGN);
size_t alloc_size = arg_bytes + rval_bytes + sizeof(call_context);
/* the assembly code will deallocate all stack data at lower addresses
than the argument region, so we need to allocate the frame and the
return value after the arguments in a single allocation */
size_t alloc_base;
/* Argument region must be 16-byte aligned */
if (_Alignof(max_align_t) >= STKALIGN) {
/* since sizeof long double is normally 16, the compiler will
guarantee alloca alignment to at least that much */
alloc_base = (size_t)alloca(alloc_size);
} else {
alloc_base = FFI_ALIGN(alloca(alloc_size + STKALIGN - 1), STKALIGN);
}
if (rval_bytes)
rvalue = (void*)(alloc_base + arg_bytes);
call_builder cb;
cb.used_float = cb.used_integer = 0;
cb.aregs = (call_context*)(alloc_base + arg_bytes + rval_bytes);
cb.used_stack = (void*)alloc_base;
int return_by_ref = passed_by_ref(&cb, cif->rtype, 0);
if (return_by_ref)
marshal(&cb, &ffi_type_pointer, 0, &rvalue);
int i;
for (i = 0; i < cif->nargs; i++)
marshal(&cb, cif->arg_types[i], i >= cif->riscv_nfixedargs, avalue[i]);
ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure);
cb.used_float = cb.used_integer = 0;
if (!return_by_ref && rvalue)
unmarshal(&cb, cif->rtype, 0, rvalue);
}
void
ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue)
{
ffi_call_int(cif, fn, rvalue, avalue, NULL);
}
void
ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue,
void **avalue, void *closure)
{
ffi_call_int(cif, fn, rvalue, avalue, closure);
}
extern void ffi_closure_asm(void) FFI_HIDDEN;
ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(ffi_cif*,void*,void**,void*), void *user_data, void *codeloc)
{
uint32_t *tramp = (uint32_t *) &closure->tramp[0];
uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm;
if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI)
return FFI_BAD_ABI;
/* we will call ffi_closure_inner with codeloc, not closure, but as long
as the memory is readable it should work */
  tramp[0] = 0x00000317; /* auipc t1, 0 (i.e. t1 <- codeloc) */
#if __SIZEOF_POINTER__ == 8
tramp[1] = 0x01033383; /* ld t2, 16(t1) */
#else
tramp[1] = 0x01032383; /* lw t2, 16(t1) */
#endif
tramp[2] = 0x00038067; /* jr t2 */
tramp[3] = 0x00000013; /* nop */
tramp[4] = fn;
tramp[5] = fn >> 32;
closure->cif = cif;
closure->fun = fun;
closure->user_data = user_data;
__builtin___clear_cache(codeloc, codeloc + FFI_TRAMPOLINE_SIZE);
return FFI_OK;
}
extern void ffi_go_closure_asm (void) FFI_HIDDEN;
ffi_status
ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif,
void (*fun) (ffi_cif *, void *, void **, void *))
{
if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI)
return FFI_BAD_ABI;
closure->tramp = (void *) ffi_go_closure_asm;
closure->cif = cif;
closure->fun = fun;
return FFI_OK;
}
/* Called by the assembly code with aregs pointing to saved argument registers
and stack pointing to the stacked arguments. Return values passed in
registers will be reloaded from aregs. */
void FFI_HIDDEN
ffi_closure_inner (ffi_cif *cif,
void (*fun) (ffi_cif *, void *, void **, void *),
void *user_data,
size_t *stack, call_context *aregs)
{
void **avalue = alloca(cif->nargs * sizeof(void*));
/* storage for arguments which will be copied by unmarshal(). We could
theoretically avoid the copies in many cases and use at most 128 bytes
of memory, but allocating disjoint storage for each argument is
simpler. */
char *astorage = alloca(cif->nargs * MAXCOPYARG);
void *rvalue;
call_builder cb;
int return_by_ref;
int i;
cb.aregs = aregs;
cb.used_integer = cb.used_float = 0;
cb.used_stack = stack;
return_by_ref = passed_by_ref(&cb, cif->rtype, 0);
if (return_by_ref)
unmarshal(&cb, &ffi_type_pointer, 0, &rvalue);
else
rvalue = alloca(cif->rtype->size);
for (i = 0; i < cif->nargs; i++)
avalue[i] = unmarshal(&cb, cif->arg_types[i],
i >= cif->riscv_nfixedargs, astorage + i*MAXCOPYARG);
fun (cif, rvalue, avalue, user_data);
if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) {
cb.used_integer = cb.used_float = 0;
marshal(&cb, cif->rtype, 0, rvalue);
}
}
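/* Usage sketch (illustrative; not part of this file): a minimal call through
   this port, assuming <stdio.h>'s puts() as the target function.

     ffi_cif cif;
     ffi_type *args[1] = { &ffi_type_pointer };
     void *values[1];
     const char *s = "hello";
     ffi_arg rc;

     values[0] = &s;
     if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK)
       ffi_call(&cif, FFI_FN(puts), &rc, values);
*/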
| 7,233 |
1,430 | <filename>tests/test_exceptions.py
# -*- coding: utf-8 -*-
import pytest
import json
from schematics.exceptions import *
def test_error_from_string():
e = ConversionError('hello')
assert e.messages == ['hello']
e = ValidationError('hello', 'world', '!')
assert e.messages == ['hello', 'world', '!']
assert len(e) == 3
def _assert(e):
assert e.messages == ['hello'] and e.messages[0].info == 99
def test_error_from_args():
_assert(ValidationError('hello', info=99))
def test_error_from_tuple():
_assert(ValidationError(('hello', 99)))
def test_error_from_message():
_assert(ValidationError(ErrorMessage('hello', info=99)))
def test_error_from_error():
_assert(ValidationError(ValidationError(('hello', 99))))
def test_error_from_mixed_args():
e = ValidationError(
('hello', 99),
'world',
ErrorMessage('from_msg', info=0),
ValidationError('from_err', info=1))
assert e == e.messages == ['hello', 'world', 'from_msg', 'from_err']
assert [msg.info for msg in e] == [99, None, 0, 1]
def test_error_from_mixed_list():
e = ConversionError([
('hello', 99),
'world',
ErrorMessage('from_msg', info=0),
ConversionError('from_err', info=1)])
assert e.messages == ['hello', 'world', 'from_msg', 'from_err']
assert [msg.info for msg in e.messages] == [99, None, 0, 1]
def test_error_str():
assert str(ValidationError('foo')) == '["foo"]'
e = ValidationError(
('foo', None),
('bar', 98),
('baz', [1, 2, 3]))
assert str(e) == '["foo", "bar: 98", "baz: [1, 2, 3]"]'
def test_error_list_conversion():
err = ValidationError("A", "B", "C")
assert list(err) == err.messages
def test_error_eq():
assert ValidationError("A") == ValidationError("A") == ["A"]
assert ValidationError("A") != ConversionError("A")
assert ValidationError("A", "B") == ValidationError("A", "B") == ["A", "B"]
assert ValidationError("A") != ValidationError("A", "B")
def test_error_message_object():
assert ErrorMessage('foo') == 'foo'
assert ErrorMessage('foo') != 'bar'
assert ErrorMessage('foo', 1) == ErrorMessage('foo', 1)
assert ErrorMessage('foo', 1) != ErrorMessage('foo', 2)
@pytest.mark.parametrize("error", [
ErrorMessage('foo', info='bar'),
BaseError([ErrorMessage('foo', info='bar')]),
BaseError({"foo": "bar"}),
ErrorMessage(u'é'),
ValidationError(u'é')
])
def test_exception_repr(error):
assert error == eval(repr(error))
def test_error_failures():
with pytest.raises(NotImplementedError):
FieldError()
with pytest.raises(TypeError):
ValidationError()
with pytest.raises(TypeError):
ValidationError('hello', 99)
with pytest.raises(TypeError):
ConversionError(ValidationError('hello'))
with pytest.raises(TypeError):
CompoundError(['hello'])
def test_to_primitive():
error = BaseError({
'a': [ErrorMessage('a1'), ErrorMessage('a2')],
'b': {
'd': ErrorMessage('d_val'),
'e': ErrorMessage('e_val'),
},
'c': ErrorMessage('this is an error')
})
assert error.to_primitive() == {
'a': ['a1', 'a2'],
'b': {
'd': 'd_val',
'e': 'e_val'
},
'c': 'this is an error'
}
def test_to_primitive_list():
error = BaseError([ErrorMessage('a1'), ErrorMessage('a2')])
assert error.to_primitive() == ['a1', 'a2']
def test_autopopulate_message_on_none():
errors = {
'a': [ErrorMessage('a1'), ErrorMessage('a2')],
'b': {
'd': ErrorMessage('d_val'),
'e': ErrorMessage('e_val'),
},
'c': ErrorMessage('this is an error')
}
e = BaseError(errors)
assert json.loads(str(e)) == BaseError._to_primitive(errors)
@pytest.mark.parametrize("e", [
BaseError(["a", "b"]),
ConversionError(ErrorMessage("foo"), ErrorMessage("bar")),
CompoundError({"a": ValidationError(ErrorMessage("foo"))})
])
def test_exceptions_is_hashable(e):
"""exceptions must be hashable, as the logging module expects this
for log.exception()
"""
hash(e)
@pytest.mark.parametrize("inp,out", [
(ConversionError(ErrorMessage("foo")), ["foo"])
])
def test_clean_str_representation(inp, out):
"""
the string representation should be human-readable. json's format
provides a legible format for complex data types.
"""
assert str(inp) == json.dumps(out)
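# Illustrative sketch (not part of the suite above): errors behave like lists
# of their message strings, and each ErrorMessage carries optional info.
def _demo_usage():
    err = ValidationError(('too long', 99), 'required')
    assert err == ['too long', 'required']
    assert [msg.info for msg in err] == [99, None]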
| 1,925 |
6,457 | package com.intel.realsense.librealsense;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.hardware.usb.UsbDevice;
import android.hardware.usb.UsbManager;
import android.util.Log;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class UsbUtilities {
private static final String TAG = "librs UsbUtilities";
public static final String ACTION_USB_PERMISSION = "USB_CONTROL_PERMISSION";
    // 0x8086 is Intel's USB vendor ID
    public static boolean isIntel(UsbDevice usbDevice){
        return usbDevice.getVendorId() == 0x8086;
    }
private static List<UsbDevice> getUsbDevices(Context context, Integer vId) {
return getUsbDevices(context, vId, 0);
}
private static List<UsbDevice> getUsbDevices(Context context, Integer vId, Integer pId) {
ArrayList<UsbDevice> res = new ArrayList<>();
UsbManager usbManager = (UsbManager) context.getSystemService(Context.USB_SERVICE);
HashMap<String, UsbDevice> devicesMap = usbManager.getDeviceList();
for (Map.Entry<String, UsbDevice> entry : devicesMap.entrySet()) {
UsbDevice usbDevice = entry.getValue();
if (usbDevice.getVendorId() == vId && (usbDevice.getProductId() == pId || pId == 0)) {
res.add(usbDevice);
}
}
if (res.isEmpty())
Log.w(TAG, "getUsbDevice: failed to locate USB device, " + "VID: " + String.format("0x%04x", vId) + ", PID: " + String.format("0x%04x", pId));
return res;
}
private static boolean hasUsbPermission(Context context, UsbDevice usbDevice){
Log.d(TAG, "hasUsbPermission");
if(usbDevice == null){
Log.w(TAG, "hasUsbPermission: null USB device");
return false;
}
UsbManager usbManager = (UsbManager) context.getSystemService(Context.USB_SERVICE);
return usbManager.hasPermission(usbDevice);
}
private static void grantUsbPermissions(Context context, UsbDevice usbDevice){
Log.d(TAG, "grantUsbPermissions");
if(usbDevice == null){
Log.w(TAG, "grantUsbPermissions: null USB device");
return;
}
UsbManager usbManager = (UsbManager) context.getSystemService(Context.USB_SERVICE);
boolean permission = usbManager.hasPermission(usbDevice);
if(!permission) {
Log.i(TAG, "grantUsbPermissions:\ndevice: " + usbDevice.toString());
PendingIntent pi = PendingIntent.getBroadcast(context, 0, new Intent(UsbUtilities.ACTION_USB_PERMISSION), 0);
usbManager.requestPermission(usbDevice, pi);
}
}
private static List<UsbDevice> getDevices(Context context) {
return getUsbDevices(context, 0x8086);
}
public static void grantUsbPermissionIfNeeded(Context context) {
List<UsbDevice> usbDevices = getDevices(context);
for (UsbDevice usbDevice : usbDevices) {
if (!hasUsbPermission(context, usbDevice)) {
grantUsbPermissions(context, usbDevice);
}
}
}
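    /* Usage sketch (hypothetical caller code): requestPermission() above is
     * asynchronous, so callers typically register a BroadcastReceiver for
     * ACTION_USB_PERMISSION and resume device access once it fires, e.g.:
     *
     *   IntentFilter filter = new IntentFilter(UsbUtilities.ACTION_USB_PERMISSION);
     *   context.registerReceiver(new BroadcastReceiver() {
     *       @Override public void onReceive(Context ctx, Intent intent) {
     *           boolean granted = intent.getBooleanExtra(
     *                   UsbManager.EXTRA_PERMISSION_GRANTED, false);
     *           // proceed with the device only if granted
     *       }
     *   }, filter);
     */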
}
| 1,321 |
2,748 | <filename>sofa-boot-project/sofa-boot-core/runtime-sofa-boot/src/test/java/com/alipay/sofa/runtime/test/beans/service/AnnotationSampleService.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alipay.sofa.runtime.test.beans.service;
import com.alipay.sofa.runtime.api.annotation.SofaReference;
import com.alipay.sofa.runtime.api.annotation.SofaReferenceBinding;
import com.alipay.sofa.runtime.api.annotation.SofaService;
import com.alipay.sofa.runtime.api.annotation.SofaServiceBinding;
import com.alipay.sofa.runtime.test.beans.facade.SampleService;
/**
* Publish Service via annotation
*
* @author qilong.zql
* @since 3.2.0
*/
@SofaService(interfaceType = SampleService.class, uniqueId = "${annotation.sample.service.uniqueId}", bindings = { @SofaServiceBinding(bindingType = "${annotation.sample.service.bindingType}", filters = {
"${annotation.sample.service.filter-1}",
"filter-2" }, timeout = 300) })
public class AnnotationSampleService implements SampleService {
@SofaReference(uniqueId = "${annotation.sample.ref.uniqueId}", jvmFirst = false, binding = @SofaReferenceBinding(bindingType = "${annotation.sample.ref.bindingType}", filters = {
"${annotation.sample.ref.filter-1}", "filter-2" }, directUrl = "${annotation.sample.ref.direct-url}"))
public SampleService sampleService;
@Override
public String service() {
return "AnnotationSampleService";
}
} | 1,079 |
349 | from flask import g, current_app, request
from sqlalchemy import desc, func
from apps.auth.models.users import User
from apps.project.business.cases import CaseBusiness
from apps.project.models.cases import Case
from apps.project.models.modules import Module
from apps.project.models.tasks import TaskCase
from library.api.db import db
from library.api.exceptions import SaveObjectException
from library.api.transfer import transfer2json
from library.trpc import Trpc
class ModuleBusiness(object):
user_trpc = Trpc('auth')
@classmethod
def project_permission(cls, pid=None, id=None):
project_id = cls.user_trpc.requests('get', '/user/userbindproject', {'userid': g.userid})
if g.is_admin:
return 0
if pid:
return 0 if pid in project_id else 1
else:
ret = Module.query.add_columns(Module.project_id.label('projectid')).filter(Module.id == id).first()
return 0 if ret.projectid in project_id else 1
@classmethod
def _query(cls):
return Module.query.add_columns(
Module.id.label('id'),
Module.name.label('name'),
Module.project_id.label('projectid'),
Module.description.label('description'),
Module.weight.label('weight'),
Module.status.label('status'),
Module.parent_id.label('parentid')
)
@classmethod
@transfer2json('?id|!name|!projectid|!description|!weight|!status')
def query_all_json(cls, limit, offset):
ret = cls._query().filter(Module.status == Module.ACTIVE) \
.order_by(desc(Module.id)) \
.limit(limit).offset(offset).all()
return ret
@classmethod
def module_create(cls, name, project_id, description, parent_id=None):
ret = Module.query.filter_by(name=name, project_id=project_id, status=Case.ACTIVE).first()
if ret:
            raise SaveObjectException('A module with the same name already exists')
m = Module(
name=name,
project_id=project_id,
description=description,
parent_id=parent_id,
)
db.session.add(m)
db.session.commit()
return 0, None
@classmethod
def module_delete(cls, id):
try:
m = Module.query.get(id)
m.status = Module.DISABLE
db.session.add(m)
for case in Case.query.filter_by(module_id=id):
case.status = Case.DISABLE
db.session.add(case)
db.session.commit()
return 0
except Exception as e:
current_app.logger.error(str(e))
return 105, str(e)
@classmethod
def module_modify(cls, id, name, project_id, description, weight):
try:
ret = Module.query.filter(Module.name == name, Module.status != Module.DISABLE, Module.id != id).first()
if ret:
return 103, None
m = Module.query.get(id)
m.name = name
m.project_id = project_id
m.description = description
m.weight = weight
db.session.add(m)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
@transfer2json('?id|!name|!projectid|!description|!weight|!status')
def query_json_by_id(cls, id):
ret = cls._query().filter(Module.status == Module.ACTIVE,
Module.id == id).all()
return ret
@classmethod
def _query_total_taskcase(cls):
return Module.query.outerjoin(
TaskCase, TaskCase.module_id == Module.id).add_columns(
Module.id.label('id'),
Module.name.label('name'),
Module.project_id.label('projectid'),
Module.description.label('description'),
Module.weight.label('weight'),
Module.status.label('status'),
func.count('*').label('total'),
Module.parent_id.label('parentid')
)
@classmethod
@transfer2json('?id|!projectid|!status|!total')
def query_by_project_id_total(cls, pid):
ret = CaseBusiness.case_total_groupby_module().filter(Module.status == Module.ACTIVE,
Module.project_id == pid).order_by(
desc(Module.id)).group_by(Case.module_id).all()
return ret
@classmethod
@transfer2json('?id|!name|!projectid|!description|!weight|!status|!parentid')
def query_by_project_ids(cls, pid):
ret = cls._query().filter(Module.status == Module.ACTIVE,
Module.project_id == pid).order_by(desc(Module.id)).all()
return ret
@classmethod
def filter_query(cls, pid=None, parent_ids=None, only_first=False, is_second=False):
query = cls._query().filter(Module.status == Module.ACTIVE)
if pid:
query = query.filter(Module.project_id == pid)
if is_second:
query = query.filter(Module.parent_id.in_(parent_ids))
else:
            module_name = request.args.get('modulename')
            if module_name:
                if '\\' in module_name:
                    # Escape backslashes: MySQL needs '\\\\' to match a literal '\'
                    module_name = module_name.replace('\\', '\\\\')
                query = query.filter(Module.name.like(f'%{module_name}%'))
            else:
                if only_first:
                    query = query.filter(Module.parent_id.is_(None))
query = query.order_by(desc(Module.id))
return query
@classmethod
@transfer2json('?id|!name|!projectid|!description|!weight|!status|!parentid', ispagination=True)
def pageinate_data(cls, pid):
query = cls.filter_query(pid=pid, only_first=True)
count = query.count()
page_size = request.args.get('page_size')
page_index = request.args.get('page_index')
if page_size and page_index:
size, index = int(page_size), int(page_index)
query = query.limit(size).offset((index - 1) * size)
        # total number of first-level modules, used for pagination
first_ret = query.all()
first_ret_ids = []
for fr in first_ret:
first_ret_ids.append(fr.id)
if first_ret_ids:
second_query = cls.filter_query(parent_ids=first_ret_ids, is_second=True)
second_ret = second_query.all()
if second_ret:
first_ret.extend(second_ret)
return first_ret, count
@classmethod
def query_by_project_id(cls, pid):
"""
        With few case modules this version is slower than the previous one,
        because the paginated queries hit the database multiple times for the
        total count and the second-level modules; with many modules it should
        be considerably faster, since it no longer returns all the data at
        once and the old version's loop count grew with the module count.
"""
tlist = []
        # when counting totals, modules whose count is 0 are not returned
total_ret = cls.query_by_project_id_total(pid)
total_ret_dict = {}
for a in total_ret:
tlist.append(a['id'])
total_ret_dict[a['id']] = a
case_total = 0
for total_ret_obj in total_ret:
case_total += total_ret_obj['total']
first_ret, count = cls.pageinate_data(pid)
for i in range(len(first_ret)):
if first_ret[i]['id'] not in tlist:
first_ret[i]['total'] = 0
else:
first_ret[i]['total'] = total_ret_dict[first_ret[i]['id']]['total']
first_ret = cls.converter(first_ret)
page_index = request.args.get('page_index')
page_size = request.args.get('page_size')
return first_ret, case_total, page_index, page_size, count
@classmethod
def query_by_project_case(cls, pid):
        # when counting totals, modules whose count is 0 are not returned
        # fetch all modules of the current project
# total_ret = cls.query_by_project_id_total(pid)
# if total_ret:
total_ret = cls.query_by_project_ids(pid)
tlist = [module['id'] for module in total_ret]
module_case = cls._case_query(tlist)
for t in range(len(total_ret)):
i = 0
total_ret[t]['case_list'] = []
total_ret[t]['total'] = i
for mc in module_case:
if total_ret[t]['id'] == mc['moduleid']:
total_ret[t]['case_list'].append(mc)
i += 1
total_ret[t]['total'] = i
total_ret = cls.converter(total_ret)
total_ret = sorted(total_ret, key=lambda x: x['id'], reverse=True)
return total_ret
@staticmethod
def converter(total_ret):
        # TODO: second-level directories; needs rework (nested double loop)
num = 0
tmp_total_ret = total_ret[:]
for index, module in enumerate(tmp_total_ret):
if module['parentid']:
for j in tmp_total_ret:
if j['id'] == module['parentid']:
if 'parentid' not in j or not j['parentid']:
if 'modules' not in j:
j['modules'] = []
j['modules'].append(module)
del total_ret[index - num]
num += 1
break
else:
del module['parentid']
return total_ret
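    # Illustrative example (hypothetical data): converter() nests second-level
    # modules under their parent and strips 'parentid' from top-level entries:
    #   input : [{'id': 2, 'parentid': 1}, {'id': 1, 'parentid': None}]
    #   output: [{'id': 1, 'modules': [{'id': 2, 'parentid': 1}]}]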
@classmethod
@transfer2json(
'?id|!cnumber|!ctype|!title|!precondition|!step_result|!creation_time|!modified_time'
'|!is_auto|!status|!moduleid|!module|!userid|!username|!priority')
def _case_query(cls, tlist):
return Case.query.outerjoin(
Module, Case.module_id == Module.id).outerjoin(
User, User.id == Case.creator).add_columns(
Case.id.label('id'),
Case.cnumber.label('cnumber'),
Case.ctype.label('ctype'),
Case.title.label('title'),
Case.precondition.label('precondition'),
Case.step_result.label('step_result'),
Case.is_auto.label('is_auto'),
Case.priority.label('priority'),
Case.status.label('status'),
func.date_format(Case.creation_time, "%Y-%m-%d %H:%i:%s").label('creation_time'),
func.date_format(Case.modified_time, "%Y-%m-%d %H:%i:%s").label('modified_time'),
Module.id.label('moduleid'),
Module.name.label('module'),
User.id.label('userid'),
User.nickname.label('username')
).filter(Case.status != Case.DISABLE).filter(Case.module_id.in_(tlist)).order_by(desc(Case.id)).all()
| 5,442 |
2,113 | <reponame>vbillet/Torque3D
/**
* OpenAL cross platform audio library
* Copyright (C) 2018 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <stdlib.h>
#include <SDL2/SDL.h>
#include "alMain.h"
#include "alu.h"
#include "threads.h"
#include "compat.h"
#include "backends/base.h"
#ifdef _WIN32
#define DEVNAME_PREFIX "OpenAL Soft on "
#else
#define DEVNAME_PREFIX ""
#endif
typedef struct ALCsdl2Backend {
DERIVE_FROM_TYPE(ALCbackend);
SDL_AudioDeviceID deviceID;
ALsizei frameSize;
ALuint Frequency;
enum DevFmtChannels FmtChans;
enum DevFmtType FmtType;
ALuint UpdateSize;
} ALCsdl2Backend;
static void ALCsdl2Backend_Construct(ALCsdl2Backend *self, ALCdevice *device);
static void ALCsdl2Backend_Destruct(ALCsdl2Backend *self);
static ALCenum ALCsdl2Backend_open(ALCsdl2Backend *self, const ALCchar *name);
static ALCboolean ALCsdl2Backend_reset(ALCsdl2Backend *self);
static ALCboolean ALCsdl2Backend_start(ALCsdl2Backend *self);
static void ALCsdl2Backend_stop(ALCsdl2Backend *self);
static DECLARE_FORWARD2(ALCsdl2Backend, ALCbackend, ALCenum, captureSamples, void*, ALCuint)
static DECLARE_FORWARD(ALCsdl2Backend, ALCbackend, ALCuint, availableSamples)
static DECLARE_FORWARD(ALCsdl2Backend, ALCbackend, ClockLatency, getClockLatency)
static void ALCsdl2Backend_lock(ALCsdl2Backend *self);
static void ALCsdl2Backend_unlock(ALCsdl2Backend *self);
DECLARE_DEFAULT_ALLOCATORS(ALCsdl2Backend)
DEFINE_ALCBACKEND_VTABLE(ALCsdl2Backend);
static const ALCchar defaultDeviceName[] = DEVNAME_PREFIX "Default Device";
static void ALCsdl2Backend_Construct(ALCsdl2Backend *self, ALCdevice *device)
{
ALCbackend_Construct(STATIC_CAST(ALCbackend, self), device);
SET_VTABLE2(ALCsdl2Backend, ALCbackend, self);
self->deviceID = 0;
self->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType, device->AmbiOrder);
self->Frequency = device->Frequency;
self->FmtChans = device->FmtChans;
self->FmtType = device->FmtType;
self->UpdateSize = device->UpdateSize;
}
static void ALCsdl2Backend_Destruct(ALCsdl2Backend *self)
{
if(self->deviceID)
SDL_CloseAudioDevice(self->deviceID);
self->deviceID = 0;
ALCbackend_Destruct(STATIC_CAST(ALCbackend, self));
}
static void ALCsdl2Backend_audioCallback(void *ptr, Uint8 *stream, int len)
{
ALCsdl2Backend *self = (ALCsdl2Backend*)ptr;
ALCdevice *device = STATIC_CAST(ALCbackend, self)->mDevice;
assert((len % self->frameSize) == 0);
aluMixData(device, stream, len / self->frameSize);
}
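/* Example (illustrative): for DevFmtStereo + DevFmtFloat, frameSize is
   2 channels * 4 bytes = 8, so a 1024-byte SDL buffer mixes 1024 / 8 = 128
   sample frames per callback. */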
static ALCenum ALCsdl2Backend_open(ALCsdl2Backend *self, const ALCchar *name)
{
ALCdevice *device = STATIC_CAST(ALCbackend, self)->mDevice;
SDL_AudioSpec want, have;
SDL_zero(want);
SDL_zero(have);
want.freq = device->Frequency;
switch(device->FmtType)
{
case DevFmtUByte: want.format = AUDIO_U8; break;
case DevFmtByte: want.format = AUDIO_S8; break;
case DevFmtUShort: want.format = AUDIO_U16SYS; break;
case DevFmtShort: want.format = AUDIO_S16SYS; break;
case DevFmtUInt: /* fall-through */
case DevFmtInt: want.format = AUDIO_S32SYS; break;
case DevFmtFloat: want.format = AUDIO_F32; break;
}
want.channels = (device->FmtChans == DevFmtMono) ? 1 : 2;
want.samples = device->UpdateSize;
want.callback = ALCsdl2Backend_audioCallback;
want.userdata = self;
/* Passing NULL to SDL_OpenAudioDevice opens a default, which isn't
* necessarily the first in the list.
*/
if(!name || strcmp(name, defaultDeviceName) == 0)
self->deviceID = SDL_OpenAudioDevice(NULL, SDL_FALSE, &want, &have,
SDL_AUDIO_ALLOW_ANY_CHANGE);
else
{
const size_t prefix_len = strlen(DEVNAME_PREFIX);
if(strncmp(name, DEVNAME_PREFIX, prefix_len) == 0)
self->deviceID = SDL_OpenAudioDevice(name+prefix_len, SDL_FALSE, &want, &have,
SDL_AUDIO_ALLOW_ANY_CHANGE);
else
self->deviceID = SDL_OpenAudioDevice(name, SDL_FALSE, &want, &have,
SDL_AUDIO_ALLOW_ANY_CHANGE);
}
if(self->deviceID == 0)
return ALC_INVALID_VALUE;
device->Frequency = have.freq;
if(have.channels == 1)
device->FmtChans = DevFmtMono;
else if(have.channels == 2)
device->FmtChans = DevFmtStereo;
else
{
ERR("Got unhandled SDL channel count: %d\n", (int)have.channels);
return ALC_INVALID_VALUE;
}
switch(have.format)
{
case AUDIO_U8: device->FmtType = DevFmtUByte; break;
case AUDIO_S8: device->FmtType = DevFmtByte; break;
case AUDIO_U16SYS: device->FmtType = DevFmtUShort; break;
case AUDIO_S16SYS: device->FmtType = DevFmtShort; break;
case AUDIO_S32SYS: device->FmtType = DevFmtInt; break;
case AUDIO_F32SYS: device->FmtType = DevFmtFloat; break;
default:
ERR("Got unsupported SDL format: 0x%04x\n", have.format);
return ALC_INVALID_VALUE;
}
device->UpdateSize = have.samples;
device->NumUpdates = 2; /* SDL always (tries to) use two periods. */
self->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType, device->AmbiOrder);
self->Frequency = device->Frequency;
self->FmtChans = device->FmtChans;
self->FmtType = device->FmtType;
self->UpdateSize = device->UpdateSize;
alstr_copy_cstr(&device->DeviceName, name ? name : defaultDeviceName);
return ALC_NO_ERROR;
}
static ALCboolean ALCsdl2Backend_reset(ALCsdl2Backend *self)
{
ALCdevice *device = STATIC_CAST(ALCbackend, self)->mDevice;
device->Frequency = self->Frequency;
device->FmtChans = self->FmtChans;
device->FmtType = self->FmtType;
device->UpdateSize = self->UpdateSize;
device->NumUpdates = 2;
SetDefaultWFXChannelOrder(device);
return ALC_TRUE;
}
static ALCboolean ALCsdl2Backend_start(ALCsdl2Backend *self)
{
SDL_PauseAudioDevice(self->deviceID, 0);
return ALC_TRUE;
}
static void ALCsdl2Backend_stop(ALCsdl2Backend *self)
{
SDL_PauseAudioDevice(self->deviceID, 1);
}
static void ALCsdl2Backend_lock(ALCsdl2Backend *self)
{
SDL_LockAudioDevice(self->deviceID);
}
static void ALCsdl2Backend_unlock(ALCsdl2Backend *self)
{
SDL_UnlockAudioDevice(self->deviceID);
}
typedef struct ALCsdl2BackendFactory {
DERIVE_FROM_TYPE(ALCbackendFactory);
} ALCsdl2BackendFactory;
#define ALCsdl2BACKENDFACTORY_INITIALIZER { { GET_VTABLE2(ALCsdl2BackendFactory, ALCbackendFactory) } }
ALCbackendFactory *ALCsdl2BackendFactory_getFactory(void);
static ALCboolean ALCsdl2BackendFactory_init(ALCsdl2BackendFactory *self);
static void ALCsdl2BackendFactory_deinit(ALCsdl2BackendFactory *self);
static ALCboolean ALCsdl2BackendFactory_querySupport(ALCsdl2BackendFactory *self, ALCbackend_Type type);
static void ALCsdl2BackendFactory_probe(ALCsdl2BackendFactory *self, enum DevProbe type);
static ALCbackend* ALCsdl2BackendFactory_createBackend(ALCsdl2BackendFactory *self, ALCdevice *device, ALCbackend_Type type);
DEFINE_ALCBACKENDFACTORY_VTABLE(ALCsdl2BackendFactory);
ALCbackendFactory *ALCsdl2BackendFactory_getFactory(void)
{
static ALCsdl2BackendFactory factory = ALCsdl2BACKENDFACTORY_INITIALIZER;
return STATIC_CAST(ALCbackendFactory, &factory);
}
static ALCboolean ALCsdl2BackendFactory_init(ALCsdl2BackendFactory* UNUSED(self))
{
    if(SDL_InitSubSystem(SDL_INIT_AUDIO) == 0)
        return ALC_TRUE;
    return ALC_FALSE;
}
static void ALCsdl2BackendFactory_deinit(ALCsdl2BackendFactory* UNUSED(self))
{
SDL_QuitSubSystem(SDL_INIT_AUDIO);
}
static ALCboolean ALCsdl2BackendFactory_querySupport(ALCsdl2BackendFactory* UNUSED(self), ALCbackend_Type type)
{
if(type == ALCbackend_Playback)
return ALC_TRUE;
return ALC_FALSE;
}
static void ALCsdl2BackendFactory_probe(ALCsdl2BackendFactory* UNUSED(self), enum DevProbe type)
{
int num_devices, i;
al_string name;
if(type != ALL_DEVICE_PROBE)
return;
AL_STRING_INIT(name);
num_devices = SDL_GetNumAudioDevices(SDL_FALSE);
AppendAllDevicesList(defaultDeviceName);
for(i = 0;i < num_devices;++i)
{
alstr_copy_cstr(&name, DEVNAME_PREFIX);
alstr_append_cstr(&name, SDL_GetAudioDeviceName(i, SDL_FALSE));
AppendAllDevicesList(alstr_get_cstr(name));
}
alstr_reset(&name);
}
static ALCbackend* ALCsdl2BackendFactory_createBackend(ALCsdl2BackendFactory* UNUSED(self), ALCdevice *device, ALCbackend_Type type)
{
if(type == ALCbackend_Playback)
{
ALCsdl2Backend *backend;
NEW_OBJ(backend, ALCsdl2Backend)(device);
if(!backend) return NULL;
return STATIC_CAST(ALCbackend, backend);
}
return NULL;
}
| 4,096 |
4,703 | NULL_HASH32 = b'\x00'*32
CENT = 1000000
COIN = 100*CENT
DUST = 1000
TIMEOUT = 30.0
TXO_TYPES = {
"other": 0,
"stream": 1,
"channel": 2,
"support": 3,
"purchase": 4,
"collection": 5,
"repost": 6,
}
CLAIM_TYPE_NAMES = [
'stream',
'channel',
'collection',
'repost',
]
CLAIM_TYPES = [
TXO_TYPES[name] for name in CLAIM_TYPE_NAMES
]
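# Example (illustrative): amounts are integers in base units, so
# 1 coin == COIN == 100_000_000 base units and 1.5 coins == 150 * CENT.
# DUST (1000) presumably serves as the minimum spendable output threshold.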
| 193 |
2,441 | import os
import subprocess
from glob import glob
# Pin the CPU frequency to this while running the benchmarks.
# If this is set to 0 or None, we will instead disable turbo boost
# and the CPU will run at its base frequency.
if "PIN_FREQ" in os.environ:
PIN_FREQ = int(os.environ["PIN_FREQ"])
elif os.path.exists(os.path.expanduser("~/.pinfreq")):
PIN_FREQ = int(open(os.path.expanduser("~/.pinfreq")).read())
else:
raise Exception("Specify the PIN_FREQ env var or write ~/.pinfreq")
def rel(path):
return os.path.join(os.path.dirname(__file__), path)
PYPERF = rel("pyperf_env/bin/pyperf")
if not os.path.exists(PYPERF):
subprocess.check_call(["python3", "-m", "venv", rel("pyperf_env")])
subprocess.check_call([rel("pyperf_env/bin/pip"), "install", "pyperf"])
IS_AMD = "AMD" in open("/proc/cpuinfo").read()
def write_to_sys_file(path, value):
p = subprocess.Popen(["sudo", "tee", path], stdin=subprocess.PIPE)
p.communicate(value + b"\n")
assert p.wait() == 0
def tune():
ret = subprocess.call(["sudo", PYPERF, "system", "tune"], stdout=open("/dev/null", "w"))
# 'pyperf system tune' will report an error on AMD systems because it can't do intel specific changes
# but it will still execute the non intel ones.
if IS_AMD:
# Now we have to manually disable turbo boost
write_to_sys_file("/sys/devices/system/cpu/cpufreq/boost", b"0")
for f in glob("/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor"):
write_to_sys_file(f, b"performance")
else:
        if ret != 0:
            # Re-run with stdout visible so the failure reason is shown, then abort.
            ret = subprocess.call(["sudo", PYPERF, "system", "tune"])
            assert 0
        assert ret == 0
if PIN_FREQ:
assert not IS_AMD, "on AMD systems we don't support setting a specific frequency"
write_to_sys_file("/sys/devices/system/cpu/intel_pstate/no_turbo", b"0")
subprocess.check_call("bash -c 'echo %d | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_{min,max}_freq'" % PIN_FREQ, shell=True)
def untune():
if PIN_FREQ:
assert not IS_AMD, "on AMD systems we don't support setting a specific frequency"
subprocess.check_call("echo 0 | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq", shell=True)
subprocess.check_call("echo 99999999 | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq", shell=True)
ret = subprocess.call(["sudo", PYPERF, "system", "reset"], stdout=open("/dev/null", "w"))
# 'pyperf system reset' will report an error on AMD systems because it can't do intel specific changes
# but it will still execute the non intel ones.
if IS_AMD:
# Now we have to manually enable turbo boost
write_to_sys_file("/sys/devices/system/cpu/cpufreq/boost", b"1")
for f in glob("/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor"):
write_to_sys_file(f, b"ondemand")
else:
assert ret == 0
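# Usage sketch (hypothetical driver; this module only defines tune()/untune()):
#
#     tune()
#     try:
#         run_benchmarks()  # hypothetical benchmark entry point
#     finally:
#         untune()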
| 1,191 |
301 | #!/usr/bin/python3
'''
/******************************************************************
*
* Copyright 2018 Samsung Electronics All Rights Reserved.
*
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************/
'''
import os
import sys
import time
import fnmatch
import optparse
import platform
import fcntl
from os.path import isfile, join
from configuration import *
from ite.tc.container import TestSpecContainer
from ite.reporter.tc_reporter import TestSpecReporter
from ite.util import *
from ite.tc.analyzer import TestSpec
from ite.constants import *
from ite.config import *
from ite.multi_logger import set_file_handler, print_runner_output
from ite.exec.multi_runner import *
from ite.exec.run_options import *
from ite.exec.runner_info_setter import *
from ite.exec.tc_executor import *
TEST_ROOT = '..'
timeout_seconds = 300
oparser = optparse.OptionParser()
oparser.add_option("-p", action="store", dest="platform", default="linux")
oparser.add_option("--platform", action="store", dest="platform", default="linux")
oparser.add_option("-t", action="store", dest="target")
oparser.add_option("--target", action="store", dest="target")
oparser.add_option("-x", action="store", dest="transport", default="")
oparser.add_option("--transport", action="store", dest="transport", default="")
oparser.add_option("-n", action="store", dest="network", default="")
oparser.add_option("--network", action="store", dest="network", default="")
oparser.add_option("-u", action="store", dest="suite")
oparser.add_option("--testsuite", action="store", dest="suite")
oparser.add_option("-c", action="store", dest="testcase")
oparser.add_option("--case", action="store", dest="testcase")
oparser.add_option("-d", action="store", dest="device_name")
oparser.add_option("--device", action="store", dest="device_name")
oparser.add_option("-f", action="store", dest="file_filter")
oparser.add_option("--filter", action="store", dest="file_filter")
oparser.add_option("-m", action="store", dest="module")
oparser.add_option("--module", action="store", dest="module")
oparser.add_option("-w", action="store", dest="tc_framework")
oparser.add_option("--framework", action="store", dest="tc_framework")
oparser.add_option("-i", action="store", dest="tc_scenario")
oparser.add_option("--scenario", action="store", dest="tc_scenario")
oparser.add_option("-g", action="store", dest="package_name")
oparser.add_option("--package", action="store", dest="package_name")
oparser.add_option("-r", action="store", dest="test_result_dir", default="")
oparser.add_option("--result", action="store", dest="test_result_dir", default="")
oparser.add_option("--path", action="store", dest="app_path")
oparser.add_option("--just_print", action="store_true", dest="just_print")
oparser.add_option("--process_id", action="store", dest="process_id")
oparser.add_option("--max_total_count", action="store", dest="max_total_count", default="3")
oparser.add_option("--min_pass_count", action="store", dest="min_pass_count", default="1")
oparser.add_option("--max_timeout_count", action="store", dest="max_timeout_count", default="2")
oparser.add_option("--verdict_path", action="store", dest="verdict_path", default="tc_verdicts.txt")
oparser.add_option("--save_verdict", action = 'store_true', dest="save_verdict", default=False)
oparser.add_option("-b", action="store", dest="build_type", default="")
oparser.add_option("--build_type", action = 'store', dest="build_type", default="")
opts, args = oparser.parse_args()
platform_type = opts.platform
file_filter = opts.file_filter
given_module = opts.module
package_name = opts.package_name
test_result_dir = opts.test_result_dir
tc_verdict_file_path = opts.verdict_path
save_verdict = opts.save_verdict
TestRunOption.max_total_count = int(opts.max_total_count)
TestRunOption.min_pass_count = int(opts.min_pass_count)
TestRunOption.max_timeout_count = int(opts.max_timeout_count)
if save_verdict:
set_file_handler(given_module, platform_type, tc_verdict_file_path)
#sys.stdout = Multi_Logger(given_module, platform_type, tc_verdict_file_path)
print_runner_output('All parameters have taken')
dynamic_runner = None
if 'linux' == platform_type:
dynamic_runner = LinuxTestRunner()
elif 'tizen' == platform_type:
dynamic_runner = TizenTestRunner()
elif 'android' == platform_type:
tc_framework = opts.tc_framework
if not tc_framework:
tc_framework = TESTFW_TYPES.JUNIT
if TESTFW_TYPES.GTEST == tc_framework:
dynamic_runner = AndroidGtestRunner()
elif TESTFW_TYPES.JUNIT == tc_framework:
dynamic_runner = AndroidJunitRunner()
if not dynamic_runner:
print_runner_output('No runner has been set')
exit(0)
device_name = dynamic_runner.set_device_name(opts.device_name)
target = dynamic_runner.set_target(opts.target)
transport = dynamic_runner.set_transport(opts.transport)
network = dynamic_runner.set_network(opts.network)
tc_framework = dynamic_runner.set_tc_framework(opts.tc_framework)
app_path = dynamic_runner.set_app_path(opts.app_path)
given_module = dynamic_runner.set_module(given_module)
build_type = dynamic_runner.set_build_type(opts.build_type, given_module)
devices = dynamic_runner.get_devices()
if devices:
if opts.device_name and opts.device_name not in devices:
print_runner_output('Device {} not found'.format(device_name))
exit(-1)
if not device_name:
device_name = dynamic_runner.set_device_name(devices[0])
print_runner_output('dynamic_runner has been set')
if not test_result_dir:
test_result_dir = TEST_RESULT_RUN_DIR
if test_result_dir.endswith(os.sep):
test_result_dir = test_result_dir[:-1]
if opts.suite:
given_testsuites = opts.suite.split(',')
else:
given_testsuites = []
if opts.tc_scenario:
tc_scenario = opts.tc_scenario.split(',')
else:
tc_scenario = []
if opts.testcase:
given_testcases = opts.testcase.split(',')
else:
given_testcases = []
if not os.path.exists(test_result_dir):
os.makedirs(test_result_dir)
test_result_dir = os.path.abspath(test_result_dir)
if not app_path:
    print (Colors.FAIL + "binary file's folder path is not given. Use --path option" + Colors.ENDC)
exit(0)
if app_path.endswith(os.sep):
    app_path = app_path[:-1]
print_runner_output('Splitting parameters complete')
platform_command_prefix = dynamic_runner.get_platform_command_prefix()
testspec_path = os.path.join(test_result_dir, TEST_SPEC_XML_FOR_RESULT)
if not os.path.exists(testspec_path) and os.path.exists(API_TC_SRC_DIR):
container = TestSpecContainer()
container.extract_api_testspec(API_TC_SRC_DIR, '')
reporter = TestSpecReporter()
reporter.generate_testspec_report(container.data)
reporter.report('XML', testspec_path)
print_runner_output('Device root on (if applicable)')
dynamic_runner.device_root_on()
print_runner_output('Calling TC executor')
tc_executor = TcExecutor(timeout_seconds, test_result_dir)
print_runner_output('Calling runner info setter')
info_setter = RunnerInfoSetter(TEST_ROOT)
print_runner_output('Calling tc list setter')
if TESTFW_TYPES.GTEST in tc_framework:
info_setter.set_gtest_tc_list(test_result_dir, given_module, file_filter, given_testsuites, given_testcases, dynamic_runner)
if TESTFW_TYPES.JUNIT in tc_framework:
info_setter.set_junit_tc_list(test_result_dir, given_module, package_name, given_testsuites, given_testcases, dynamic_runner)
print_runner_output('TC list setting complete')
if opts.just_print:
tc_executor.print_tc_list('tc_list.txt', dynamic_runner)
else:
tc_executor.run_selected_testcases(dynamic_runner, tc_verdict_file_path, save_verdict)
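# Example invocation (hypothetical module/paths; flags match the oparser
# definitions above):
#   ./runner.py -p linux -m ca -u SampleTestSuite --path ./bin -r result_dir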
| 2,857 |
506 | package com.xnx3.util;
/**
 * Tasks that run automatically when the application starts
* @author 管雷鸣
*/
public class AutoRun {
/**
	 * New-version check. If a new version is found, a prompt dialog is shown.
*/
public static void versionCheck(){
new Thread(new Runnable() {
public void run() {
CheckVersion.cloudCheck();
}
}).start();
}
public static void main(String[] args) {
versionCheck();
}
}
| 191 |
335 | <gh_stars>100-1000
{
"word": "Rai",
"definitions": [
"A style of music fusing Arabic and Algerian folk elements with Western rock."
],
"parts-of-speech": "Noun"
} | 77 |
1,510 | <reponame>julien-faye/drill
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.store.druid;
import org.apache.drill.common.FunctionNames;
import org.apache.drill.common.expression.SchemaPath;
import org.apache.drill.exec.store.druid.common.DruidConstants;
import org.junit.Before;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class DruidScanSpecBuilderTest {
private static final String SOME_DATASOURCE_NAME = "some datasource";
private static final long SOME_DATASOURCE_SIZE = 500;
private static final String SOME_DATASOURCE_MIN_TIME = "some min time";
private static final String SOME_DATASOURCE_MAX_TIME = "some max time";
private static final String SOME_FIELD = "some field";
private static final String SOME_VALUE = "some value";
private DruidScanSpecBuilder druidScanSpecBuilder;
@Before
public void setup() {
druidScanSpecBuilder = new DruidScanSpecBuilder();
}
@Test
public void buildCalledWithEqualFxShouldBuildSelectorFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder
.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.EQ,
schemaPath,
SOME_VALUE);
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"selector\",\"dimension\":\"some field\",\"value\":\"some value\"}");
}
@Test
public void buildCalledWithEqualFxIntervalFieldShouldBuildIntervalFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(DruidConstants.INTERVAL_DIMENSION_NAME);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.EQ,
schemaPath,
SOME_VALUE);
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"eventInterval\":\"some value\"}");
}
@Test
public void buildCalledWithNotEqualFxShouldBuildSelectorFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.NE,
schemaPath, SOME_VALUE
);
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"not\",\"field\":{\"type\":\"selector\",\"dimension\":\"some field\",\"value\":\"some value\"}}");
}
@Test
public void buildCalledWithGreaterThanOrEqualToFxShouldBuildBoundFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.GE,
schemaPath,
SOME_VALUE
);
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"bound\",\"dimension\":\"some field\",\"lower\":\"some value\",\"ordering\":\"lexicographic\"}");
}
@Test
public void buildCalledWithGreaterThanFxShouldBuildBoundFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.GT,
schemaPath,
SOME_VALUE
);
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"bound\",\"dimension\":\"some field\",\"lower\":\"some value\",\"lowerStrict\":true,\"ordering\":\"lexicographic\"}");
}
@Test
public void buildCalledWithGreaterThanFxAndNumericValueShouldBuildBoundFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.GT,
schemaPath,
"1"
);
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"bound\",\"dimension\":\"some field\",\"lower\":\"1\",\"lowerStrict\":true,\"ordering\":\"numeric\"}");
}
@Test
public void buildCalledWithLessThanOrEqualToFxShouldBuildBoundFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.LE,
schemaPath,
SOME_VALUE);
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"bound\",\"dimension\":\"some field\",\"upper\":\"some value\",\"ordering\":\"lexicographic\"}");
}
@Test
public void buildCalledWithLessThanFxShouldBuildBoundFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.LT,
schemaPath,
SOME_VALUE);
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"bound\",\"dimension\":\"some field\",\"upper\":\"some value\",\"upperStrict\":true,\"ordering\":\"lexicographic\"}");
}
@Test
public void buildCalledWithLessThanFxAndNumericValueShouldBuildBoundFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.LT,
schemaPath,
"1");
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"bound\",\"dimension\":\"some field\",\"upper\":\"1\",\"upperStrict\":true,\"ordering\":\"numeric\"}");
}
@Test
public void buildCalledWithIsNullFxShouldBuildSelectorFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.IS_NULL,
schemaPath,
null);
assertThat(druidScanSpec).isNotNull();
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"selector\",\"dimension\":\"some field\",\"value\":null}");
}
@Test
public void buildCalledWithIsNotNullFxShouldBuildSelectorFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder.build(
SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.IS_NOT_NULL,
schemaPath,
null);
assertThat(druidScanSpec).isNotNull();
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"not\",\"field\":{\"type\":\"selector\",\"dimension\":\"some field\",\"value\":null}}");
}
@Test
public void buildCalledWithLikeFxButIfValueIsPrefixedWithRegexKeywordHintShouldBuildRegexFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder
.build(SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.LIKE,
schemaPath,
"$regex$_some_regular_expression");
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"regex\",\"dimension\":\"some field\",\"pattern\":\"some_regular_expression\"}");
}
@Test
public void buildCalledWithLikeFxShouldBuildSearchFilter() {
SchemaPath schemaPath = SchemaPath.getSimplePath(SOME_FIELD);
DruidScanSpec druidScanSpec =
druidScanSpecBuilder
.build(SOME_DATASOURCE_NAME,
SOME_DATASOURCE_SIZE,
SOME_DATASOURCE_MIN_TIME,
SOME_DATASOURCE_MAX_TIME,
FunctionNames.LIKE,
schemaPath,
"some search string");
assertThat(druidScanSpec.getFilter().toJson()).isEqualTo("{\"type\":\"search\",\"dimension\":\"some field\",\"query\":{\"type\":\"contains\",\"value\":\"some search string\",\"caseSensitive\":false}}");
}
}
| 3,741 |
494 | <filename>ios/RNWatch/FileTransferEvent.h
//
// Created by <NAME> on 20/06/2020.
// Copyright (c) 2020 Facebook. All rights reserved.
//
#import <Foundation/Foundation.h>
@class FileTransferInfo;
static NSString *const FILE_EVENT_PROGRESS = @"progress";
static NSString *const FILE_EVENT_STARTED = @"started";
static NSString *const FILE_EVENT_FINISHED = @"finished";
static NSString *const FILE_EVENT_ERROR = @"error";
@interface FileTransferEvent : NSObject
@property(nonatomic, strong) NSNumber *bytesTransferred;
@property(nonatomic, strong) NSNumber *estimatedTimeRemaining;
@property(nonatomic, strong) NSString *id;
@property(nonatomic, strong) NSNumber *fractionCompleted;
@property(nonatomic, strong) NSNumber *throughput;
@property(nonatomic, strong) NSNumber *bytesTotal;
@property(nonatomic, strong) NSString *uri;
@property(nonatomic, strong) NSError *error;
@property(nonatomic, strong) NSDictionary *metadata;
@property(nonatomic, strong) NSNumber *startTime;
@property(nonatomic, strong) NSNumber *endTime;
- (NSDictionary *)serialize;
- (NSDictionary *)serializeWithEventType: (NSString*) type;
- (FileTransferEvent *)initWithTransferInfo:(FileTransferInfo *)info;
@end
| 375 |
631 | #include "sys.h"
#include <windows.h>
#include <QDebug>
#include "ntstatus/NtStatusNames.hpp"
Sys::KernelInfo Sys::getKernelInfo()
{
Sys::KernelInfo out;
out.kernelType = KernelType::Windows;
out.kernelName = "Windows";
OSVERSIONINFOW osvi;
ZeroMemory(&osvi, sizeof(OSVERSIONINFOW));
osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOW);
GetVersionExW(&osvi);
out.kernelVersion = QString("%1.%2").arg(osvi.dwMajorVersion).arg(osvi.dwMinorVersion);
out.kernelMajor = osvi.dwMajorVersion;
out.kernelMinor = osvi.dwMinorVersion;
out.kernelPatch = osvi.dwBuildNumber;
return out;
}
uint64_t Sys::getSystemRam()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx( &status );
// bytes
return (uint64_t)status.ullTotalPhys;
}
bool Sys::isSystem64bit()
{
#if defined(_WIN64)
return true;
#elif defined(_WIN32)
BOOL f64 = false;
return IsWow64Process(GetCurrentProcess(), &f64) && f64;
#else
// it's some other kind of system...
return false;
#endif
}
bool Sys::isCPU64bit()
{
SYSTEM_INFO info;
ZeroMemory(&info, sizeof(SYSTEM_INFO));
GetNativeSystemInfo(&info);
auto arch = info.wProcessorArchitecture;
return arch == PROCESSOR_ARCHITECTURE_AMD64 || arch == PROCESSOR_ARCHITECTURE_IA64;
}
Sys::DistributionInfo Sys::getDistributionInfo()
{
DistributionInfo result;
return result;
}
bool Sys::lookupSystemStatusCode(uint64_t code, std::string &name, std::string &description)
{
bool hasCodeName = NtStatus::lookupNtStatusCodeName(code, name);
PSTR messageBuffer = nullptr;
HMODULE ntdll = GetModuleHandleA("ntdll.dll");
if(!ntdll)
{
        // Should not happen: ntdll.dll is mapped into every user-mode process.
qWarning() << "GetModuleHandleA returned nullptr for ntdll.dll";
return false;
}
auto messageSize = FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_FROM_HMODULE | FORMAT_MESSAGE_IGNORE_INSERTS,
ntdll,
code,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
reinterpret_cast<PSTR>(&messageBuffer),
0,
nullptr
);
bool hasDescription = messageSize > 0;
if(hasDescription)
{
description = std::string(messageBuffer, messageSize);
}
if(messageBuffer)
{
LocalFree(messageBuffer);
}
return hasCodeName || hasDescription;
}
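// Usage sketch (illustrative only; 0xC0000005 is the well-known NTSTATUS
// STATUS_ACCESS_VIOLATION):
//
//   std::string name, description;
//   if (Sys::lookupSystemStatusCode(0xC0000005, name, description))
//   {
//       qDebug() << name.c_str() << description.c_str();
//   }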
| 1,025 |
1,350 | <filename>sdk/machinelearningservices/azure-resourcemanager-machinelearningservices/src/main/java/com/azure/resourcemanager/machinelearningservices/models/OperationStatus.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.machinelearningservices.models;
import com.azure.core.util.ExpandableStringEnum;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.util.Collection;
/** Defines values for OperationStatus. */
public final class OperationStatus extends ExpandableStringEnum<OperationStatus> {
/** Static value InProgress for OperationStatus. */
public static final OperationStatus IN_PROGRESS = fromString("InProgress");
/** Static value Succeeded for OperationStatus. */
public static final OperationStatus SUCCEEDED = fromString("Succeeded");
/** Static value CreateFailed for OperationStatus. */
public static final OperationStatus CREATE_FAILED = fromString("CreateFailed");
/** Static value StartFailed for OperationStatus. */
public static final OperationStatus START_FAILED = fromString("StartFailed");
/** Static value StopFailed for OperationStatus. */
public static final OperationStatus STOP_FAILED = fromString("StopFailed");
/** Static value RestartFailed for OperationStatus. */
public static final OperationStatus RESTART_FAILED = fromString("RestartFailed");
/** Static value ReimageFailed for OperationStatus. */
public static final OperationStatus REIMAGE_FAILED = fromString("ReimageFailed");
/** Static value DeleteFailed for OperationStatus. */
public static final OperationStatus DELETE_FAILED = fromString("DeleteFailed");
/**
     * Creates or finds an OperationStatus from its string representation.
*
* @param name a name to look for.
* @return the corresponding OperationStatus.
*/
@JsonCreator
public static OperationStatus fromString(String name) {
return fromString(name, OperationStatus.class);
}
/** @return known OperationStatus values. */
public static Collection<OperationStatus> values() {
return values(OperationStatus.class);
}
}
| 644 |
487 | // Copyright 2021 The Verible Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef VERIBLE_COMMON_LSP_JSON_RPC_DISPATCHER_H
#define VERIBLE_COMMON_LSP_JSON_RPC_DISPATCHER_H
#include <functional>
#include <map>
#include <sstream>
#include <string>
#include <unordered_map>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "nlohmann/json.hpp"
namespace verible {
namespace lsp {
// A Dispatcher that is fed JSON messages as strings, parses them into json
// objects and dispatches the contained method calls to pre-registered handlers.
// Results of RPCCallHandlers are wrapped in a json rpc response object
// and written out to the provided write function.
//
// This implements the JSON RPC specification [1].
//
// All receiving (call to DispatchMessage()) and writing of response (WriteFun)
// is abstracted out to make the dispatcher agnostic of the transport layer.
//
// The RPCHandlers take and return json objects, but since nlohmann::json
// provides ways to auto-convert objects to json, it is possible to
// register properly typed handlers. To create the boilerplate for custom
// types and their conversion, the simplest approach is to use a code
// generator such as jcxxgen [2].
//
// With that, you can then register fully typed handlers with seamless
// conversion
//   dispatcher.AddRequestHandler("MyMethod",
// [](const MyParamType &p) -> MyResponseType {
// return doSomething(p);
// });
//
// [1]: https://www.jsonrpc.org/specification
// [2]: https://github.com/hzeller/jcxxgen
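//
// A minimal end-to-end wiring sketch (illustrative only; "connection" and
// ReadMessage() are assumed stand-ins for the transport layer, not part of
// this header):
//
//   JsonRpcDispatcher dispatcher(
//       [&](absl::string_view response) { connection.Write(response); });
//   dispatcher.AddRequestHandler("initialize", InitializeHandler);
//   while (connection.IsOpen()) dispatcher.DispatchMessage(ReadMessage());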
class JsonRpcDispatcher {
public:
// Magic constants defined in https://www.jsonrpc.org/specification
static constexpr int kParseError = -32700;
static constexpr int kMethodNotFound = -32601;
static constexpr int kInternalError = -32603;
// A notification receives a request, but does not return anything
using RPCNotification = std::function<void(const nlohmann::json &r)>;
// A RPC call receives a request and returns a response.
// If we ever have a meaningful set of error conditions to convey, maybe
// change this to absl::StatusOr<nlohmann::json> as return value.
using RPCCallHandler = std::function<nlohmann::json(const nlohmann::json &)>;
// A function of type WriteFun is called by the dispatcher to send the
// string-formatted json response. The user of the JsonRpcDispatcher then
// can wire that to the underlying transport.
using WriteFun = std::function<void(absl::string_view response)>;
// Some statistical counters of method calls or exceptions encountered.
using StatsMap = std::map<std::string, int>;
// Responses are written using the "out" write function.
explicit JsonRpcDispatcher(const WriteFun &out) : write_fun_(out) {}
JsonRpcDispatcher(const JsonRpcDispatcher &) = delete;
// Add a request handler for RPC calls that receive data and send a response.
  // Returns true on successful registration, false if that name is already registered.
bool AddRequestHandler(const std::string &method_name,
const RPCCallHandler &fun) {
return handlers_.insert({method_name, fun}).second;
}
// Add a request handler for RPC Notifications, that are receive-only events.
  // Returns true on successful registration, false if that name is already registered.
bool AddNotificationHandler(const std::string &method_name,
const RPCNotification &fun) {
return notifications_.insert({method_name, fun}).second;
}
// Dispatch incoming message, a string view with json data.
// Call this with the content of exactly one message.
// If this is an RPC call, response will call WriteFun.
void DispatchMessage(absl::string_view data);
// Send a notification to the client side. Parameters will be wrapped
// in a JSON-RPC message and pushed out to the WriteFun
void SendNotification(const std::string &method,
const nlohmann::json ¬ification_params);
// Get some human-readable statistical counters of methods called
// and exception messages encountered.
const StatsMap &GetStatCounters() const { return statistic_counters_; }
// Number of exceptions that have been dealt with and turned into error
// messages or ignored depending on the context.
  // The counters returned by GetStatCounters() will report counts by
// exception message.
int exception_count() const { return exception_count_; }
private:
bool CallNotification(const nlohmann::json &req, const std::string &method);
bool CallRequestHandler(const nlohmann::json &req, const std::string &method);
void SendReply(const nlohmann::json &response);
static nlohmann::json CreateError(const nlohmann::json &request, int code,
absl::string_view message);
static nlohmann::json MakeResponse(const nlohmann::json &request,
const nlohmann::json &call_result);
const WriteFun write_fun_;
std::unordered_map<std::string, RPCCallHandler> handlers_;
std::unordered_map<std::string, RPCNotification> notifications_;
int exception_count_ = 0;
StatsMap statistic_counters_;
};
} // namespace lsp
} // namespace verible
#endif // VERIBLE_COMMON_LSP_JSON_RPC_DISPATCHER_H
| 1,807 |
370 | package com.vitco.app.low.triangulate;
import com.vitco.app.util.misc.StringIndexer;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Polygon;
import org.jaitools.media.jai.vectorize.VectorizeDescriptor;
import org.poly2tri.Poly2Tri;
import org.poly2tri.geometry.polygon.PolygonPoint;
import org.poly2tri.triangulation.TriangulationAlgorithm;
import org.poly2tri.triangulation.TriangulationContext;
import org.poly2tri.triangulation.TriangulationPoint;
import org.poly2tri.triangulation.delaunay.DelaunayTriangle;
import javax.media.jai.JAI;
import javax.media.jai.ParameterBlockJAI;
import javax.media.jai.RenderedOp;
import java.awt.image.RenderedImage;
import java.util.*;
/**
* Helper class that converts a grid into triangles.
*
* Uses the surface outline as polygon and then triangulates that.
*
* Allows for optional merging of triangles (this is slow though).
*
* Reference:
* http://code.google.com/p/poly2tri/
*
 * Note: The conversion voxel -> polygon with holes uses a very slow implementation
* with heavy resource usage
*/
public final class Grid2TriPolySlow {
// initialize the parameter block
private final static ParameterBlockJAI pb = new ParameterBlockJAI("Vectorize");
static {
pb.setParameter("outsideValues", Collections.singleton(0));
}
// helper - converts "black and white" image into vector representation
@SuppressWarnings("unchecked")
public static Collection<Polygon> doVectorize(RenderedImage src) {
pb.setSource("source0", src);
        // Get the destination image: this is the unmodified source image data
// plus a property for the generated vectors
RenderedOp dest = JAI.create("Vectorize", pb);
pb.removeSources(); // free data (references)
// Get the vectors
Object property = dest.getProperty(VectorizeDescriptor.VECTOR_PROPERTY_NAME);
// Note: this is unchecked (but should be fine) - order doesn't matter
return (Collection<Polygon>)property;
}
    // we need only one context for all conversions (faster)
private final static TriangulationContext tcx = Poly2Tri.createContext(TriangulationAlgorithm.DTSweep);
// interpolation value (to avoid duplicate values as poly2tri can't handle those)
public final static float INTERP = 0.000001f;
// helper - method that merges two arraylists at a given point and also interpolates the point
// Note: Assumes that the point only exists once in each list
// Example: (1,2,3,4,5), (6,7,4,9,10), 4 should result in (1,2,3,4,6,7,~4,9,10)
// where the second 4 is interpolated with the interp value from above
    // Note: The interpolation is done in the "correct" direction to prevent
// overlap of the areas that are described by the two arraylists
public static ArrayList<PolygonPoint> mergeInterp(ArrayList<PolygonPoint> listA,
ArrayList<PolygonPoint> listB, PolygonPoint p) {
ArrayList<PolygonPoint> result = new ArrayList<PolygonPoint>();
// true once the point was found in the inner list
boolean found = false;
// counter that indicates how many values were inserted from
// the second list after the point was found in it
int count = 0;
// loop over first list
for (int i = 0, size = listA.size(); i < size; i++) {
// add point to new list
PolygonPoint pA = listA.get(i);
result.add(pA);
// check if this is the merge point
if (pA.getX() == p.getX() && pA.getY() == p.getY()) {
// loop over second list
for (PolygonPoint pB : listB) {
if (pB.getX() == p.getX() && pB.getY() == p.getY()) {
// this is the merge point in the inner list
found = true;
} else {
// check if we already found the point in the second list
if (found) {
count++;
result.add(i + count, pB);
} else {
result.add(pB);
}
}
}
                // interpolate the second occurrence of the merge point (in the correct direction!)
PolygonPoint refPoint = i + 1 < size ? listA.get(i + 1) : listA.get(0);
double x = pA.getX();
double y = pA.getY();
// interpolate x value if appropriate
if (refPoint.getX() > x) {
x += INTERP;
} else if (refPoint.getX() < x) {
x -= INTERP;
} else {
// interpolate y value
if (refPoint.getY() > y) {
y += INTERP;
} else if (refPoint.getY() < y) {
y -= INTERP;
}
}
// add the interpolated point
result.add(new PolygonPoint(x, y));
}
}
return result;
}
// helper - Merge triangles if they form one big triangle
private static DelaunayTriangle reduce(DelaunayTriangle triA, DelaunayTriangle triB) {
ArrayList<TriangulationPoint> newTri = new ArrayList<TriangulationPoint>();
// compute which values are in both triangles
boolean[] foundA = new boolean[3];
boolean[] foundB = new boolean[3];
int found = 0;
for (int i = 0; i < 3; i++) {
TriangulationPoint p1 = triA.points[i];
newTri.add(p1);
for (int j = 0; j < 3; j++) {
TriangulationPoint p2 = triB.points[j];
if (p1.equals(p2)) {
foundA[i] = true;
foundB[j] = true;
found++;
}
}
}
if (found != 2) {
return null;
}
// create a triangle with four points and check if we can
// merge this into a "real" triangle
// the four point triangle always looks like this: n - f - n - f
for (int i = 0; i < 3; i++) {
if (!foundB[i]) {
if (!foundA[0]) {
newTri.add(2, triB.points[i]);
} else if (!foundA[1]) {
newTri.add(0, triB.points[i]);
} else {
newTri.add(0, newTri.remove(2));
newTri.add(2, triB.points[i]);
}
}
}
// check if we can remove a point
TriangulationPoint p1 = newTri.get(0);
TriangulationPoint p2 = newTri.get(1);
TriangulationPoint p3 = newTri.get(2);
float derivative1 = (p2.getYf() - p1.getYf())/(p2.getXf() - p1.getXf());
float derivative2 = (p3.getYf() - p1.getYf())/(p3.getXf() - p1.getXf());
if (Math.abs(derivative1 - derivative2) < 0.001) {
return new DelaunayTriangle(p1, p3, newTri.get(3));
}
p2 = newTri.get(3);
derivative1 = (p1.getYf() - p2.getYf())/(p1.getXf() - p2.getXf());
derivative2 = (p3.getYf() - p2.getYf())/(p3.getXf() - p2.getXf());
if (Math.abs(derivative1 - derivative2) < 0.001) {
return new DelaunayTriangle(p1, newTri.get(1), p3);
}
return null;
}
// helper - compress the triangles in list (merge what is possible)
private static List<DelaunayTriangle> reduce(List<DelaunayTriangle> toMerge) {
// loop over all entries
for (int i = 0; i < toMerge.size() - 1; i++) {
DelaunayTriangle tri = toMerge.get(i);
// check all neighbours
for (int j = i + 1; j < toMerge.size(); j++) {
DelaunayTriangle triN = toMerge.get(j);
// check if we can merge with the neighbour
DelaunayTriangle merged = reduce(tri, triN);
if (merged != null) {
// set merged triangle and remove neighbour
toMerge.set(i, merged);
toMerge.remove(j);
i--;
break;
}
}
}
return toMerge;
}
// triangulate a polygon
// Note: Since poly2tri has problems with duplicate points in the polygon data
// we need to "fix" that by merging border holes into the polygon outline
// and also merging bordering inside holes. Duplicate points are moved apart
// so that no area intersection is created.
public static ArrayList<DelaunayTriangle> triangulate(Collection<Polygon> polys, boolean triangleReduction) {
ArrayList<DelaunayTriangle> result = new ArrayList<DelaunayTriangle>();
        // loop over all polygons (a polygon consists of an exterior ring and interior rings)
for (Polygon poly : polys) {
// stores and manages all seen points
StringIndexer indexer = new StringIndexer();
// stores the "fixed" polygon (zero entry is outside, others are holes)
HashMap<Integer, ArrayList<PolygonPoint>> polygon = new HashMap<Integer, ArrayList<PolygonPoint>>();
// initialize
ArrayList<PolygonPoint> active = new ArrayList<PolygonPoint>();
int activeId = 0;
polygon.put(activeId, active);
// loop over polygon outline (has no clashing points)
Coordinate[] coordinates = poly.getExteriorRing().getCoordinates();
for (int i = 0; i < coordinates.length - 1; i++) {
// add point to list
PolygonPoint point = new PolygonPoint(coordinates[i].x, coordinates[i].y);
active.add(point);
// index the point
indexer.index(point.toString(), activeId);
}
// loop over all holes
for (int n = 0, size = poly.getNumInteriorRing(); n < size; n++) {
// create new active point list
active = new ArrayList<PolygonPoint>();
activeId++;
// not empty iff this point was seen
ArrayList<Integer> seenInList = new ArrayList<Integer>();
ArrayList<PolygonPoint> seenPointsList = new ArrayList<PolygonPoint>();
boolean needToMerge = false;
// loop over all points in this hole
coordinates = poly.getInteriorRingN(n).getCoordinates();
for (int i = 0; i < coordinates.length - 1; i++) {
// add point to list (holes)
PolygonPoint point = new PolygonPoint(coordinates[i].x, coordinates[i].y);
active.add(point);
// check if this needs merging
Integer seenInTmp = indexer.getIndex(point.toString());
if (seenInTmp != null) {
// store all information we need for merging
seenInList.add(seenInTmp);
seenPointsList.add(point);
needToMerge = true;
} else {
// point is unknown, add to index
indexer.index(point.toString(), activeId);
}
}
// merge
if (needToMerge) {
// initial merge (the active list is not stored in "polygon" yet)
// Note: there might be no points indexed yet with activeId (if all points in hole
// were already indexed before!)
int prevSeenIn = seenInList.get(0);
polygon.put(prevSeenIn, mergeInterp(polygon.get(prevSeenIn), active, seenPointsList.get(0)));
indexer.changeIndex(prevSeenIn, activeId);
// merge further seen points
for (int i = 1; i < seenInList.size(); i++) {
// retrieve merge information
Integer seenIn = seenInList.get(i);
PolygonPoint point = seenPointsList.get(i);
// We always merge to lower id. This is required since the lowest id is
// the exterior ring of the polygon.
int mergeTo = Math.min(seenIn, prevSeenIn);
int mergeFrom = Math.max(seenIn, prevSeenIn);
// further merge
polygon.put(mergeTo, mergeInterp(polygon.get(mergeTo), polygon.get(mergeFrom), point));
indexer.changeIndex(mergeTo, mergeFrom);
// update all remaining merges (the index might no longer exist!)
for (int j = i + 1; j < seenInList.size(); j++) {
if (seenInList.get(j) == mergeFrom) {
seenInList.set(j, mergeTo);
}
}
// remove old list
polygon.remove(mergeFrom);
// store the id that we previously merged to (for next merge)
prevSeenIn = mergeTo;
}
} else {
polygon.put(activeId, active);
}
}
// convert to polygon from raw data (zero is always the id that contains the exterior of the polygon)
org.poly2tri.geometry.polygon.Polygon polyR = new org.poly2tri.geometry.polygon.Polygon(polygon.remove(0));
for (ArrayList<PolygonPoint> hole : polygon.values()) {
polyR.addHole(new org.poly2tri.geometry.polygon.Polygon(hole));
}
// do the triangulation and add the triangles
tcx.prepareTriangulation(polyR);
Poly2Tri.triangulate(tcx);
tcx.clear();
if (triangleReduction) {
result.addAll(reduce(polyR.getTriangles()));
} else {
result.addAll(polyR.getTriangles());
}
}
return result;
}
}
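// Usage sketch (hypothetical wiring; "binaryImage" is an assumed RenderedImage input):
//   Collection<Polygon> polys = Grid2TriPolySlow.doVectorize(binaryImage);
//   ArrayList<DelaunayTriangle> tris = Grid2TriPolySlow.triangulate(polys, true);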
| 6,927 |
1,156 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import random
import unittest
import numpy as np
import torch
from reagent.core import types as rlt
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.ope_adapter import (
OPEstimatorAdapter,
SequentialOPEstimatorAdapter,
)
from reagent.ope.estimators.contextual_bandits_estimators import (
DMEstimator,
DoublyRobustEstimator,
IPSEstimator,
SwitchDREstimator,
SwitchEstimator,
)
from reagent.ope.estimators.sequential_estimators import (
DoublyRobustEstimator as SeqDREstimator,
EpsilonGreedyRLPolicy,
RandomRLPolicy,
RLEstimatorInput,
)
from reagent.ope.estimators.types import Action, ActionSpace
from reagent.ope.test.envs import PolicyLogGenerator
from reagent.ope.test.gridworld import GridWorld, NoiseGridWorldModel
from reagent.ope.trainers.rl_tabular_trainers import (
DPTrainer,
DPValueFunction,
TabularPolicy,
)
from reagent.test.evaluation.test_evaluation_data_page import (
FakeSeq2SlateRewardNetwork,
FakeSeq2SlateTransformerNet,
)
logger = logging.getLogger(__name__)
def rlestimator_input_to_edp(
input: RLEstimatorInput, num_actions: int
) -> EvaluationDataPage:
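    """Flatten an RLEstimatorInput log into an EvaluationDataPage.

    Produces one row per timestep across all logged MDPs, filling the logged
    propensities/rewards and the target policy's propensities and values.
    """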
mdp_ids = []
logged_propensities = []
logged_rewards = []
action_mask = []
model_propensities = []
model_values = []
for mdp in input.log:
mdp_id = len(mdp_ids)
for t in mdp:
mdp_ids.append(mdp_id)
logged_propensities.append(t.action_prob)
logged_rewards.append(t.reward)
assert t.action is not None
action_mask.append(
[1 if x == t.action.value else 0 for x in range(num_actions)]
)
assert t.last_state is not None
model_propensities.append(
[
input.target_policy(t.last_state)[Action(x)]
for x in range(num_actions)
]
)
assert input.value_function is not None
model_values.append(
[
input.value_function(t.last_state, Action(x))
for x in range(num_actions)
]
)
return EvaluationDataPage(
mdp_id=torch.tensor(mdp_ids).reshape(len(mdp_ids), 1),
logged_propensities=torch.tensor(logged_propensities).reshape(
(len(logged_propensities), 1)
),
logged_rewards=torch.tensor(logged_rewards).reshape((len(logged_rewards), 1)),
action_mask=torch.tensor(action_mask),
model_propensities=torch.tensor(model_propensities),
model_values=torch.tensor(model_values),
sequence_number=torch.tensor([]),
model_rewards=torch.tensor([]),
model_rewards_for_logged_action=torch.tensor([]),
)
class TestOPEModuleAlgs(unittest.TestCase):
GAMMA = 0.9
CPE_PASS_BAR = 1.0
CPE_MAX_VALUE = 2.0
MAX_HORIZON = 1000
NOISE_EPSILON = 0.3
EPISODES = 2
def test_gridworld_sequential_adapter(self):
"""
Create a gridworld environment, logging policy, and target policy
Evaluates target policy using the direct OPE sequential doubly robust estimator,
then transforms the log into an evaluation data page which is passed to the ope adapter.
This test is meant to verify the adaptation of EDPs into RLEstimatorInputs as employed
by ReAgent since ReAgent provides EDPs to Evaluators. Going from EDP -> RLEstimatorInput
is more involved than RLEstimatorInput -> EDP since the EDP does not store the state
at each timestep in each MDP, only the corresponding logged outputs & model outputs.
Thus, the adapter must do some tricks to represent these timesteps as states so the
ope module can extract the correct outputs.
Note that there is some randomness in the model outputs since the model is purposefully
noisy. However, the same target policy is being evaluated on the same logged walks through
the gridworld, so the two results should be close in value (within 1).
"""
random.seed(0)
np.random.seed(0)
torch.random.manual_seed(0)
device = torch.device("cuda") if torch.cuda.is_available() else None
gridworld = GridWorld.from_grid(
[
["s", "0", "0", "0", "0"],
["0", "0", "0", "W", "0"],
["0", "0", "0", "0", "0"],
["0", "W", "0", "0", "0"],
["0", "0", "0", "0", "g"],
],
max_horizon=TestOPEModuleAlgs.MAX_HORIZON,
)
action_space = ActionSpace(4)
opt_policy = TabularPolicy(action_space)
trainer = DPTrainer(gridworld, opt_policy)
        trainer.train(gamma=TestOPEModuleAlgs.GAMMA)
        behavior_policy = RandomRLPolicy(action_space)
target_policy = EpsilonGreedyRLPolicy(
opt_policy, TestOPEModuleAlgs.NOISE_EPSILON
)
model = NoiseGridWorldModel(
gridworld,
action_space,
epsilon=TestOPEModuleAlgs.NOISE_EPSILON,
max_horizon=TestOPEModuleAlgs.MAX_HORIZON,
)
value_func = DPValueFunction(target_policy, model, TestOPEModuleAlgs.GAMMA)
ground_truth = DPValueFunction(
target_policy, gridworld, TestOPEModuleAlgs.GAMMA
)
log = []
        log_generator = PolicyLogGenerator(gridworld, behavior_policy)
num_episodes = TestOPEModuleAlgs.EPISODES
for state in gridworld.states:
for _ in range(num_episodes):
log.append(log_generator.generate_log(state))
estimator_input = RLEstimatorInput(
gamma=TestOPEModuleAlgs.GAMMA,
log=log,
target_policy=target_policy,
value_function=value_func,
ground_truth=ground_truth,
)
edp = rlestimator_input_to_edp(estimator_input, len(model.action_space))
dr_estimator = SeqDREstimator(
weight_clamper=None, weighted=False, device=device
)
module_results = SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate(
dr_estimator.evaluate(estimator_input)
)
adapter_results = SequentialOPEstimatorAdapter(
dr_estimator, TestOPEModuleAlgs.GAMMA, device=device
).estimate(edp)
        self.assertAlmostEqual(
            adapter_results.raw,
            module_results.raw,
            delta=TestOPEModuleAlgs.CPE_PASS_BAR,
            msg=f"OPE adapter results differed too much from underlying module (Diff: {abs(adapter_results.raw - module_results.raw)} > {TestOPEModuleAlgs.CPE_PASS_BAR})",
        )
        self.assertLess(
            adapter_results.raw,
            TestOPEModuleAlgs.CPE_MAX_VALUE,
            msg=f"OPE adapter results are too large ({adapter_results.raw} > {TestOPEModuleAlgs.CPE_MAX_VALUE})",
        )
def test_seq2slate_eval_data_page(self):
"""
Create 3 slate ranking logs and evaluate using Direct Method, Inverse
Propensity Scores, and Doubly Robust.
The logs are as follows:
state: [1, 0, 0], [0, 1, 0], [0, 0, 1]
indices in logged slates: [3, 2], [3, 2], [3, 2]
model output indices: [2, 3], [3, 2], [2, 3]
logged reward: 4, 5, 7
logged propensities: 0.2, 0.5, 0.4
predicted rewards on logged slates: 2, 4, 6
predicted rewards on model outputted slates: 1, 4, 5
predicted propensities: 0.4, 0.3, 0.7
When eval_greedy=True:
Direct Method uses the predicted rewards on model outputted slates.
Thus the result is expected to be (1 + 4 + 5) / 3
Inverse Propensity Scores would scale the reward by 1.0 / logged propensities
whenever the model output slate matches with the logged slate.
Since only the second log matches with the model output, the IPS result
is expected to be 5 / 0.5 / 3
Doubly Robust is the sum of the direct method result and propensity-scaled
reward difference; the latter is defined as:
1.0 / logged_propensities * (logged reward - predicted reward on logged slate)
* Indicator(model slate == logged slate)
Since only the second logged slate matches with the model outputted slate,
the DR result is expected to be (1 + 4 + 5) / 3 + 1.0 / 0.5 * (5 - 4) / 3
When eval_greedy=False:
Only Inverse Propensity Scores would be accurate. Because it would be too
expensive to compute all possible slates' propensities and predicted rewards
for Direct Method.
The expected IPS = (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3
"""
batch_size = 3
state_dim = 3
src_seq_len = 2
tgt_seq_len = 2
candidate_dim = 2
reward_net = FakeSeq2SlateRewardNetwork()
seq2slate_net = FakeSeq2SlateTransformerNet()
src_seq = torch.eye(candidate_dim).repeat(batch_size, 1, 1)
tgt_out_idx = torch.LongTensor([[3, 2], [3, 2], [3, 2]])
tgt_out_seq = src_seq[
torch.arange(batch_size).repeat_interleave(tgt_seq_len),
tgt_out_idx.flatten() - 2,
].reshape(batch_size, tgt_seq_len, candidate_dim)
ptb = rlt.PreprocessedRankingInput(
state=rlt.FeatureData(float_features=torch.eye(state_dim)),
src_seq=rlt.FeatureData(float_features=src_seq),
tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq),
src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len),
tgt_out_idx=tgt_out_idx,
tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]),
slate_reward=torch.tensor([4.0, 5.0, 7.0]),
extras=rlt.ExtraData(
sequence_number=torch.tensor([0, 0, 0]),
mdp_id=np.array(["0", "1", "2"]),
),
)
edp = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net, reward_net, ptb, eval_greedy=True
)
logger.info("---------- Start evaluating eval_greedy=True -----------------")
doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
dm_estimator = OPEstimatorAdapter(DMEstimator())
ips_estimator = OPEstimatorAdapter(IPSEstimator())
switch_estimator = OPEstimatorAdapter(SwitchEstimator())
switch_dr_estimator = OPEstimatorAdapter(SwitchDREstimator())
doubly_robust = doubly_robust_estimator.estimate(edp)
inverse_propensity = ips_estimator.estimate(edp)
direct_method = dm_estimator.estimate(edp)
# Verify that Switch with low exponent is equivalent to IPS
switch_ips = switch_estimator.estimate(edp, exp_base=1)
# Verify that Switch with no candidates is equivalent to DM
switch_dm = switch_estimator.estimate(edp, candidates=0)
# Verify that SwitchDR with low exponent is equivalent to DR
switch_dr_dr = switch_dr_estimator.estimate(edp, exp_base=1)
# Verify that SwitchDR with no candidates is equivalent to DM
switch_dr_dm = switch_dr_estimator.estimate(edp, candidates=0)
logger.info(f"{direct_method}, {inverse_propensity}, {doubly_robust}")
avg_logged_reward = (4 + 5 + 7) / 3
self.assertAlmostEqual(direct_method.raw, (1 + 4 + 5) / 3, delta=1e-6)
self.assertAlmostEqual(
direct_method.normalized, direct_method.raw / avg_logged_reward, delta=1e-6
)
self.assertAlmostEqual(inverse_propensity.raw, 5 / 0.5 / 3, delta=1e-6)
self.assertAlmostEqual(
inverse_propensity.normalized,
inverse_propensity.raw / avg_logged_reward,
delta=1e-6,
)
self.assertAlmostEqual(
doubly_robust.raw, direct_method.raw + 1 / 0.5 * (5 - 4) / 3, delta=1e-6
)
self.assertAlmostEqual(
doubly_robust.normalized, doubly_robust.raw / avg_logged_reward, delta=1e-6
)
self.assertAlmostEqual(switch_ips.raw, inverse_propensity.raw, delta=1e-6)
self.assertAlmostEqual(switch_dm.raw, direct_method.raw, delta=1e-6)
self.assertAlmostEqual(switch_dr_dr.raw, doubly_robust.raw, delta=1e-6)
self.assertAlmostEqual(switch_dr_dm.raw, direct_method.raw, delta=1e-6)
logger.info("---------- Finish evaluating eval_greedy=True -----------------")
logger.info("---------- Start evaluating eval_greedy=False -----------------")
edp = EvaluationDataPage.create_from_tensors_seq2slate(
seq2slate_net, reward_net, ptb, eval_greedy=False
)
doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
dm_estimator = OPEstimatorAdapter(DMEstimator())
ips_estimator = OPEstimatorAdapter(IPSEstimator())
doubly_robust = doubly_robust_estimator.estimate(edp)
inverse_propensity = ips_estimator.estimate(edp)
direct_method = dm_estimator.estimate(edp)
self.assertAlmostEqual(
inverse_propensity.raw,
(0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3,
delta=1e-6,
)
self.assertAlmostEqual(
inverse_propensity.normalized,
inverse_propensity.raw / avg_logged_reward,
delta=1e-6,
)
logger.info("---------- Finish evaluating eval_greedy=False -----------------")
| 6,122 |
1,346 | <reponame>wwjiang007/dal
package com.ctrip.platform.dal.dao.datasource.read;
import com.ctrip.platform.dal.common.enums.SqlType;
import com.ctrip.platform.dal.dao.datasource.read.param.*;
import com.ctrip.platform.dal.dao.helper.SqlUtils;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.*;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
public class GroupPreparedStatement extends GroupStatement implements PreparedStatement {
private int autoGeneratedKeys = -1;
private int[] columnIndexes;
private String[] columnNames;
private List<ParamContext> params = new ArrayList<ParamContext>();
private List<List<ParamContext>> pstBatchedArgs;
private String sql;
public GroupPreparedStatement(GroupConnection connection, String sql) {
super(connection);
this.sql = sql;
}
@Override
public ResultSet executeQuery() throws SQLException {
checkClosed();
closeCurrentResultSet();
Connection conn = this.groupConnection.getRealConnection(sql, false);
return executeQueryOnConnection(conn, sql);
}
private ResultSet executeQueryOnConnection(Connection conn, String sql) throws SQLException {
PreparedStatement pstmt = createPreparedStatementInternal(conn, sql);
setParams(pstmt);
this.currentResultSet = new GroupResultSet(pstmt.executeQuery());
return this.currentResultSet;
}
protected void setParams(PreparedStatement pstmt) throws SQLException {
for (ParamContext paramContext : params) {
paramContext.setParam(pstmt);
}
}
@Override
public int executeUpdate() throws SQLException {
checkClosed();
closeCurrentResultSet();
Connection conn = this.groupConnection.getRealConnection(sql, true);
updateCount = executeUpdateOnConnection(conn);
return updateCount;
}
private int executeUpdateOnConnection(final Connection conn) throws SQLException {
PreparedStatement pstmt = createPreparedStatementInternal(conn, sql);
setParams(pstmt);
return pstmt.executeUpdate();
}
@Override
public void setNull(int parameterIndex, int sqlType) throws SQLException {
params.add(new NullParamContext(parameterIndex, new Object[] { sqlType }));
}
@Override
public void setBoolean(int parameterIndex, boolean x) throws SQLException {
params.add(new BooleanParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setByte(int parameterIndex, byte x) throws SQLException {
params.add(new ByteParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setShort(int parameterIndex, short x) throws SQLException {
params.add(new ShortParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setInt(int parameterIndex, int x) throws SQLException {
params.add(new IntParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setLong(int parameterIndex, long x) throws SQLException {
params.add(new LongParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setFloat(int parameterIndex, float x) throws SQLException {
params.add(new FloatParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setDouble(int parameterIndex, double x) throws SQLException {
params.add(new DoubleParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
params.add(new BigDecimalParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setString(int parameterIndex, String x) throws SQLException {
params.add(new StringParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setBytes(int parameterIndex, byte[] x) throws SQLException {
params.add(new ByteArrayParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setDate(int parameterIndex, Date x) throws SQLException {
params.add(new DateParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setTime(int parameterIndex, Time x) throws SQLException {
params.add(new TimeParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
params.add(new TimestampParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
params.add(new AsciiParamContext(parameterIndex, new Object[] { x, length }));
}
@Override
public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
        params.add(new AsciiParamContext(parameterIndex, new Object[] { x, length }));
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
params.add(new BinaryStreamParamContext(parameterIndex, new Object[] { x, length }));
}
@Override
public void clearParameters() throws SQLException {
params.clear();
}
@Override
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
params.add(new ObjectParamContext(parameterIndex, new Object[] { x, targetSqlType }));
}
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
params.add(new ObjectParamContext(parameterIndex, new Object[] { x }));
}
@Override
public boolean execute() throws SQLException {
SqlType sqlType = SqlUtils.getSqlType(sql);
if (sqlType.isQuery()) {
executeQuery();
return true;
} else {
this.updateCount = executeUpdate();
return false;
}
}
@Override
public void addBatch() throws SQLException {
if (pstBatchedArgs == null) {
pstBatchedArgs = new ArrayList<List<ParamContext>>();
}
List<ParamContext> newArgs = new ArrayList<ParamContext>(params.size());
newArgs.addAll(params);
params.clear();
pstBatchedArgs.add(newArgs);
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
params.add(new CharacterStreamParamContext(parameterIndex, new Object[] { reader, length }));
}
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
params.add(new RefParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setBlob(int parameterIndex, Blob x) throws SQLException {
params.add(new BlobParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setClob(int parameterIndex, Clob x) throws SQLException {
params.add(new ClobParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setArray(int parameterIndex, Array x) throws SQLException {
params.add(new ArrayParamContext(parameterIndex, new Object[] { x }));
}
@Override
public ResultSetMetaData getMetaData() throws SQLException {
throw new UnsupportedOperationException("dal does not support getMetaData");
}
public void setColumnIndexes(int[] columnIndexes) {
this.columnIndexes = columnIndexes;
}
public void setColumnNames(String[] columnNames) {
this.columnNames = columnNames;
}
@Override
public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
params.add(new DateParamContext(parameterIndex, new Object[] { x, cal }));
}
@Override
public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
params.add(new TimeParamContext(parameterIndex, new Object[] { x, cal }));
}
@Override
public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
params.add(new TimestampParamContext(parameterIndex, new Object[] { x, cal }));
}
@Override
public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
params.add(new NullParamContext(parameterIndex, new Object[] { sqlType, typeName }));
}
@Override
public void setURL(int parameterIndex, URL x) throws SQLException {
params.add(new URLParamContext(parameterIndex, new Object[] { x }));
}
@Override
public ParameterMetaData getParameterMetaData() throws SQLException {
throw new UnsupportedOperationException("dal does not support getParameterMetaData");
}
@Override
public void setRowId(int parameterIndex, RowId x) throws SQLException {
params.add(new RowIdParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setNString(int parameterIndex, String value) throws SQLException {
params.add(new NStringParamContext(parameterIndex, new Object[] { value }));
}
@Override
public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
params.add(new NCharacterStreamParamContext(parameterIndex, new Object[] { value, length }));
}
@Override
public void setNClob(int parameterIndex, NClob value) throws SQLException {
params.add(new NClobParamContext(parameterIndex, new Object[] { value }));
}
@Override
public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
params.add(new ClobParamContext(parameterIndex, new Object[] { reader, length }));
}
public void setResultSetHoldability(int resultSetHoldability) {
this.resultSetHoldability = resultSetHoldability;
}
@Override
public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
params.add(new BlobParamContext(parameterIndex, new Object[] { inputStream, length }));
}
@Override
public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
params.add(new NClobParamContext(parameterIndex, new Object[] { reader, length }));
}
@Override
public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
params.add(new SQLXMLParamContext(parameterIndex, new Object[] { xmlObject }));
}
@Override
public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
params.add(new ObjectParamContext(parameterIndex, new Object[] { x, targetSqlType, scaleOrLength }));
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
params.add(new AsciiParamContext(parameterIndex, new Object[] { x, length }));
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
params.add(new BinaryStreamParamContext(parameterIndex, new Object[] { x, length }));
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
params.add(new CharacterStreamParamContext(parameterIndex, new Object[] { reader, length }));
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
params.add(new AsciiParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
params.add(new BinaryStreamParamContext(parameterIndex, new Object[] { x }));
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
params.add(new CharacterStreamParamContext(parameterIndex, new Object[] { reader }));
}
@Override
public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
params.add(new NCharacterStreamParamContext(parameterIndex, new Object[] { value }));
}
@Override
public void setClob(int parameterIndex, Reader reader) throws SQLException {
params.add(new ClobParamContext(parameterIndex, new Object[] { reader }));
}
@Override
public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
params.add(new BlobParamContext(parameterIndex, new Object[] { inputStream }));
}
@Override
public void setNClob(int parameterIndex, Reader reader) throws SQLException {
params.add(new NClobParamContext(parameterIndex, new Object[] { reader }));
}
@Override
public ResultSet executeQuery(String sql) throws SQLException {
checkClosed();
closeCurrentResultSet();
Connection conn = this.groupConnection.getRealConnection(sql, false);
return executeQueryOnConnection(conn, sql);
}
@Override
public int[] executeBatch() throws SQLException {
try {
checkClosed();
closeCurrentResultSet();
if (pstBatchedArgs == null || pstBatchedArgs.isEmpty()) {
return new int[0];
}
Connection conn = this.groupConnection.getRealConnection(sql, true);
return executeBatchOnConnection(conn);
} finally {
if (pstBatchedArgs != null) {
pstBatchedArgs.clear();
}
}
}
private int[] executeBatchOnConnection(Connection conn) throws SQLException {
PreparedStatement pstmt = createPreparedStatementInternal(conn, sql);
for (List<ParamContext> tmpParams : pstBatchedArgs) {
setBatchParams(pstmt, tmpParams);
pstmt.addBatch();
}
return pstmt.executeBatch();
}
private void setBatchParams(PreparedStatement pstmt, List<ParamContext> params) throws SQLException {
for (ParamContext param : params) {
param.setParam(pstmt);
}
}
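    // Creates the underlying PreparedStatement on the chosen physical connection,
    // honoring whichever generated-keys or result-set options were configured on
    // this wrapper, and propagates the timeout/fetch-size/max-rows settings.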
private PreparedStatement createPreparedStatementInternal(Connection conn, String sql) throws SQLException {
PreparedStatement pstmt;
if (autoGeneratedKeys != -1) {
pstmt = conn.prepareStatement(sql, autoGeneratedKeys);
} else if (columnIndexes != null) {
pstmt = conn.prepareStatement(sql, columnIndexes);
} else if (columnNames != null) {
pstmt = conn.prepareStatement(sql, columnNames);
} else {
int resultSetHoldability = this.resultSetHoldability;
if (resultSetHoldability == -1) {
resultSetHoldability = conn.getHoldability();
}
pstmt = conn.prepareStatement(sql, this.resultSetType, this.resultSetConcurrency, resultSetHoldability);
}
pstmt.setQueryTimeout(queryTimeout);
pstmt.setFetchSize(fetchSize);
pstmt.setMaxRows(maxRows);
setInnerStatement(pstmt);
return pstmt;
}
public void setAutoGeneratedKeys(int autoGeneratedKeys) {
this.autoGeneratedKeys = autoGeneratedKeys;
}
@Override
public ResultSet getGeneratedKeys() throws SQLException {
return null;
}
}
| 5,622 |
14,668 | <gh_stars>1000+
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/base/datagram_buffer.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace net {
namespace test {
const size_t kMaxBufferSize = 1024;
class DatagramBufferTest : public testing::Test {
public:
DatagramBufferTest() : pool_(kMaxBufferSize) {}
DatagramBufferPool pool_;
};
TEST_F(DatagramBufferTest, EnqueueCopiesData) {
DatagramBuffers buffers;
const char data[] = "foo";
pool_.Enqueue(data, sizeof(data), &buffers);
EXPECT_EQ(sizeof(data), buffers.front()->length());
EXPECT_NE(data, buffers.front()->data());
EXPECT_EQ(0, memcmp(data, buffers.front()->data(), sizeof(data)));
}
TEST_F(DatagramBufferTest, DatagramBufferPoolRecycles) {
DatagramBuffers buffers;
const char data1[] = "foo";
pool_.Enqueue(data1, sizeof(data1), &buffers);
DatagramBuffer* buffer1_ptr = buffers.back().get();
EXPECT_EQ(1u, buffers.size());
const char data2[] = "bar";
pool_.Enqueue(data2, sizeof(data2), &buffers);
DatagramBuffer* buffer2_ptr = buffers.back().get();
EXPECT_EQ(2u, buffers.size());
pool_.Dequeue(&buffers);
EXPECT_EQ(0u, buffers.size());
const char data3[] = "baz";
pool_.Enqueue(data3, sizeof(data3), &buffers);
EXPECT_EQ(1u, buffers.size());
EXPECT_EQ(buffer1_ptr, buffers.back().get());
const char data4[] = "bag";
pool_.Enqueue(data4, sizeof(data4), &buffers);
EXPECT_EQ(2u, buffers.size());
EXPECT_EQ(buffer2_ptr, buffers.back().get());
}
} // namespace test
} // namespace net
| 594 |
739 | {
"name": "<NAME>",
"img": "https://olvanotjeanclaude.github.io/PHOTOS/olvanot.jpg",
"email":"",
"links": {
"website": "https://olvanotjeanclaude.github.io/",
"linkedin": "https://www.linkedin.com/in/rakotonirina-olvanot-jean-claude-627a69204/",
"github": "https://github.com/olvanotjeanclaude"
},
"jobTitle": "Full stack developer",
"location": {
"city": "Bursa",
"state": "",
"country": "Turkey"
}
} | 235 |
1,014 | <filename>nn-core/src/main/java/com/github/neuralnetworks/builder/layer/SigmoidLayerBuilder.java
package com.github.neuralnetworks.builder.layer;
import com.github.neuralnetworks.architecture.Layer;
import com.github.neuralnetworks.architecture.NeuralNetworkImpl;
import com.github.neuralnetworks.builder.layer.structure.NamedSingleInputLayerBuilder;
import com.github.neuralnetworks.training.Hyperparameters;
/**
* @author tmey
*/
public class SigmoidLayerBuilder extends NamedSingleInputLayerBuilder
{
    public static final String DEFAULT_LAYER_NAME = "SigmoidLayer";
public SigmoidLayerBuilder()
{
super(DEFAULT_LAYER_NAME);
}
@Override
protected Layer build(NeuralNetworkImpl neuralNetwork, String newLayerName, Layer inputLayer, Hyperparameters hyperparameters)
{
// TODO write content
return null;
}
@Override
public String toString()
{
final StringBuilder sb = new StringBuilder("SigmoidLayerBuilder{");
sb.append("name=").append(getName());
sb.append(", inputLayer=").append(getInputLayerName());
sb.append('}');
return sb.toString();
}
}
| 351 |
3,269 | <reponame>Sourav692/FAANG-Interview-Preparation<filename>Algo and DSA/LeetCode-Solutions-master/Python/exam-room.py
# Time: seat: O(logn), amortized
# leave: O(logn)
# Space: O(n)
import heapq
class ExamRoom(object):
def __init__(self, N):
"""
:type N: int
"""
self.__num = N
self.__seats = {-1: [-1, self.__num], self.__num: [-1, self.__num]}
self.__max_heap = [(-self.__distance((-1, self.__num)), -1, self.__num)]
def seat(self):
"""
:rtype: int
"""
while self.__max_heap[0][1] not in self.__seats or \
self.__max_heap[0][2] not in self.__seats or \
self.__seats[self.__max_heap[0][1]][1] != self.__max_heap[0][2] or \
self.__seats[self.__max_heap[0][2]][0] != self.__max_heap[0][1]:
heapq.heappop(self.__max_heap) # lazy deletion
_, left, right = heapq.heappop(self.__max_heap)
mid = 0 if left == -1 \
else self.__num-1 if right == self.__num \
else (left+right) // 2
self.__seats[mid] = [left, right]
heapq.heappush(self.__max_heap, (-self.__distance((left, mid)), left, mid))
heapq.heappush(self.__max_heap, (-self.__distance((mid, right)), mid, right))
self.__seats[left][1] = mid
self.__seats[right][0] = mid
return mid
def leave(self, p):
"""
:type p: int
:rtype: void
"""
left, right = self.__seats[p]
self.__seats.pop(p)
self.__seats[left][1] = right
self.__seats[right][0] = left
heapq.heappush(self.__max_heap, (-self.__distance((left, right)), left, right))
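    # Distance gained by seating inside a segment: segments touching a wall
    # use the full gap to the wall, interior segments use half the gap.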
def __distance(self, segment):
return segment[1]-segment[0]-1 if segment[0] == -1 or segment[1] == self.__num \
else (segment[1]-segment[0]) // 2
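# Usage sketch (matches the LeetCode 855 example):
#   room = ExamRoom(10)
#   room.seat()   # -> 0
#   room.seat()   # -> 9
#   room.seat()   # -> 4
#   room.seat()   # -> 2
#   room.leave(4)
#   room.seat()   # -> 5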
| 970 |
1,292 | from scipy import stats
from tabpy.models.utils import setup_utils
def ttest(_arg1, _arg2):
"""
T-Test is a statistical hypothesis test that is used to compare
two sample means or a sample’s mean against a known population mean.
For more information on the function and how to use it please refer
to tabpy-tools.md
"""
# one sample test with mean
if len(_arg2) == 1:
        test_stat, p_value = stats.ttest_1samp(_arg1, _arg2[0])
return p_value
# two sample t-test where _arg1 is numeric and _arg2 is a binary factor
elif len(set(_arg2)) == 2:
# each sample in _arg1 needs to have a corresponding classification
# in _arg2
        if len(_arg1) != len(_arg2):
            raise ValueError("_arg1 and _arg2 must have the same length")
class1, class2 = set(_arg2)
sample1 = []
sample2 = []
for i in range(len(_arg1)):
if _arg2[i] == class1:
sample1.append(_arg1[i])
else:
sample2.append(_arg1[i])
test_stat, p_value = stats.ttest_ind(sample1, sample2, equal_var=False)
return p_value
# arg1 is a sample and arg2 is a sample
else:
test_stat, p_value = stats.ttest_ind(_arg1, _arg2, equal_var=False)
return p_value
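# Illustrative calls (hypothetical data):
#   ttest([1.2, 2.1, 3.3], [2.0])            # one-sample test against mean 2.0
#   ttest([1.2, 2.1, 3.3], ["a", "b", "a"])  # two-sample test split by a binary factor
#   ttest([1.2, 2.1, 3.3], [0.9, 2.4, 2.8])  # two independent samples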
if __name__ == "__main__":
setup_utils.deploy_model("ttest", ttest, "Returns the p-value form a t-test")
| 597 |
323 | from parsl.config import Config
from parsl.data_provider.file_noop import NoOpFileStaging
from parsl.data_provider.ftp import FTPInTaskStaging
from parsl.executors.threads import ThreadPoolExecutor
config = Config(
executors=[
ThreadPoolExecutor(
            label='local_threads_ftp_in_task',
storage_access=[FTPInTaskStaging(), NoOpFileStaging()]
)
]
)
| 159 |
4,050 | <gh_stars>1000+
/*
* Copyright 2021 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.container;
import azkaban.execapp.FlowPreparerTestBase;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutorManagerException;
import azkaban.jobtype.javautils.FileUtils;
import azkaban.utils.DependencyTransferManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.*;
public class ContainerizedFlowPreparerTest extends FlowPreparerTestBase {
private ContainerizedFlowPreparer instance;
private final File execDir = new File(String.valueOf(
ContainerizedFlowPreparer.getCurrentDir()), ContainerizedFlowPreparer.PROJECT_DIR);
@Before
public void setUp() throws Exception {
this.dependencyTransferManager = mock(DependencyTransferManager.class);
this.instance = spy(
new ContainerizedFlowPreparer(createMockStorageManager(),
this.dependencyTransferManager));
}
@Test
public void testSetupContainerizedFlow() throws ExecutorManagerException {
final ExecutableFlow executableFlow = mock(ExecutableFlow.class);
when(executableFlow.getExecutionId()).thenReturn(12345);
when(executableFlow.getProjectId()).thenReturn(FAT_PROJECT_ID);
when(executableFlow.getVersion()).thenReturn(34);
this.instance.setup(executableFlow);
assertTrue(new File(execDir, SAMPLE_FLOW_01).exists());
}
@After
public void cleanUp() {
FileUtils.deleteFileOrDirectory(execDir);
}
}
| 663 |
32,544 | <reponame>DBatOWL/tutorials
package com.baeldung.resttemplate.json.consumer.service;
import org.junit.Before;
import org.junit.Test;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.test.web.client.ExpectedCount;
import org.springframework.test.web.client.MockRestServiceServer;
import org.springframework.web.client.RestTemplate;
import java.util.Arrays;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.springframework.test.web.client.match.MockRestRequestMatchers.method;
import static org.springframework.test.web.client.match.MockRestRequestMatchers.requestTo;
import static org.springframework.test.web.client.response.MockRestResponseCreators.withStatus;
@SpringBootTest
public class UserConsumerServiceImplUnitTest {
private static String USER_JSON = "[{\"id\":1,\"name\":\"user1\",\"addressList\":[{\"addressLine1\":\"address1_addressLine1\",\"addressLine2\":\"address1_addressLine2\",\"town\":\"address1_town\",\"postCode\":\"user1_address1_postCode\"}," +
"{\"addressLine1\":\"address2_addressLine1\",\"addressLine2\":\"address2_addressLine2\",\"town\":\"address2_town\",\"postCode\":\"user1_address2_postCode\"}]}," +
"{\"id\":2,\"name\":\"user2\",\"addressList\":[{\"addressLine1\":\"address1_addressLine1\",\"addressLine2\":\"address1_addressLine2\",\"town\":\"address1_town\",\"postCode\":\"user2_address1_postCode\"}]}]";
private MockRestServiceServer mockServer;
private final RestTemplate restTemplate = new RestTemplate();
private final UserConsumerService tested = new UserConsumerServiceImpl(restTemplate);
@Before
public void init() {
mockServer = MockRestServiceServer.createServer(restTemplate);
}
@Test
public void whenProcessUserDataAsObjects_thenOK() {
String url = "http://localhost:8080/users";
List<String> expected = Arrays.asList("user1", "user2");
mockServer.expect(ExpectedCount.once(),
requestTo(url))
.andExpect(method(HttpMethod.GET))
.andRespond(withStatus(HttpStatus.OK)
.contentType(MediaType.APPLICATION_JSON)
.body(USER_JSON));
List<String> actual = tested.processUserDataFromObjectArray();
mockServer.verify();
assertThat(actual).containsExactly(expected.get(0), expected.get(1));
}
@Test
public void whenProcessUserDataAsArray_thenOK() {
String url = "http://localhost:8080/users";
List<String> expected = Arrays.asList("user1", "user2");
mockServer.expect(ExpectedCount.once(),
requestTo(url))
.andExpect(method(HttpMethod.GET))
.andRespond(withStatus(HttpStatus.OK)
.contentType(MediaType.APPLICATION_JSON)
.body(USER_JSON));
List<String> actual = tested.processUserDataFromUserArray();
mockServer.verify();
assertThat(actual).containsExactly(expected.get(0), expected.get(1));
}
@Test
public void whenProcessUserDataAsList_thenOK() {
String url = "http://localhost:8080/users";
List<String> expected = Arrays.asList("user1", "user2");
mockServer.expect(ExpectedCount.once(),
requestTo(url))
.andExpect(method(HttpMethod.GET))
.andRespond(withStatus(HttpStatus.OK)
.contentType(MediaType.APPLICATION_JSON)
.body(USER_JSON));
List<String> actual = tested.processUserDataFromUserList();
mockServer.verify();
assertThat(actual).containsExactly(expected.get(0), expected.get(1));
}
@Test
public void whenProcessNestedUserDataFromArray_thenOK() {
String url = "http://localhost:8080/users";
List<String> expected = Arrays.asList("user1_address1_postCode", "user1_address2_postCode", "user2_address1_postCode");
mockServer.expect(ExpectedCount.once(),
requestTo(url))
.andExpect(method(HttpMethod.GET))
.andRespond(withStatus(HttpStatus.OK)
.contentType(MediaType.APPLICATION_JSON)
.body(USER_JSON));
List<String> actual = tested.processNestedUserDataFromUserArray();
mockServer.verify();
assertThat(actual).containsExactly(expected.get(0), expected.get(1), expected.get(2));
}
@Test
public void whenProcessNestedUserDataFromList_thenOK() {
String url = "http://localhost:8080/users";
List<String> expected = Arrays.asList("user1_address1_postCode", "user1_address2_postCode", "user2_address1_postCode");
mockServer.expect(ExpectedCount.once(),
requestTo(url))
.andExpect(method(HttpMethod.GET))
.andRespond(withStatus(HttpStatus.OK)
.contentType(MediaType.APPLICATION_JSON)
.body(USER_JSON));
List<String> actual = tested.processNestedUserDataFromUserList();
mockServer.verify();
assertThat(actual).containsExactly(expected.get(0), expected.get(1), expected.get(2));
}
} | 2,033 |
521 | /**
* @file include/retdec/config/language.h
* @brief Decompilation configuration manipulation: language.
* @copyright (c) 2017 Avast Software, licensed under the MIT license
*/
#ifndef RETDEC_CONFIG_LANGUAGE_H
#define RETDEC_CONFIG_LANGUAGE_H
#include <string>
#include "retdec/config/base.h"
namespace retdec {
namespace config {
/**
* Represents input binary's language.
* Language's name is its unique ID.
*/
class Language
{
public:
explicit Language(const std::string& langName);
static Language fromJsonValue(const Json::Value& val);
Json::Value getJsonValue() const;
/// @name Language query methods.
/// @{
bool isUnknown() const;
bool isKnown() const;
bool isModuleCountSet() const;
bool isBytecode() const;
/// @}
/// @name Language set methods.
/// @{
void setIsUnknown();
void setModuleCount(unsigned c);
void setIsBytecode(bool b);
/// @}
/// @name Language get methods.
/// @{
std::string getId() const;
std::string getName() const;
unsigned getModuleCount() const;
/// @}
bool operator<(const Language& val) const;
bool operator==(const Language& val) const;
private:
/// Unique ID.
std::string _name;
int _moduleCount = -1;
bool _bytecode = false;
};
/**
* Set container with languages' names as unique ID (set key).
* See Language class for details.
*/
class LanguageContainer : public BaseSetContainer<Language>
{
public:
const Language* getFirstBytecode() const;
bool hasLanguage(const std::string& sub) const;
};
} // namespace config
} // namespace retdec
#endif
| 527 |
7,482 | <filename>bsp/stm32/libraries/STM32MPxx_HAL/STM32MP1xx_HAL_Driver/Inc/stm32mp1xx_hal_pwr_ex.h<gh_stars>1000+
/**
******************************************************************************
* @file stm32mp1xx_hal_pwr_ex.h
* @author MCD Application Team
* @brief Header file of PWR HAL Extension module.
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2019 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __STM32MP1xx_HAL_PWR_EX_H
#define __STM32MP1xx_HAL_PWR_EX_H
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "stm32mp1xx_hal_def.h"
/** @addtogroup STM32MP1xx_HAL_Driver
* @{
*/
/** @addtogroup PWREx
* @{
*/
/* Exported types ------------------------------------------------------------*/
/** @defgroup PWREx_Exported_Types PWREx Exported Types
* @{
*/
/**
* @brief PWREx AVD configuration structure definition
*/
typedef struct
{
uint32_t AVDLevel; /*!< AVDLevel: Specifies the AVD detection level.
This parameter can be a value of @ref PWREx AVD detection level */
uint32_t Mode; /*!< Mode: Specifies the operating mode for the selected pins.
This parameter can be a value of @ref PWREx AVD Mode */
} PWREx_AVDTypeDef;
/**
* @}
*/
/* Exported constants --------------------------------------------------------*/
/** @defgroup PWREx_Exported_Constants PWREx Exported Constants
* @{
*/
/** @defgroup PWREx_Exported_Constants_Group1 PWREx_WakeUp_Pins
* @{
*/
#ifdef CORE_CA7
/* Defines for legacy purpose */
#define PWR_WAKEUP_PIN_MASK PWR_MPUWKUPENR_WKUPEN
#define PWR_WAKEUP_PIN6 PWR_MPUWKUPENR_WKUPEN_6
#define PWR_WAKEUP_PIN5 PWR_MPUWKUPENR_WKUPEN_5
#define PWR_WAKEUP_PIN4 PWR_MPUWKUPENR_WKUPEN_4
#define PWR_WAKEUP_PIN3 PWR_MPUWKUPENR_WKUPEN_3
#define PWR_WAKEUP_PIN2 PWR_MPUWKUPENR_WKUPEN_2
#define PWR_WAKEUP_PIN1 PWR_MPUWKUPENR_WKUPEN_1
/* Defines for legacy purpose */
/* High level + No pull */
#define PWR_WAKEUP_PIN6_HIGH PWR_MPUWKUPENR_WKUPEN_6
#define PWR_WAKEUP_PIN5_HIGH PWR_MPUWKUPENR_WKUPEN_5
#define PWR_WAKEUP_PIN4_HIGH PWR_MPUWKUPENR_WKUPEN_4
#define PWR_WAKEUP_PIN3_HIGH PWR_MPUWKUPENR_WKUPEN_3
#define PWR_WAKEUP_PIN2_HIGH PWR_MPUWKUPENR_WKUPEN_2
#define PWR_WAKEUP_PIN1_HIGH PWR_MPUWKUPENR_WKUPEN_1
/* Low level + No pull */
#define PWR_WAKEUP_PIN6_LOW (uint32_t)(PWR_WKUPCR_WKUPP_6 | PWR_MPUWKUPENR_WKUPEN_6)
#define PWR_WAKEUP_PIN5_LOW (uint32_t)(PWR_WKUPCR_WKUPP_5 | PWR_MPUWKUPENR_WKUPEN_5)
#define PWR_WAKEUP_PIN4_LOW (uint32_t)(PWR_WKUPCR_WKUPP_4 | PWR_MPUWKUPENR_WKUPEN_4)
#define PWR_WAKEUP_PIN3_LOW (uint32_t)(PWR_WKUPCR_WKUPP_3 | PWR_MPUWKUPENR_WKUPEN_3)
#define PWR_WAKEUP_PIN2_LOW (uint32_t)(PWR_WKUPCR_WKUPP_2 | PWR_MPUWKUPENR_WKUPEN_2)
#define PWR_WAKEUP_PIN1_LOW (uint32_t)(PWR_WKUPCR_WKUPP_1 | PWR_MPUWKUPENR_WKUPEN_1)
#endif /*CORE_CA7*/
#ifdef CORE_CM4
/* Defines for legacy purpose */
#define PWR_WAKEUP_PIN_MASK PWR_MCUWKUPENR_WKUPEN
#define PWR_WAKEUP_PIN6 PWR_MCUWKUPENR_WKUPEN6
#define PWR_WAKEUP_PIN5 PWR_MCUWKUPENR_WKUPEN5
#define PWR_WAKEUP_PIN4 PWR_MCUWKUPENR_WKUPEN4
#define PWR_WAKEUP_PIN3 PWR_MCUWKUPENR_WKUPEN3
#define PWR_WAKEUP_PIN2 PWR_MCUWKUPENR_WKUPEN2
#define PWR_WAKEUP_PIN1 PWR_MCUWKUPENR_WKUPEN1
/* Defines for legacy purpose */
/* High level + No pull */
#define PWR_WAKEUP_PIN6_HIGH PWR_MCUWKUPENR_WKUPEN6
#define PWR_WAKEUP_PIN5_HIGH PWR_MCUWKUPENR_WKUPEN5
#define PWR_WAKEUP_PIN4_HIGH PWR_MCUWKUPENR_WKUPEN4
#define PWR_WAKEUP_PIN3_HIGH PWR_MCUWKUPENR_WKUPEN3
#define PWR_WAKEUP_PIN2_HIGH PWR_MCUWKUPENR_WKUPEN2
#define PWR_WAKEUP_PIN1_HIGH PWR_MCUWKUPENR_WKUPEN1
/* Low level + No pull */
#define PWR_WAKEUP_PIN6_LOW (uint32_t)(PWR_WKUPCR_WKUPP_6 | PWR_MCUWKUPENR_WKUPEN6)
#define PWR_WAKEUP_PIN5_LOW (uint32_t)(PWR_WKUPCR_WKUPP_5 | PWR_MCUWKUPENR_WKUPEN5)
#define PWR_WAKEUP_PIN4_LOW (uint32_t)(PWR_WKUPCR_WKUPP_4 | PWR_MCUWKUPENR_WKUPEN4)
#define PWR_WAKEUP_PIN3_LOW (uint32_t)(PWR_WKUPCR_WKUPP_3 | PWR_MCUWKUPENR_WKUPEN3)
#define PWR_WAKEUP_PIN2_LOW (uint32_t)(PWR_WKUPCR_WKUPP_2 | PWR_MCUWKUPENR_WKUPEN2)
#define PWR_WAKEUP_PIN1_LOW (uint32_t)(PWR_WKUPCR_WKUPP_1 | PWR_MCUWKUPENR_WKUPEN1)
#endif /*CORE_CM4*/
/* High level + Pull-up */
#define PWR_WAKEUP_PIN6_HIGH_PULLUP (uint32_t)(PWR_MPUWKUPENR_WKUPEN_6 | PWR_WKUPCR_WKUPPUPD6_0 )
#define PWR_WAKEUP_PIN5_HIGH_PULLUP (uint32_t)(PWR_MPUWKUPENR_WKUPEN_5 | PWR_WKUPCR_WKUPPUPD5_0 )
#define PWR_WAKEUP_PIN4_HIGH_PULLUP (uint32_t)(PWR_MPUWKUPENR_WKUPEN_4 | PWR_WKUPCR_WKUPPUPD4_0 )
#define PWR_WAKEUP_PIN3_HIGH_PULLUP (uint32_t)(PWR_MPUWKUPENR_WKUPEN_3 | PWR_WKUPCR_WKUPPUPD3_0 )
#define PWR_WAKEUP_PIN2_HIGH_PULLUP (uint32_t)(PWR_MPUWKUPENR_WKUPEN_2 | PWR_WKUPCR_WKUPPUPD2_0 )
#define PWR_WAKEUP_PIN1_HIGH_PULLUP (uint32_t)(PWR_MPUWKUPENR_WKUPEN_1 | PWR_WKUPCR_WKUPPUPD1_0 )
/* Low level + Pull-up */
#define PWR_WAKEUP_PIN6_LOW_PULLUP (uint32_t)(PWR_WKUPCR_WKUPP_6 | PWR_MPUWKUPENR_WKUPEN_6 | PWR_WKUPCR_WKUPPUPD6_0)
#define PWR_WAKEUP_PIN5_LOW_PULLUP (uint32_t)(PWR_WKUPCR_WKUPP_5 | PWR_MPUWKUPENR_WKUPEN_5 | PWR_WKUPCR_WKUPPUPD5_0)
#define PWR_WAKEUP_PIN4_LOW_PULLUP (uint32_t)(PWR_WKUPCR_WKUPP_4 | PWR_MPUWKUPENR_WKUPEN_4 | PWR_WKUPCR_WKUPPUPD4_0)
#define PWR_WAKEUP_PIN3_LOW_PULLUP (uint32_t)(PWR_WKUPCR_WKUPP_3 | PWR_MPUWKUPENR_WKUPEN_3 | PWR_WKUPCR_WKUPPUPD3_0)
#define PWR_WAKEUP_PIN2_LOW_PULLUP (uint32_t)(PWR_WKUPCR_WKUPP_2 | PWR_MPUWKUPENR_WKUPEN_2 | PWR_WKUPCR_WKUPPUPD2_0)
#define PWR_WAKEUP_PIN1_LOW_PULLUP (uint32_t)(PWR_WKUPCR_WKUPP_1 | PWR_MPUWKUPENR_WKUPEN_1 | PWR_WKUPCR_WKUPPUPD1_0)
/* High level + Pull-down */
#define PWR_WAKEUP_PIN6_HIGH_PULLDOWN (uint32_t)(PWR_MPUWKUPENR_WKUPEN_6 | PWR_WKUPCR_WKUPPUPD6_1 )
#define PWR_WAKEUP_PIN5_HIGH_PULLDOWN (uint32_t)(PWR_MPUWKUPENR_WKUPEN_5 | PWR_WKUPCR_WKUPPUPD5_1 )
#define PWR_WAKEUP_PIN4_HIGH_PULLDOWN (uint32_t)(PWR_MPUWKUPENR_WKUPEN_4 | PWR_WKUPCR_WKUPPUPD4_1 )
#define PWR_WAKEUP_PIN3_HIGH_PULLDOWN (uint32_t)(PWR_MPUWKUPENR_WKUPEN_3 | PWR_WKUPCR_WKUPPUPD3_1 )
#define PWR_WAKEUP_PIN2_HIGH_PULLDOWN (uint32_t)(PWR_MPUWKUPENR_WKUPEN_2 | PWR_WKUPCR_WKUPPUPD2_1 )
#define PWR_WAKEUP_PIN1_HIGH_PULLDOWN (uint32_t)(PWR_MPUWKUPENR_WKUPEN_1 | PWR_WKUPCR_WKUPPUPD1_1 )
/* Low level + Pull-down */
#define PWR_WAKEUP_PIN6_LOW_PULLDOWN (uint32_t)(PWR_WKUPCR_WKUPP_6 | PWR_MPUWKUPENR_WKUPEN_6 | PWR_WKUPCR_WKUPPUPD6_1)
#define PWR_WAKEUP_PIN5_LOW_PULLDOWN (uint32_t)(PWR_WKUPCR_WKUPP_5 | PWR_MPUWKUPENR_WKUPEN_5 | PWR_WKUPCR_WKUPPUPD5_1)
#define PWR_WAKEUP_PIN4_LOW_PULLDOWN (uint32_t)(PWR_WKUPCR_WKUPP_4 | PWR_MPUWKUPENR_WKUPEN_4 | PWR_WKUPCR_WKUPPUPD4_1)
#define PWR_WAKEUP_PIN3_LOW_PULLDOWN (uint32_t)(PWR_WKUPCR_WKUPP_3 | PWR_MPUWKUPENR_WKUPEN_3 | PWR_WKUPCR_WKUPPUPD3_1)
#define PWR_WAKEUP_PIN2_LOW_PULLDOWN (uint32_t)(PWR_WKUPCR_WKUPP_2 | PWR_MPUWKUPENR_WKUPEN_2 | PWR_WKUPCR_WKUPPUPD2_1)
#define PWR_WAKEUP_PIN1_LOW_PULLDOWN (uint32_t)(PWR_WKUPCR_WKUPP_1 | PWR_MPUWKUPENR_WKUPEN_1 | PWR_WKUPCR_WKUPPUPD1_1)
/**
* @}
*/
/** @defgroup PWREx_Exported_Constants_Group2 PWREx Wakeup Pins Flags
* @{
*/
#define PWR_WAKEUP_PIN_FLAG1 PWR_WKUPFR_WKUPF1 /*!< Wakeup event on pin 1 */
#define PWR_WAKEUP_PIN_FLAG2 PWR_WKUPFR_WKUPF2 /*!< Wakeup event on pin 2 */
#define PWR_WAKEUP_PIN_FLAG3 PWR_WKUPFR_WKUPF3 /*!< Wakeup event on pin 3 */
#define PWR_WAKEUP_PIN_FLAG4 PWR_WKUPFR_WKUPF4 /*!< Wakeup event on pin 4 */
#define PWR_WAKEUP_PIN_FLAG5 PWR_WKUPFR_WKUPF5 /*!< Wakeup event on pin 5 */
#define PWR_WAKEUP_PIN_FLAG6 PWR_WKUPFR_WKUPF6 /*!< Wakeup event on pin 6 */
/**
* @}
*/
/** @defgroup PWREx_Exported_Constants_Group3 PWREx Core definition
* @{
*/
#define PWR_CORE_CPU1 ((uint32_t)0x00)
#define PWR_CORE_CPU2 ((uint32_t)0x01)
/**
* @}
*/
/** @defgroup PWREx_Exported_Constants_Group4 PWREx AVD detection level
* @{
*/
#define PWR_AVDLEVEL_0 PWR_CR1_ALS_LEV0 /* 1.7 V */
#define PWR_AVDLEVEL_1 PWR_CR1_ALS_LEV1 /* 2.1 V */
#define PWR_AVDLEVEL_2 PWR_CR1_ALS_LEV2 /* 2.5 V */
#define PWR_AVDLEVEL_3 PWR_CR1_ALS_LEV3 /* 2.8 V */
/**
* @}
*/
/** @defgroup PWREx_Exported_Constants_Group5 PWREx AVD Mode
* @{
*/
#define PWR_AVD_MODE_NORMAL ((uint32_t)0x00000000U) /*!< Basic mode is used */
#define PWR_AVD_MODE_IT_RISING ((uint32_t)0x00010001U) /*!< External Interrupt Mode with Rising edge trigger detection */
#define PWR_AVD_MODE_IT_FALLING ((uint32_t)0x00010002U) /*!< External Interrupt Mode with Falling edge trigger detection */
#define PWR_AVD_MODE_IT_RISING_FALLING ((uint32_t)0x00010003U) /*!< External Interrupt Mode with Rising/Falling edge trigger detection */
/**
* @}
*/
/** @defgroup PWREx_Exported_Constants_Group6 PWR battery charging resistor selection
* @{
*/
#define PWR_BATTERY_CHARGING_RESISTOR_5 ((uint32_t)0x00000000U) /*!< VBAT charging through a 5 kOhm resistor */
#define PWR_BATTERY_CHARGING_RESISTOR_1_5 PWR_CR3_VBRS /*!< VBAT charging through a 1.5 kOhm resistor */
/**
* @}
*/
/** @defgroup PWREx_Exported_Constants_Group7 PWREx VBAT Thresholds
* @{
*/
#define PWR_VBAT_BETWEEN_HIGH_LOW_THRESHOLD ((uint32_t)0x00000000U)
#define PWR_VBAT_BELOW_LOW_THRESHOLD PWR_CR2_VBATL /*!< Vsw low threshold is ~1.35V */
#define PWR_VBAT_ABOVE_HIGH_THRESHOLD PWR_CR2_VBATH /*!< Vsw high threshold is ~3.6V */
/**
* @}
*/
/** @defgroup PWREx_Exported_Constants_Group8 PWREx Temperature Thresholds
* @{
*/
#define PWR_TEMP_BETWEEN_HIGH_LOW_THRESHOLD ((uint32_t)0x00000000U)
#define PWR_TEMP_BELOW_LOW_THRESHOLD PWR_CR2_TEMPL
#define PWR_TEMP_ABOVE_HIGH_THRESHOLD PWR_CR2_TEMPH
/**
* @}
*/
/**
* @}
*/
/* Exported macro ------------------------------------------------------------*/
/** @defgroup PWREx_Exported_Macro PWREx Exported Macro
* @{
*/
/** @brief Check Wake Up flag is set or not.
* @param __WUFLAG__: specifies the Wake Up flag to check.
* This parameter can be one of the following values:
* @arg PWR_WAKEUP_PIN_FLAG1: Wakeup Pin Flag 1
* @arg PWR_WAKEUP_PIN_FLAG2: Wakeup Pin Flag 2
* @arg PWR_WAKEUP_PIN_FLAG3: Wakeup Pin Flag 3
* @arg PWR_WAKEUP_PIN_FLAG4: Wakeup Pin Flag 4
* @arg PWR_WAKEUP_PIN_FLAG5: Wakeup Pin Flag 5
* @arg PWR_WAKEUP_PIN_FLAG6: Wakeup Pin Flag 6
*/
#define __HAL_PWR_GET_WAKEUP_FLAG(__WUFLAG__) (PWR->WKUPFR & (__WUFLAG__))
/** @brief Clear the WakeUp pins flags.
* @param __WUFLAG__: specifies the Wake Up pin flag to clear.
* This parameter can be one of the following values:
* @arg PWR_WAKEUP_PIN_FLAG1: Wakeup Pin Flag 1
* @arg PWR_WAKEUP_PIN_FLAG2: Wakeup Pin Flag 2
* @arg PWR_WAKEUP_PIN_FLAG3: Wakeup Pin Flag 3
* @arg PWR_WAKEUP_PIN_FLAG4: Wakeup Pin Flag 4
* @arg PWR_WAKEUP_PIN_FLAG5: Wakeup Pin Flag 5
* @arg PWR_WAKEUP_PIN_FLAG6: Wakeup Pin Flag 6
*/
#define __HAL_PWR_CLEAR_WAKEUP_FLAG(__WUFLAG__) SET_BIT(PWR->WKUPCR, (__WUFLAG__))
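/* Illustrative usage of the two macros above (not part of the original
   header; the flag name is taken from the definitions earlier in this file):
     if (__HAL_PWR_GET_WAKEUP_FLAG(PWR_WAKEUP_PIN_FLAG1) != 0U)
     {
       __HAL_PWR_CLEAR_WAKEUP_FLAG(PWR_WAKEUP_PIN_FLAG1);
     }
*/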
/**
* @}
*/
/* Exported functions --------------------------------------------------------*/
/** @defgroup PWREx_Exported_Functions PWREx Exported Functions
* @{
*/
/** @defgroup PWREx_Exported_Functions_Group1 Low power control functions
* @{
*/
/* Power core holding functions */
HAL_StatusTypeDef HAL_PWREx_HoldCore(uint32_t CPU);
void HAL_PWREx_ReleaseCore(uint32_t CPU);
/* Power Wakeup PIN IRQ Handler */
void HAL_PWREx_WAKEUP_PIN_IRQHandler(void);
void HAL_PWREx_WKUP1_Callback(void);
void HAL_PWREx_WKUP2_Callback(void);
void HAL_PWREx_WKUP3_Callback(void);
void HAL_PWREx_WKUP4_Callback(void);
void HAL_PWREx_WKUP5_Callback(void);
void HAL_PWREx_WKUP6_Callback(void);
/**
* @}
*/
/** @defgroup PWREx_Exported_Functions_Group2 Peripherals control functions
* @{
*/
/* Backup regulator control functions */
HAL_StatusTypeDef HAL_PWREx_EnableBkUpReg(void);
HAL_StatusTypeDef HAL_PWREx_DisableBkUpReg(void);
/* Retention regulator control functions */
HAL_StatusTypeDef HAL_PWREx_EnableRetReg(void);
HAL_StatusTypeDef HAL_PWREx_DisableRetReg(void);
/* 1V1 regulator control functions */
HAL_StatusTypeDef HAL_PWREx_Enable1V1Reg(void);
HAL_StatusTypeDef HAL_PWREx_Disable1V1Reg(void);
/* 1V8 regulator control functions */
HAL_StatusTypeDef HAL_PWREx_Enable1V8Reg(void);
HAL_StatusTypeDef HAL_PWREx_Disable1V8Reg(void);
/* Battery control functions */
void HAL_PWREx_EnableBatteryCharging(uint32_t ResistorValue);
void HAL_PWREx_DisableBatteryCharging(void);
/**
* @}
*/
/** @defgroup PWREx_Exported_Functions_Group3 Power Monitoring functions
* @{
*/
/* Power VBAT/Temperature monitoring functions */
void HAL_PWREx_EnableMonitoring(void);
void HAL_PWREx_DisableMonitoring(void);
uint32_t HAL_PWREx_GetTemperatureLevel(void);
uint32_t HAL_PWREx_GetVBATLevel(void);
/* USB Voltage level detector functions */
HAL_StatusTypeDef HAL_PWREx_EnableUSBVoltageDetector(void);
HAL_StatusTypeDef HAL_PWREx_DisableUSBVoltageDetector(void);
/* Power AVD configuration functions */
void HAL_PWREx_ConfigAVD(PWREx_AVDTypeDef *sConfigAVD);
void HAL_PWREx_EnableAVD(void);
void HAL_PWREx_DisableAVD(void);
/* Power PVD/AVD IRQ Handler */
void HAL_PWREx_PVD_AVD_IRQHandler(void);
void HAL_PWREx_AVDCallback(void);
/**
* @}
*/
/**
* @}
*/
/* Private types -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Private constants ---------------------------------------------------------*/
/* Private macros ------------------------------------------------------------*/
/** @defgroup PWREx_Private_Macros PWREx Private Macros
* @{
*/
/** @defgroup PWREx_IS_PWR_Definitions PWREx Private macros to check input parameters
* @{
*/
#define IS_PWR_WAKEUP_PIN(__PIN__) (((__PIN__) == PWR_WAKEUP_PIN1) || \
((__PIN__) == PWR_WAKEUP_PIN2) || \
((__PIN__) == PWR_WAKEUP_PIN3) || \
((__PIN__) == PWR_WAKEUP_PIN4) || \
((__PIN__) == PWR_WAKEUP_PIN5) || \
((__PIN__) == PWR_WAKEUP_PIN6) || \
((__PIN__) == PWR_WAKEUP_PIN1_HIGH) || \
((__PIN__) == PWR_WAKEUP_PIN2_HIGH) || \
((__PIN__) == PWR_WAKEUP_PIN3_HIGH) || \
((__PIN__) == PWR_WAKEUP_PIN4_HIGH) || \
((__PIN__) == PWR_WAKEUP_PIN5_HIGH) || \
((__PIN__) == PWR_WAKEUP_PIN6_HIGH) || \
((__PIN__) == PWR_WAKEUP_PIN1_LOW) || \
((__PIN__) == PWR_WAKEUP_PIN2_LOW) || \
((__PIN__) == PWR_WAKEUP_PIN3_LOW) || \
((__PIN__) == PWR_WAKEUP_PIN4_LOW) || \
((__PIN__) == PWR_WAKEUP_PIN5_LOW) || \
((__PIN__) == PWR_WAKEUP_PIN6_LOW) || \
((__PIN__) == PWR_WAKEUP_PIN6_HIGH_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN5_HIGH_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN4_HIGH_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN3_HIGH_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN2_HIGH_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN1_HIGH_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN6_LOW_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN5_LOW_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN4_LOW_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN3_LOW_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN2_LOW_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN1_LOW_PULLUP) || \
((__PIN__) == PWR_WAKEUP_PIN6_HIGH_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN5_HIGH_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN4_HIGH_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN3_HIGH_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN2_HIGH_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN1_HIGH_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN6_LOW_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN5_LOW_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN4_LOW_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN3_LOW_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN2_LOW_PULLDOWN) || \
((__PIN__) == PWR_WAKEUP_PIN1_LOW_PULLDOWN))
#define IS_PWR_AVD_LEVEL(LEVEL) (((LEVEL) == PWR_AVDLEVEL_0) || ((LEVEL) == PWR_AVDLEVEL_1) || \
((LEVEL) == PWR_AVDLEVEL_2) || ((LEVEL) == PWR_AVDLEVEL_3))
#define IS_PWR_AVD_MODE(MODE) (((MODE) == PWR_AVD_MODE_IT_RISING)|| ((MODE) == PWR_AVD_MODE_IT_FALLING) || \
((MODE) == PWR_AVD_MODE_IT_RISING_FALLING) || ((MODE) == PWR_AVD_MODE_NORMAL))
#define IS_PWR_BATTERY_RESISTOR_SELECT(RESISTOR) (((RESISTOR) == PWR_BATTERY_CHARGING_RESISTOR_5) ||\
((RESISTOR) == PWR_BATTERY_CHARGING_RESISTOR_1_5))
#define IS_PWR_CORE(CPU) (((CPU) == PWR_CORE_CPU1) || ((CPU) == PWR_CORE_CPU2))
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif /* __STM32MP1xx_HAL_PWR_EX_H */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| 10,828 |
3,579 | <gh_stars>1000+
/*
* Copyright 2015, The Querydsl Team (http://www.querydsl.com/team)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.querydsl.core.util;
import static org.junit.Assert.*;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
public class MathUtilsTest {
@Rule
public final ExpectedException expectedException = ExpectedException.none();
@Test
public void sum() {
assertEquals(Integer.valueOf(5), MathUtils.sum(2, 3.0));
}
@Test
public void difference() {
assertEquals(Integer.valueOf(2), MathUtils.difference(5, 3.0));
}
@Test
public void cast_returns_correct_type() {
checkCast(1, BigDecimal.class);
checkCast(1, BigInteger.class);
checkCast(1, Double.class);
checkCast(1, Float.class);
checkCast(1, Integer.class);
checkCast(1, Long.class);
checkCast(1, Short.class);
checkCast(1, Byte.class);
}
@Test
public void cast_returns_argument_as_is_when_compatible() {
checkSame(BigDecimal.ONE, BigDecimal.class);
checkSame(BigInteger.ONE, BigInteger.class);
checkSame((double) 1, Double.class);
checkSame((float) 1, Float.class);
checkSame(1, Integer.class);
checkSame((long) 1, Long.class);
checkSame((short) 1, Short.class);
checkSame((byte) 1, Byte.class);
}
@Test
public void cast_returns_null_when_input_is_null() {
Integer result = MathUtils.cast(null, Integer.class);
assertNull(result);
}
@Test
public void cast_throws_on_unsupported_numbers() {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Unsupported target type");
checkCast(1, AtomicInteger.class);
}
private static void checkCast(Number value, Class<? extends Number> targetClass) {
Number target = MathUtils.cast(value, targetClass);
assertSame(targetClass, target.getClass());
}
private static <N extends Number> void checkSame(N value, Class<N> targetClass) {
N target = MathUtils.cast(value, targetClass);
assertSame(value, target);
}
}
| 1,051 |
619 | #pragma once
#include "mull/Mutant.h"
#include "mull/MutationResult.h"
namespace mull {
class progress_counter;
class DryRunMutantExecutionTask {
public:
using In = const std::vector<std::unique_ptr<Mutant>>;
using Out = std::vector<std::unique_ptr<MutationResult>>;
using iterator = In::const_iterator;
void operator()(iterator begin, iterator end, Out &storage, progress_counter &counter);
};
} // namespace mull
| 138 |
14,668 | #if HAVE_GNUC_ATTRIBUTE
#ifdef __fcfreetype__
# undef FcFreeTypeCharIndex
extern __typeof (FcFreeTypeCharIndex) FcFreeTypeCharIndex __attribute((alias("IA__FcFreeTypeCharIndex"))) FC_ATTRIBUTE_VISIBILITY_EXPORT;
# undef FcFreeTypeCharSetAndSpacing
extern __typeof (FcFreeTypeCharSetAndSpacing) FcFreeTypeCharSetAndSpacing __attribute((alias("IA__FcFreeTypeCharSetAndSpacing"))) FC_ATTRIBUTE_VISIBILITY_EXPORT;
# undef FcFreeTypeCharSet
extern __typeof (FcFreeTypeCharSet) FcFreeTypeCharSet __attribute((alias("IA__FcFreeTypeCharSet"))) FC_ATTRIBUTE_VISIBILITY_EXPORT;
#endif /* __fcfreetype__ */
#ifdef __fcpat__
# undef FcPatternGetFTFace
extern __typeof (FcPatternGetFTFace) FcPatternGetFTFace __attribute((alias("IA__FcPatternGetFTFace"))) FC_ATTRIBUTE_VISIBILITY_EXPORT;
# undef FcPatternAddFTFace
extern __typeof (FcPatternAddFTFace) FcPatternAddFTFace __attribute((alias("IA__FcPatternAddFTFace"))) FC_ATTRIBUTE_VISIBILITY_EXPORT;
#endif /* __fcpat__ */
#ifdef __fcfreetype__
# undef FcFreeTypeQueryFace
extern __typeof (FcFreeTypeQueryFace) FcFreeTypeQueryFace __attribute((alias("IA__FcFreeTypeQueryFace"))) FC_ATTRIBUTE_VISIBILITY_EXPORT;
#endif /* __fcfreetype__ */
#endif /* HAVE_GNUC_ATTRIBUTE */
| 446 |
437 | import sys
notation = {
"---": "0",
"--x": "1",
"-w-": "2",
"-wx": "3",
"r--": "4",
"r-x": "5",
"rw-": "6",
"rwx": "7",
}
def symb_to_num(symbolic):
    """
    Convert symbolic permission notation to numeric notation.
    """
    if len(symbolic) == 9:
        # Split into the user, group and other triplets,
        # e.g. "rwxr-xr--" -> ("rwx", "r-x", "r--")
        group = (symbolic[:3], symbolic[3:6], symbolic[6:])
        try:
            numeric = notation[group[0]] + notation[group[1]] + notation[group[2]]
        except KeyError:
            numeric = "Invalid Symbolic Representation!"
    else:
        numeric = "Symbolic input should be of length 9!"
    return numeric
def num_to_symb(num):
    """
    Convert numeric permission notation to symbolic notation.
    """
    num = str(num)
    if len(num) == 3:
        # Reverse lookup table: digit -> symbolic triplet, e.g. "7" -> "rwx".
        # Building the symbolic string digit by digit keeps the output in the
        # same order as the input (the original table scan lost that order).
        reverse_notation = {value: key for key, value in notation.items()}
        symbolic = ""
        for digit in num:
            if digit not in reverse_notation:
                # Valid permission digits are 0-7 only
                return "Invalid Numerical Representation!"
            symbolic += reverse_notation[digit]
    else:
        symbolic = "Number input should be of length 3!"
    return symbolic
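# Example round trips (illustrative only, assuming the two functions above):
#   symb_to_num("rwxr-xr--")  ->  "754"
#   num_to_symb(754)          ->  "rwxr-xr--"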
def main():
representation = sys.argv[1]
mode = sys.argv[2]
if mode == "S":
print(num_to_symb(representation))
elif mode == "N":
print(symb_to_num(representation))
else:
print(
"Invalid Mode Selection. Please select 'S' for numerical --> symbolic conversion or 'N' for symbolic --> numerical conversion!"
)
if __name__ == "__main__":
main()
| 719 |
1,408 | <reponame>Mu-L/springBoot-study
package com.pancm.shell;
import org.springframework.shell.standard.ShellComponent;
import org.springframework.shell.standard.ShellMethod;
import org.springframework.shell.standard.ShellOption;
/**
 * @author pancm
 * @Title: springboot-shell
 * @Description: Shell command implementation
 * @Version:1.0.0
 * @Since:jdk1.8
 * @date 2021/3/25
 */
@ShellComponent
public class MyCommands {
/**
 * Run the program and type "add 1 2" in the console to see 3 printed.
 **/
@ShellMethod("Add two integers together.")
public int add(int a, int b) {
return a + b;
}
@ShellMethod(value = "Add numbers.", key = "sum")
public int add2(int a, int b) {
return a + b;
}
/**
 * Customizing named parameter keys.
 * As shown above, the default strategy for deriving a named parameter's key is to use
 * the Java name from the method signature, prefixed with two dashes (--). This can be
 * customized in two ways:
 * - To change the default prefix for the whole method, use the prefix() attribute of
 *   the @ShellMethod annotation.
 * - To override the full key on a per-parameter basis, annotate the parameter with
 *   @ShellOption.
 **/
@ShellMethod(value = "Display stuff.", prefix="-")
public String echo(int a, int b, @ShellOption("--third") int c) {
return String.format("You said a=%d, b=%d, c=%d", a, b, c);
}
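// Illustrative shell session (assumed, not part of the original source):
//   shell:> echo -a 1 -b 2 --third 3
//   You said a=1, b=2, c=3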
}
| 676 |
575 | <filename>components/dom_distiller/content/browser/distiller_javascript_service_impl.h
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_DOM_DISTILLER_CONTENT_BROWSER_DISTILLER_JAVASCRIPT_SERVICE_IMPL_H_
#define COMPONENTS_DOM_DISTILLER_CONTENT_BROWSER_DISTILLER_JAVASCRIPT_SERVICE_IMPL_H_
#include "components/dom_distiller/content/common/mojom/distiller_javascript_service.mojom.h"
#include "components/dom_distiller/core/distilled_page_prefs.h"
#include "components/dom_distiller/core/distiller_ui_handle.h"
#include "components/dom_distiller/core/mojom/distilled_page_prefs.mojom.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
namespace dom_distiller {
class DistilledPagePrefs;
// This is the receiving end of "distiller" JavaScript object calls.
class DistillerJavaScriptServiceImpl
: public mojom::DistillerJavaScriptService {
public:
DistillerJavaScriptServiceImpl(DistillerUIHandle* distiller_ui_handle,
DistilledPagePrefs* distilled_page_prefs);
~DistillerJavaScriptServiceImpl() override;
// Mojo mojom::DistillerJavaScriptService implementation.
// Show the Android view containing Reader Mode settings.
void HandleDistillerOpenSettingsCall() override;
void HandleStoreThemePref(mojom::Theme theme) override;
void HandleStoreFontFamilyPref(mojom::FontFamily font_family) override;
void HandleStoreFontScalingPref(float font_scale) override;
DistillerJavaScriptServiceImpl(const DistillerJavaScriptServiceImpl&) =
delete;
DistillerJavaScriptServiceImpl& operator=(
const DistillerJavaScriptServiceImpl&) = delete;
private:
DistillerUIHandle* distiller_ui_handle_;
DistilledPagePrefs* distilled_page_prefs_;
};
// static
void CreateDistillerJavaScriptService(
DistillerUIHandle* distiller_ui_handle,
DistilledPagePrefs* distilled_page_prefs,
mojo::PendingReceiver<mojom::DistillerJavaScriptService> receiver);
} // namespace dom_distiller
#endif // COMPONENTS_DOM_DISTILLER_CONTENT_BROWSER_DISTILLER_JAVASCRIPT_SERVICE_IMPL_H_
| 736 |
842 | <filename>pgoapi/protos/pogoprotos/settings/master/quest/daily_quest_settings_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/master/quest/daily_quest_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/master/quest/daily_quest_settings.proto',
package='pogoprotos.settings.master.quest',
syntax='proto3',
serialized_pb=_b('\n;pogoprotos/settings/master/quest/daily_quest_settings.proto\x12 pogoprotos.settings.master.quest\"\x7f\n\x12\x44\x61ilyQuestSettings\x12\x17\n\x0f\x62uckets_per_day\x18\x01 \x01(\x05\x12\x15\n\rstreak_length\x18\x02 \x01(\x05\x12\x18\n\x10\x62onus_multiplier\x18\x03 \x01(\x02\x12\x1f\n\x17streak_bonus_multiplier\x18\x04 \x01(\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DAILYQUESTSETTINGS = _descriptor.Descriptor(
name='DailyQuestSettings',
full_name='pogoprotos.settings.master.quest.DailyQuestSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='buckets_per_day', full_name='pogoprotos.settings.master.quest.DailyQuestSettings.buckets_per_day', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='streak_length', full_name='pogoprotos.settings.master.quest.DailyQuestSettings.streak_length', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bonus_multiplier', full_name='pogoprotos.settings.master.quest.DailyQuestSettings.bonus_multiplier', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='streak_bonus_multiplier', full_name='pogoprotos.settings.master.quest.DailyQuestSettings.streak_bonus_multiplier', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=97,
serialized_end=224,
)
DESCRIPTOR.message_types_by_name['DailyQuestSettings'] = _DAILYQUESTSETTINGS
DailyQuestSettings = _reflection.GeneratedProtocolMessageType('DailyQuestSettings', (_message.Message,), dict(
DESCRIPTOR = _DAILYQUESTSETTINGS,
__module__ = 'pogoprotos.settings.master.quest.daily_quest_settings_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.master.quest.DailyQuestSettings)
))
_sym_db.RegisterMessage(DailyQuestSettings)
# @@protoc_insertion_point(module_scope)
| 1,455 |
4,304 | #include <gtest/gtest.h>
#include "util.h"
// Literal limit checks
#define TEST_COMPILE(src) DO(test_compile(src, "expr"))
#define TEST_ERROR(src) DO(test_error(src, "expr"))
class LiteralLimitTest : public PassTest
{
};
TEST_F(LiteralLimitTest, U8Max)
{
TEST_COMPILE("class Foo let m:U8 = 255");
TEST_ERROR("class Foo let m:U8 = 256");
}
TEST_F(LiteralLimitTest, I8Max)
{
TEST_COMPILE("class Foo let m:I8 = 127");
TEST_ERROR("class Foo let m:I8 = 128");
}
TEST_F(LiteralLimitTest, I8Min)
{
TEST_COMPILE("class Foo let m:I8 = -128");
TEST_ERROR("class Foo let m:I8 = -129");
}
TEST_F(LiteralLimitTest, U16Max)
{
TEST_COMPILE("class Foo let m:U16 = 65535");
TEST_ERROR("class Foo let m:U16 = 65536");
}
TEST_F(LiteralLimitTest, I16Max)
{
TEST_COMPILE("class Foo let m:I16 = 32767");
TEST_ERROR("class Foo let m:I16 = 32768");
}
TEST_F(LiteralLimitTest, I16Min)
{
TEST_COMPILE("class Foo let m:I16 = -32768");
TEST_ERROR("class Foo let m:I16 = -32769");
}
TEST_F(LiteralLimitTest, U32Max)
{
TEST_COMPILE("class Foo let m:U32 = 0xFFFFFFFF");
TEST_ERROR("class Foo let m:U32 = 0x100000000");
}
TEST_F(LiteralLimitTest, I32Max)
{
TEST_COMPILE("class Foo let m:I32 = 0x7FFFFFFF");
TEST_ERROR("class Foo let m:I32 = 0x80000000");
}
TEST_F(LiteralLimitTest, I32Min)
{
TEST_COMPILE("class Foo let m:I32 = -0x80000000");
TEST_ERROR("class Foo let m:I32 = -0x80000001");
}
TEST_F(LiteralLimitTest, U64Max)
{
TEST_COMPILE("class Foo let m:U64 = 0xFFFFFFFFFFFFFFFF");
TEST_ERROR("class Foo let m:U64 = 0x10000000000000000");
}
TEST_F(LiteralLimitTest, I64Max)
{
TEST_COMPILE("class Foo let m:I64 = 0x7FFFFFFFFFFFFFFF");
TEST_ERROR("class Foo let m:I64 = 0x8000000000000000");
}
TEST_F(LiteralLimitTest, I64Min)
{
TEST_COMPILE("class Foo let m:I64 = -0x8000000000000000");
TEST_ERROR("class Foo let m:I64 = -0x8000000000000001");
}
TEST_F(LiteralLimitTest, U128Max)
{
// This is checked by the lexer, including here for completeness
TEST_COMPILE("class Foo let m:U128 = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
TEST_ERROR("class Foo let m:U128 = 0x100000000000000000000000000000000");
}
TEST_F(LiteralLimitTest, I128Max)
{
TEST_COMPILE("class Foo let m:I128 = 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
TEST_ERROR("class Foo let m:I128 = 0x80000000000000000000000000000000");
}
TEST_F(LiteralLimitTest, I128Min)
{
TEST_COMPILE("class Foo let m:I128 = -0x80000000000000000000000000000000");
TEST_ERROR("class Foo let m:I128 = -0x80000000000000000000000000000001");
}
| 1,035 |
1,887 | <reponame>gmshepard/mini-spring
package org.springframework.aop.framework;
import org.springframework.aop.AdvisedSupport;
/**
* @author derekyi
* @date 2020/12/6
*/
public class ProxyFactory {
private AdvisedSupport advisedSupport;
public ProxyFactory(AdvisedSupport advisedSupport) {
this.advisedSupport = advisedSupport;
}
public Object getProxy() {
return createAopProxy().getProxy();
}
private AopProxy createAopProxy() {
if (advisedSupport.isProxyTargetClass()) {
return new CglibAopProxy(advisedSupport);
}
return new JdkDynamicAopProxy(advisedSupport);
}
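// Illustrative usage (assumed, not from the original source):
//   Object proxy = new ProxyFactory(advisedSupport).getProxy();
// A CGLIB (subclass-based) proxy is created when proxyTargetClass is set;
// otherwise a JDK dynamic (interface-based) proxy is used.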
}
| 197 |
1,338 | <reponame>Kirishikesan/haiku
#include "StringCaseTest.h"
#include "cppunit/TestCaller.h"
#include <String.h>
StringCaseTest::StringCaseTest(std::string name)
: BTestCase(name)
{
}
StringCaseTest::~StringCaseTest()
{
}
void
StringCaseTest::PerformTest(void)
{
BString *string;
// Capitalize
NextSubTest();
string = new BString("this is a sentence");
string->Capitalize();
CPPUNIT_ASSERT(strcmp(string->String(), "This is a sentence") == 0);
delete string;
NextSubTest();
string = new BString("134this is a sentence");
string->Capitalize();
CPPUNIT_ASSERT(strcmp(string->String(), "134this is a sentence") == 0);
delete string;
NextSubTest();
string = new BString;
string->Capitalize();
CPPUNIT_ASSERT(strcmp(string->String(), "") == 0);
delete string;
// ToLower
NextSubTest();
string = new BString("1a2B3c4d5e6f7G");
string->ToLower();
CPPUNIT_ASSERT(strcmp(string->String(), "1a2b3c4d5e6f7g") == 0);
delete string;
NextSubTest();
string = new BString;
string->ToLower();
CPPUNIT_ASSERT(strcmp(string->String(), "") == 0);
delete string;
// ToUpper
NextSubTest();
string = new BString("1a2b3c4d5E6f7g");
string->ToUpper();
CPPUNIT_ASSERT(strcmp(string->String(), "1A2B3C4D5E6F7G") == 0);
delete string;
NextSubTest();
string = new BString;
string->ToUpper();
CPPUNIT_ASSERT(strcmp(string->String(), "") == 0);
delete string;
// CapitalizeEachWord
NextSubTest();
string = new BString("each wOrd 3will_be >capiTalized");
string->CapitalizeEachWord();
CPPUNIT_ASSERT(strcmp(string->String(), "Each Word 3Will_Be >Capitalized") == 0);
delete string;
NextSubTest();
string = new BString;
string->CapitalizeEachWord();
CPPUNIT_ASSERT(strcmp(string->String(), "") == 0);
delete string;
}
CppUnit::Test *StringCaseTest::suite(void)
{
typedef CppUnit::TestCaller<StringCaseTest>
StringCaseTestCaller;
return(new StringCaseTestCaller("BString::Case Test",
&StringCaseTest::PerformTest));
}
| 774 |
466 | <reponame>hsyuan/gporca
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2014 Pivotal, Inc.
//
// @filename:
// CXformInnerJoin2IndexGetApply.h
//
// @doc:
// Transform Inner Join to IndexGet Apply
//---------------------------------------------------------------------------
#ifndef GPOPT_CXformInnerJoin2IndexGetApply_H
#define GPOPT_CXformInnerJoin2IndexGetApply_H
#include "gpos/base.h"
#include "gpopt/xforms/CXformJoin2IndexApplyBase.h"
namespace gpopt
{
using namespace gpos;
//---------------------------------------------------------------------------
// @class:
// CXformInnerJoin2IndexGetApply
//
// @doc:
// Transform Inner Join to IndexGet Apply
//
//---------------------------------------------------------------------------
class CXformInnerJoin2IndexGetApply
: public CXformJoin2IndexApplyBase<
CLogicalInnerJoin, CLogicalIndexApply, CLogicalGet,
false /*fWithSelect*/, false /*is_partial*/, IMDIndex::EmdindBtree>
{
private:
// private copy ctor
CXformInnerJoin2IndexGetApply(const CXformInnerJoin2IndexGetApply &);
public:
// ctor
explicit CXformInnerJoin2IndexGetApply(CMemoryPool *mp)
: CXformJoin2IndexApplyBase<CLogicalInnerJoin, CLogicalIndexApply,
CLogicalGet, false /*fWithSelect*/,
false /*is_partial*/,
IMDIndex::EmdindBtree>(mp)
{
}
// dtor
virtual ~CXformInnerJoin2IndexGetApply()
{
}
// ident accessors
virtual EXformId
Exfid() const
{
return ExfInnerJoin2IndexGetApply;
}
virtual const CHAR *
SzId() const
{
return "CXformInnerJoin2IndexGetApply";
}
}; // class CXformInnerJoin2IndexGetApply
} // namespace gpopt
#endif // !GPOPT_CXformInnerJoin2IndexGetApply_H
// EOF
| 605 |
852 | <gh_stars>100-1000
#ifndef RecoEgamma_EgammaIsolationAlgos_EgammaL1TkIsolation_h
#define RecoEgamma_EgammaIsolationAlgos_EgammaL1TkIsolation_h
#include "DataFormats/L1TrackTrigger/interface/L1Track.h"
#include "DataFormats/TrackReco/interface/TrackBase.h"
#include "DataFormats/TrackReco/interface/TrackFwd.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "FWCore/ParameterSet/interface/ParameterSetDescription.h"
//author <NAME> (RAL/CERN)
//based on the work of <NAME> and <NAME>
class EgammaL1TkIsolation {
public:
explicit EgammaL1TkIsolation(const edm::ParameterSet& para);
static void fillPSetDescription(edm::ParameterSetDescription& desc);
static edm::ParameterSetDescription makePSetDescription() {
edm::ParameterSetDescription desc;
fillPSetDescription(desc);
return desc;
}
std::pair<int, double> calIsol(const reco::TrackBase& trk, const L1TrackCollection& l1Tks) const;
std::pair<int, double> calIsol(const double objEta,
const double objPhi,
const double objZ,
const L1TrackCollection& l1Tks) const;
//little helper function for the two calIsol functions for it to directly return the pt
template <typename... Args>
double calIsolPt(Args&&... args) const {
return calIsol(std::forward<Args>(args)...).second;
}
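  // Illustrative call (assumed): double isoPt = isolation.calIsolPt(trk, l1Tks);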
private:
struct TrkCuts {
float minPt;
float minDR2;
float maxDR2;
float minDEta;
float maxDZ;
explicit TrkCuts(const edm::ParameterSet& para);
static edm::ParameterSetDescription makePSetDescription();
};
size_t etaBinNr(double eta) const;
static bool passTrkSel(const L1Track& trk,
const double trkPt,
const TrkCuts& cuts,
const double objEta,
const double objPhi,
const double objZ);
bool useAbsEta_;
std::vector<double> etaBoundaries_;
std::vector<TrkCuts> trkCuts_;
};
#endif
| 879 |
10,608 | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KorQuAD v1.0:The Korean Question Answering Dataset"""
import json
import datasets
from datasets.tasks import QuestionAnsweringExtractive
_CITATION = """\
@article{lim2019korquad1,
title={Korquad1. 0: Korean qa dataset for machine reading comprehension},
author={<NAME> and <NAME> and <NAME>},
journal={arXiv preprint arXiv:1909.07005},
year={2019}
}
"""
_DESCRIPTION = """\
KorQuAD 1.0 is a large-scale Korean dataset for machine reading comprehension task consisting of human generated questions for Wikipedia articles. We benchmark the data collecting process of SQuADv1.0 and crowdsourced 70,000+ question-answer pairs. 1,637 articles and 70,079 pairs of question answers were collected. 1,420 articles are used for the training set, 140 for the dev set, and 77 for the test set. 60,407 question-answer pairs are for the training set, 5,774 for the dev set, and 3,898 for the test set.
"""
_HOMEPAGE = "https://korquad.github.io/KorQuad%201.0/"
_LICENSE = "CC BY-ND 2.0 KR"
_URL = "https://korquad.github.io/dataset/"
_URLS = {
"train": _URL + "KorQuAD_v1.0_train.json",
"dev": _URL + "KorQuAD_v1.0_dev.json",
}
class SquadKorV1(datasets.GeneratorBasedBuilder):
"""KorQuAD 1.0 dataset"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="squad_kor_v1",
version=VERSION,
description=_DESCRIPTION,
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
task_templates=[
QuestionAnsweringExtractive(
question_column="question", context_column="context", answers_column="answers"
)
],
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# download and extract URLs
urls_to_download = _URLS
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
]
def _generate_examples(self, filepath):
"""Yields examples."""
with open(filepath, encoding="utf-8") as f:
squad = json.load(f)
for example in squad["data"]:
title = example.get("title", "").strip()
for paragraph in example["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"answers": {
"answer_start": answer_starts,
"text": answers,
},
}
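# Hypothetical usage sketch (assumes the standard `datasets` loading API; the
# dataset id below is inferred from the builder config name above):
#   from datasets import load_dataset
#   ds = load_dataset("squad_kor_v1")
#   print(ds["train"][0]["question"])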
| 2,202 |
921 | <filename>tests/scanner/audit/resource_rules_engine_test.py
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the ResourceRulesEngine."""
import copy
import itertools
import json
import unittest.mock as mock
import tempfile
import unittest
import yaml
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
from google.cloud.forseti.scanner.audit import resource_rules_engine
from google.cloud.forseti.scanner.audit import rules as scanner_rules
from tests.scanner.test_data import fake_resource_scanner_data as data
def get_rules_engine_with_rule(rule):
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(rule.encode())
f.flush()
rules_engine = resource_rules_engine.ResourceRulesEngine(
rules_file_path=f.name)
rules_engine.build_rule_book()
return rules_engine
class ResourceRulesEngineTest(ForsetiTestCase):
"""Tests for the ResourceRulesEngine."""
def setUp(self):
resource_rules_engine.LOGGER = mock.MagicMock()
def test_build_rule_book_from_local_yaml_file(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [project]
resource_trees: []
"""
rules_engine = get_rules_engine_with_rule(rule)
self.assertEqual(1, len(rules_engine.rule_book.rules))
def test_build_rule_book_no_resource_types(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: []
resource_trees: []
"""
with self.assertRaises(InvalidRulesSchemaError):
get_rules_engine_with_rule(rule)
def test_build_rule_book_no_mode(self):
rule = """
rules:
- name: Resource test rule
resource_types: [project]
resource_trees: []
"""
with self.assertRaises(InvalidRulesSchemaError):
get_rules_engine_with_rule(rule)
def test_get_applicable_resource_types(self):
rule = """
rules:
- name: rule 1
mode: required
resource_types: [project]
resource_trees: []
- name: rule 2
mode: required
resource_types: [organization, project]
resource_trees: []
"""
rules_engine = get_rules_engine_with_rule(rule)
got_types = rules_engine.rule_book.get_applicable_resource_types()
self.assertEqual(got_types, set(['organization', 'project']))
def test_find_violations_single_node_match(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [project]
resource_trees:
- type: project
resource_id: p1
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations([data.PROJECT1]))
self.assertEqual(got_violations, [])
def test_find_violations_single_node_no_match(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [project]
resource_trees:
- type: project
resource_id: p1
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations(
[data.PROJECT1, data.PROJECT2]))
self.assertEqual(got_violations, data.build_violations(data.PROJECT2))
def test_find_violations_multiple_roots(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [project]
resource_trees:
- type: project
resource_id: p1
- type: project
resource_id: p2
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations(
[data.PROJECT1, data.PROJECT2]))
self.assertEqual(got_violations, [])
def test_find_violations_child_found(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [organization, project]
resource_trees:
- type: organization
resource_id: '234'
children:
- type: project
resource_id: p1
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations(
[data.ORGANIZATION, data.PROJECT1]))
self.assertEqual(got_violations, [])
def test_find_violations_missing(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [project]
resource_trees:
- type: project
resource_id: p1
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations([]))
violation = resource_rules_engine.RuleViolation(
resource_id='p1',
resource_name='p1',
resource_type='project',
full_name='p1',
rule_index=0,
rule_name='Resource test rule',
violation_type='RESOURCE_VIOLATION',
violation_data='',
resource_data='',
)
self.assertEqual(got_violations, [violation])
def test_find_violations_child_missing(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [organization, project]
resource_trees:
- type: organization
resource_id: '234'
children:
- type: project
resource_id: p1
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations(
[data.ORGANIZATION]))
violation = resource_rules_engine.RuleViolation(
resource_id='p1',
resource_name='p1',
resource_type='project',
full_name='p1',
rule_index=0,
rule_name='Resource test rule',
violation_type='RESOURCE_VIOLATION',
violation_data='',
resource_data='',
)
self.assertEqual(got_violations, [violation])
def test_find_violations_wrong_parent(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [project, bucket]
resource_trees:
- type: project
resource_id: p1
- type: project
resource_id: p2
children:
- type: bucket
resource_id: p1-bucket1
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations(
[data.PROJECT1, data.PROJECT2, data.BUCKET]))
node_violation = resource_rules_engine.RuleViolation(
resource_id='p1-bucket1',
resource_name='p1-bucket1',
resource_type='bucket',
full_name='p1-bucket1',
rule_index=0,
rule_name='Resource test rule',
violation_type='RESOURCE_VIOLATION',
violation_data='',
resource_data='',
)
self.assertEqual(got_violations,
data.build_violations(data.BUCKET) + [node_violation])
def test_find_violations_wildcard(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [project]
resource_trees:
- type: project
resource_id: '*'
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations([data.PROJECT1]))
self.assertEqual(got_violations, [])
def test_find_violations_wildcard_and_sibling(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [organization, project]
resource_trees:
- type: organization
resource_id: '*'
- type: organization
resource_id: '234'
children:
- type: project
resource_id: p1
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations(
[data.ORGANIZATION, data.PROJECT1]))
self.assertEqual(got_violations, [])
def test_find_violations_empty_tree(self):
rule = """
rules:
- name: Resource test rule
mode: required
resource_types: [organization]
resource_trees: []
"""
rules_engine = get_rules_engine_with_rule(rule)
got_violations = list(rules_engine.find_violations([data.ORGANIZATION]))
self.assertEqual(got_violations,
data.build_violations(data.ORGANIZATION))
if __name__ == '__main__':
unittest.main()
| 3,611 |
852 | <reponame>malbouis/cmssw
/*----------------------------------------------------------------------
----------------------------------------------------------------------*/
#include "DuplicateChecker.h"
#include "PoolSource.h"
#include "InputFile.h"
#include "RootFile.h"
#include "RootSecondaryFileSequence.h"
#include "RootTree.h"
#include "DataFormats/Provenance/interface/BranchID.h"
#include "DataFormats/Provenance/interface/ProductRegistry.h"
#include "FWCore/Catalog/interface/InputFileCatalog.h"
#include "FWCore/Catalog/interface/SiteLocalConfig.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "FWCore/ParameterSet/interface/ParameterSetDescription.h"
#include "FWCore/ServiceRegistry/interface/Service.h"
#include "Utilities/StorageFactory/interface/StorageFactory.h"
namespace edm {
RootSecondaryFileSequence::RootSecondaryFileSequence(ParameterSet const& pset,
PoolSource& input,
InputFileCatalog const& catalog)
: RootInputFileSequence(pset, catalog),
input_(input),
orderedProcessHistoryIDs_(),
enablePrefetching_(false),
enforceGUIDInFileName_(pset.getUntrackedParameter<bool>("enforceGUIDInFileName")) {
// The SiteLocalConfig controls the TTreeCache size and the prefetching settings.
Service<SiteLocalConfig> pSLC;
if (pSLC.isAvailable()) {
enablePrefetching_ = pSLC->enablePrefetching();
}
// Prestage the files
//NOTE: we do not want to stage in all secondary files since we can be given a list of
// thousands of files and prestaging all those files can cause a site to fail.
// So, we stage in the first secondary file only.
setAtFirstFile();
storage::StorageFactory::get()->stagein(fileNames()[0]);
// Open the first file.
for (setAtFirstFile(); !noMoreFiles(); setAtNextFile()) {
initFile(input_.skipBadFiles());
if (rootFile())
break;
}
if (rootFile()) {
input_.productRegistryUpdate().updateFromInput(rootFile()->productRegistry()->productList());
}
}
RootSecondaryFileSequence::~RootSecondaryFileSequence() {}
void RootSecondaryFileSequence::endJob() { closeFile_(); }
void RootSecondaryFileSequence::closeFile_() {
// close the currently open file, if any, and delete the RootFile object.
if (rootFile()) {
rootFile()->close();
rootFile().reset();
}
}
void RootSecondaryFileSequence::initFile_(bool skipBadFiles) {
initTheFile(skipBadFiles, false, nullptr, "secondaryFiles", InputType::SecondaryFile);
}
RootSecondaryFileSequence::RootFileSharedPtr RootSecondaryFileSequence::makeRootFile(
std::shared_ptr<InputFile> filePtr) {
size_t currentIndexIntoFile = sequenceNumberOfFile();
return std::make_shared<RootFile>(fileNames()[0],
input_.processConfiguration(),
logicalFileName(),
filePtr,
input_.nStreams(),
input_.treeMaxVirtualSize(),
input_.processingMode(),
input_.runHelper(),
input_.productSelectorRules(),
InputType::SecondaryFile,
input_.branchIDListHelper(),
input_.thinnedAssociationsHelper(),
&associationsFromSecondary_,
input_.dropDescendants(),
input_.processHistoryRegistryForUpdate(),
indexesIntoFiles(),
currentIndexIntoFile,
orderedProcessHistoryIDs_,
input_.bypassVersionCheck(),
input_.labelRawDataLikeMC(),
enablePrefetching_,
enforceGUIDInFileName_);
}
void RootSecondaryFileSequence::initAssociationsFromSecondary(std::set<BranchID> const& associationsFromSecondary) {
for (auto const& branchID : associationsFromSecondary) {
associationsFromSecondary_.push_back(branchID);
}
rootFile()->initAssociationsFromSecondary(associationsFromSecondary_);
}
} // namespace edm
| 2,083 |
32,544 | <gh_stars>1000+
package com.baeldung.persistence.service.common;
import java.io.Serializable;
import java.util.List;
import com.baeldung.persistence.dao.common.IOperations;
import org.springframework.transaction.annotation.Transactional;
@Transactional(value = "jpaTransactionManager")
public abstract class AbstractJpaService<T extends Serializable> extends AbstractService<T> implements IOperations<T> {
@Override
public T findOne(final long id) {
return super.findOne(id);
}
@Override
public List<T> findAll() {
return super.findAll();
}
@Override
public void create(final T entity) {
super.create(entity);
}
@Override
public T update(final T entity) {
return super.update(entity);
}
@Override
public void delete(final T entity) {
super.delete(entity);
}
@Override
public void deleteById(final long entityId) {
super.deleteById(entityId);
}
}
| 363 |
190,993 | <filename>tensorflow/python/util/lock_util_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lock_util."""
import random
import time
from absl.testing import parameterized
from tensorflow.python.platform import test
from tensorflow.python.util import lock_util
class GroupLockTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(1, 2, 3, 5, 10)
def testGroups(self, num_groups):
lock = lock_util.GroupLock(num_groups)
num_threads = 10
finished = set()
def thread_fn(thread_id):
time.sleep(random.random() * 0.1)
group_id = thread_id % num_groups
with lock.group(group_id):
time.sleep(random.random() * 0.1)
self.assertGreater(lock._group_member_counts[group_id], 0)
for g, c in enumerate(lock._group_member_counts):
if g != group_id:
self.assertEqual(0, c)
finished.add(thread_id)
threads = [
self.checkedThread(target=thread_fn, args=(i,))
for i in range(num_threads)
]
for i in range(num_threads):
threads[i].start()
for i in range(num_threads):
threads[i].join()
self.assertEqual(set(range(num_threads)), finished)
if __name__ == "__main__":
test.main()
| 657 |
2,254 | <filename>tests/modules/pkg1/sub/__main__.py
# Used in the tests for PyRunner
import sys
print("pkg1.sub.__main__: passed %s" % sys.argv[1])
| 54 |
3,372 | <gh_stars>1000+
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.fms.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* An ordered list of actions you can take to remediate a violation.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/RemediationActionWithOrder" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class RemediationActionWithOrder implements Serializable, Cloneable, StructuredPojo {
/**
* <p>
* Information about an action you can take to remediate a violation.
* </p>
*/
private RemediationAction remediationAction;
/**
* <p>
* The order of the remediation actions in the list.
* </p>
*/
private Integer order;
/**
* <p>
* Information about an action you can take to remediate a violation.
* </p>
*
* @param remediationAction
* Information about an action you can take to remediate a violation.
*/
public void setRemediationAction(RemediationAction remediationAction) {
this.remediationAction = remediationAction;
}
/**
* <p>
* Information about an action you can take to remediate a violation.
* </p>
*
* @return Information about an action you can take to remediate a violation.
*/
public RemediationAction getRemediationAction() {
return this.remediationAction;
}
/**
* <p>
* Information about an action you can take to remediate a violation.
* </p>
*
* @param remediationAction
* Information about an action you can take to remediate a violation.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public RemediationActionWithOrder withRemediationAction(RemediationAction remediationAction) {
setRemediationAction(remediationAction);
return this;
}
/**
* <p>
* The order of the remediation actions in the list.
* </p>
*
* @param order
* The order of the remediation actions in the list.
*/
public void setOrder(Integer order) {
this.order = order;
}
/**
* <p>
* The order of the remediation actions in the list.
* </p>
*
* @return The order of the remediation actions in the list.
*/
public Integer getOrder() {
return this.order;
}
/**
* <p>
* The order of the remediation actions in the list.
* </p>
*
* @param order
* The order of the remediation actions in the list.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public RemediationActionWithOrder withOrder(Integer order) {
setOrder(order);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getRemediationAction() != null)
sb.append("RemediationAction: ").append(getRemediationAction()).append(",");
if (getOrder() != null)
sb.append("Order: ").append(getOrder());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof RemediationActionWithOrder == false)
return false;
RemediationActionWithOrder other = (RemediationActionWithOrder) obj;
if (other.getRemediationAction() == null ^ this.getRemediationAction() == null)
return false;
if (other.getRemediationAction() != null && other.getRemediationAction().equals(this.getRemediationAction()) == false)
return false;
if (other.getOrder() == null ^ this.getOrder() == null)
return false;
if (other.getOrder() != null && other.getOrder().equals(this.getOrder()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getRemediationAction() == null) ? 0 : getRemediationAction().hashCode());
hashCode = prime * hashCode + ((getOrder() == null) ? 0 : getOrder().hashCode());
return hashCode;
}
@Override
public RemediationActionWithOrder clone() {
try {
return (RemediationActionWithOrder) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.fms.model.transform.RemediationActionWithOrderMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}
| 2,261 |
337 | /*
* Copyright 2018 The Polycube Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../interface/StatsInterface.h"
#include "polycube/services/utils.h"
#include <thread>
#include <spdlog/spdlog.h>
class Ddosmitigator;
using namespace io::swagger::server::model;
class Stats : public StatsInterface {
public:
Stats(Ddosmitigator &parent, const StatsJsonObject &conf);
virtual ~Stats();
std::shared_ptr<spdlog::logger> logger();
void update(const StatsJsonObject &conf) override;
StatsJsonObject toJsonObject() override;
/// <summary>
/// Dropped Packets/s
/// </summary>
uint64_t getPps() override;
/// <summary>
/// Total Dropped Packets
/// </summary>
uint64_t getPkts() override;
private:
Ddosmitigator &parent_;
};
| 388 |
2,728 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure_devtools.scenario_tests.base import ReplayableTest
import pytest
try:
from unittest import mock
except ImportError: # python < 3.3
import mock # type: ignore
VCR = ReplayableTest.__module__ + ".vcr.VCR"
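# Patch target: the vcr.VCR class as imported by ReplayableTest's defining module.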
def test_default_match_configuration():
"""ReplayableTest should default to VCR's default matching configuration"""
with mock.patch(VCR) as mock_vcr:
ReplayableTest("__init__")
assert not any("match_on" in call.kwargs for call in mock_vcr.call_args_list)
@pytest.mark.parametrize("opt_in", (True, False, None))
def test_match_body(opt_in):
"""match_body should control opting in to vcr.py's in-box body matching, and default to False"""
mock_vcr = mock.Mock(match_on=())
with mock.patch(VCR, lambda *_, **__: mock_vcr):
ReplayableTest("__init__", match_body=opt_in)
assert ("body" in mock_vcr.match_on) == (opt_in == True)
def test_custom_request_matchers():
"""custom request matchers should be registered with vcr.py and added to the default matchers"""
matcher = mock.Mock(__name__="mock matcher")
mock_vcr = mock.Mock(match_on=())
with mock.patch(VCR, lambda *_, **__: mock_vcr):
ReplayableTest("__init__", custom_request_matchers=[matcher])
assert mock.call(matcher.__name__, matcher) in mock_vcr.register_matcher.call_args_list
assert matcher.__name__ in mock_vcr.match_on
| 541 |
777 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SERVICES_VIDEO_CAPTURE_VIDEO_CAPTURE_SERVICE_H_
#define SERVICES_VIDEO_CAPTURE_VIDEO_CAPTURE_SERVICE_H_
#include <memory>
#include "mojo/public/cpp/bindings/binding_set.h"
#include "services/service_manager/public/cpp/interface_factory.h"
#include "services/service_manager/public/cpp/service.h"
#include "services/video_capture/public/interfaces/service.mojom.h"
namespace video_capture {
class DeviceFactoryMediaToMojoAdapter;
// Implementation of video_capture::mojom::Service as a Service Manager service.
class ServiceImpl : public service_manager::Service,
public service_manager::InterfaceFactory<mojom::Service>,
public mojom::Service {
public:
ServiceImpl();
~ServiceImpl() override;
// service_manager::Service:
bool OnConnect(const service_manager::ServiceInfo& remote_info,
service_manager::InterfaceRegistry* registry) override;
// service_manager::InterfaceFactory<video_capture::mojom::Service>:
void Create(const service_manager::Identity& remote_identity,
mojom::ServiceRequest request) override;
// video_capture::mojom::Service
void ConnectToDeviceFactory(mojom::DeviceFactoryRequest request) override;
void ConnectToFakeDeviceFactory(mojom::DeviceFactoryRequest request) override;
private:
void LazyInitializeDeviceFactory();
void LazyInitializeFakeDeviceFactory();
mojo::BindingSet<mojom::Service> service_bindings_;
mojo::BindingSet<mojom::DeviceFactory> factory_bindings_;
mojo::BindingSet<mojom::DeviceFactory> fake_factory_bindings_;
std::unique_ptr<DeviceFactoryMediaToMojoAdapter> device_factory_;
std::unique_ptr<DeviceFactoryMediaToMojoAdapter> fake_device_factory_;
};
} // namespace video_capture
#endif // SERVICES_VIDEO_CAPTURE_VIDEO_CAPTURE_SERVICE_H_
| 655 |
2,151 | <reponame>google-ar/chromium
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.net.impl;
import android.support.annotation.IntDef;
import android.support.annotation.Nullable;
import org.chromium.net.CronetException;
import org.chromium.net.RequestFinishedInfo;
import org.chromium.net.UrlResponseInfo;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.Collection;
import java.util.Collections;
/**
* Implements information about a finished request. Passed to {@link RequestFinishedInfo.Listener}.
*/
public class RequestFinishedInfoImpl extends RequestFinishedInfo {
private final String mUrl;
private final Collection<Object> mAnnotations;
private final RequestFinishedInfo.Metrics mMetrics;
@FinishedReason
private final int mFinishedReason;
@Nullable
private final UrlResponseInfo mResponseInfo;
@Nullable
private final CronetException mException;
@IntDef({SUCCEEDED, FAILED, CANCELED})
@Retention(RetentionPolicy.SOURCE)
public @interface FinishedReason {}
public RequestFinishedInfoImpl(String url, Collection<Object> annotations,
RequestFinishedInfo.Metrics metrics, @FinishedReason int finishedReason,
@Nullable UrlResponseInfo responseInfo, @Nullable CronetException exception) {
mUrl = url;
mAnnotations = annotations;
mMetrics = metrics;
mFinishedReason = finishedReason;
mResponseInfo = responseInfo;
mException = exception;
}
@Override
public String getUrl() {
return mUrl;
}
@Override
public Collection<Object> getAnnotations() {
if (mAnnotations == null) {
return Collections.emptyList();
}
return mAnnotations;
}
@Override
public Metrics getMetrics() {
return mMetrics;
}
@Override
@FinishedReason
public int getFinishedReason() {
return mFinishedReason;
}
@Override
@Nullable
public UrlResponseInfo getResponseInfo() {
return mResponseInfo;
}
@Override
@Nullable
public CronetException getException() {
return mException;
}
}
| 819 |
1,253 | #include <bits/stdc++.h>
using namespace std;
/*
Kuhn's algorithm computes a maximum bipartite matching in O(V*E) time (O(|V|^3) on dense graphs)
Sample input:
5 5
0 3
1 3
1 4
2 3
2 4
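Expected output:
maximum bipartite matching size = 2, edges:
0 3
1 4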
*/
bool tryKuhn(int n, const vector<vector<int>>& adj, vector<bool>& used, vector<int>& M, int v)
{
if (used[v]) return false;
used[v] = true;
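// First pass: match v directly to any still-unmatched neighbour.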
for (int u : adj[v]) {
if (M[u] == -1) {
M[u] = v;
return true;
}
}
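// Second pass: try to re-route the partner of an already matched neighbour along an augmenting path.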
for (int u : adj[v]) {
if (tryKuhn(n, adj, used, M, M[u])) {
M[u] = v;
return true;
}
}
return false;
}
vector<pair<int,int>> maximumBipartiteMatching(vector<vector<int>>& adj)
{ // Return: Vector of edges in maximum matching
int n = adj.size();
vector<bool> used(n, false);
vector<int> M(n, -1); // -1 if not in maximum matching, else value is the other vertex of the edge
for (int v = 0; v < n; ++v) {
used.assign(n, false);
tryKuhn(n, adj, used, M, v);
}
vector<pair<int,int>> MM;
for (int v = 0; v < n; ++v) {
if (M[v] != -1) {
pair<int,int> edge = make_pair(min(v, M[v]), max(v, M[v]));
if (find(MM.begin(), MM.end(), edge) == MM.end()) { // avoid duplicates: each matched edge is seen from both endpoints
MM.push_back(edge);
}
}
}
return MM;
}
int main()
{
ios::sync_with_stdio(false);
int n, m;
cin >> n >> m; // read number of vertices, number of edges
vector<vector<int>> adj(n); // undirected graph adjacency list
for (int i = 0; i < m; ++i) {
int v, u;
cin >> v >> u; // read edge
adj[v].push_back(u);
adj[u].push_back(v);
}
vector<pair<int,int>> MM = maximumBipartiteMatching(adj);
cout << "maximum bipartite matching size = " << MM.size() << ", edges:" << endl;
for (auto edge : MM) {
cout << edge.first << ' ' << edge.second << endl;
}
} | 979 |
977 | <filename>test/expected_json_nulls.json
[{"_key":0,"str_col":"Hello","bool_col":false,"int_col":1,"timestamp_col":"1970-01-01 00:00:01"},{"_key":1,"str_col":null,"bool_col":null,"int_col":null,"timestamp_col":null}] | 86 |
1,545 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.meta;
import java.io.IOException;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.bookkeeper.conf.AbstractConfiguration;
import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.Processor;
import org.apache.bookkeeper.util.StringUtils;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An abstract class for managing hierarchical ledgers.
*/
public abstract class AbstractHierarchicalLedgerManager extends AbstractZkLedgerManager {
private static final Logger LOG = LoggerFactory.getLogger(AbstractHierarchicalLedgerManager.class);
/**
* Constructor.
*
* @param conf
* Configuration object
* @param zk
* ZooKeeper Client Handle
*/
public AbstractHierarchicalLedgerManager(AbstractConfiguration conf, ZooKeeper zk) {
super(conf, zk);
}
/**
* Process hash nodes in a given path.
*/
void asyncProcessLevelNodes(
final String path, final Processor<String> processor,
final AsyncCallback.VoidCallback finalCb, final Object context,
final int successRc, final int failureRc) {
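// Sync first so that the subsequent getChildren observes the latest view of this path.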
zk.sync(path, new AsyncCallback.VoidCallback() {
@Override
public void processResult(int rc, String path, Object ctx) {
if (rc == Code.NONODE.intValue()) {
// Raced with node removal
finalCb.processResult(successRc, null, context);
return;
} else if (rc != Code.OK.intValue()) {
LOG.error("Error syncing path " + path + " when getting its chidren: ",
KeeperException.create(KeeperException.Code.get(rc), path));
finalCb.processResult(failureRc, null, context);
return;
}
zk.getChildren(path, false, new AsyncCallback.ChildrenCallback() {
@Override
public void processResult(int rc, String path, Object ctx,
List<String> levelNodes) {
if (rc == Code.NONODE.intValue()) {
// Raced with node removal
finalCb.processResult(successRc, null, context);
return;
} else if (rc != Code.OK.intValue()) {
LOG.error("Error polling hash nodes of " + path,
KeeperException.create(KeeperException.Code.get(rc), path));
finalCb.processResult(failureRc, null, context);
return;
}
AsyncListProcessor<String> listProcessor =
new AsyncListProcessor<String>(scheduler);
// process its children
listProcessor.process(levelNodes, processor, finalCb,
context, successRc, failureRc);
}
}, null);
}
}, null);
}
/**
* Processes the list one element at a time in an asynchronous fashion. Processing is stopped
* immediately when an error occurs.
*/
private static class AsyncListProcessor<T> {
// use this to prevent long stack chains from building up in callbacks
ScheduledExecutorService scheduler;
/**
* Constructor.
*
* @param scheduler
* Executor used to prevent long stack chains
*/
public AsyncListProcessor(ScheduledExecutorService scheduler) {
this.scheduler = scheduler;
}
/**
* Process list of items.
*
* @param data
* List of data to process
* @param processor
* Callback to process element of list when success
* @param finalCb
* Final callback to be called after all elements in the list are processed
* @param context
* Context of final callback
* @param successRc
* RC passed to final callback on success
* @param failureRc
* RC passed to final callback on failure
*/
public void process(final List<T> data, final Processor<T> processor,
final AsyncCallback.VoidCallback finalCb, final Object context,
final int successRc, final int failureRc) {
if (data == null || data.size() == 0) {
finalCb.processResult(successRc, null, context);
return;
}
final int size = data.size();
final AtomicInteger current = new AtomicInteger(0);
AsyncCallback.VoidCallback stubCallback = new AsyncCallback.VoidCallback() {
@Override
public void processResult(int rc, String path, Object ctx) {
if (rc != successRc) {
// terminate immediately
finalCb.processResult(failureRc, null, context);
return;
}
// process next element
int next = current.incrementAndGet();
if (next >= size) { // reach the end of list
finalCb.processResult(successRc, null, context);
return;
}
final T dataToProcess = data.get(next);
final AsyncCallback.VoidCallback stub = this;
scheduler.submit(new Runnable() {
@Override
public void run() {
processor.process(dataToProcess, stub);
}
});
}
};
T firstElement = data.get(0);
processor.process(firstElement, stubCallback);
}
}
// get ledger from all level nodes
long getLedgerId(String...levelNodes) throws IOException {
return StringUtils.stringToHierarchicalLedgerId(levelNodes);
}
/**
* Get all ledger ids in the given zk path.
*
* @param ledgerNodes
* List of ledgers in the given path
* example:- {L1652, L1653, L1650}
* @param path
* The zookeeper path of the ledger ids. The path should start with {@code ledgerRootPath}
* example (with ledgerRootPath = /ledgers):- /ledgers/00/0053
*/
@Override
protected NavigableSet<Long> ledgerListToSet(List<String> ledgerNodes, String path) {
NavigableSet<Long> zkActiveLedgers = new TreeSet<Long>();
if (!path.startsWith(ledgerRootPath)) {
LOG.warn("Ledger path [{}] is not a valid path name, it should start wth {}", path, ledgerRootPath);
return zkActiveLedgers;
}
long ledgerIdPrefix = 0;
char ch;
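// Accumulate every decimal digit appearing in the path below the ledger root;
// these digits form the high-order prefix shared by all ledger ids under this znode.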
for (int i = ledgerRootPath.length() + 1; i < path.length(); i++) {
ch = path.charAt(i);
if (ch < '0' || ch > '9') {
continue;
}
ledgerIdPrefix = ledgerIdPrefix * 10 + (ch - '0');
}
for (String ledgerNode : ledgerNodes) {
if (isSpecialZnode(ledgerNode)) {
continue;
}
long ledgerId = ledgerIdPrefix;
for (int i = 0; i < ledgerNode.length(); i++) {
ch = ledgerNode.charAt(i);
if (ch < '0' || ch > '9') {
continue;
}
ledgerId = ledgerId * 10 + (ch - '0');
}
zkActiveLedgers.add(ledgerId);
}
return zkActiveLedgers;
}
}
| 4,249 |
5,607 | package io.micronaut.docs.http.client.bind.annotation;
import io.micronaut.context.ApplicationContext;
import io.micronaut.runtime.server.EmbeddedServer;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.LinkedHashMap;
import java.util.Map;
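// The MetadataClient interface is not part of this snippet. Based on the test's
// usage (and the Micronaut annotation-binding docs this package mirrors), it is
// roughly declared as follows -- the path and @Metadata annotation are assumptions:
//
//   @Client("/")
//   public interface MetadataClient {
//       @Get("/client/bind")
//       String get(@Metadata Map<String, Object> metadata);
//   }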
public class AnnotationBinderSpec {
@Test
void testBindingToTheRequest() {
EmbeddedServer server = ApplicationContext.run(EmbeddedServer.class);
MetadataClient client = server.getApplicationContext().getBean(MetadataClient.class);
Map<String, Object> metadata = new LinkedHashMap<>();
metadata.put("version", 3.6);
metadata.put("deploymentId", 42L);
String resp = client.get(metadata);
Assertions.assertEquals("3.6", resp);
server.close();
}
}
| 301 |
400 | <gh_stars>100-1000
/*
* aiq3a_util.cpp - aiq 3a utility:
*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: <NAME> <<EMAIL>>
* Author: <NAME> <<EMAIL>>
*/
#include "aiq3a_utils.h"
#include "x3a_isp_config.h"
namespace XCam {
bool
translate_3a_stats (XCam3AStats *from, struct atomisp_3a_statistics *to)
{
XCAM_ASSERT (from);
XCAM_ASSERT (to);
struct atomisp_grid_info &to_info = to->grid_info;
XCam3AStatsInfo &from_info = from->info;
uint32_t color_count = (from_info.grid_pixel_size / 2) * (from_info.grid_pixel_size / 2);
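// Each grid cell stores per-channel averages over (grid_pixel_size / 2) ^ 2
// Bayer samples; atomisp expects accumulated sums, hence the scaling below.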
XCAM_ASSERT (to_info.bqs_per_grid_cell == 8);
for (uint32_t i = 0; i < from_info.height; ++i)
for (uint32_t j = 0; j < from_info.width; ++j) {
to->data [i * to_info.aligned_width + j].ae_y =
from->stats [i * from_info.aligned_width + j].avg_y * color_count;
to->data [i * to_info.aligned_width + j].awb_gr =
from->stats [i * from_info.aligned_width + j].avg_gr * color_count;
to->data [i * to_info.aligned_width + j].awb_r =
from->stats [i * from_info.aligned_width + j].avg_r * color_count;
to->data [i * to_info.aligned_width + j].awb_b =
from->stats [i * from_info.aligned_width + j].avg_b * color_count;
to->data [i * to_info.aligned_width + j].awb_gb =
from->stats [i * from_info.aligned_width + j].avg_gb * color_count;
to->data [i * to_info.aligned_width + j].awb_cnt =
from->stats [i * from_info.aligned_width + j].valid_wb_count;
to->data [i * to_info.aligned_width + j].af_hpf1 =
from->stats [i * from_info.aligned_width + j].f_value1;
to->data [i * to_info.aligned_width + j].af_hpf2 =
from->stats [i * from_info.aligned_width + j].f_value2;
}
return true;
}
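// Computes dest = src1 * src2 for row-major 3x3 matrices.
// dest must not alias src1 or src2.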
static void
matrix_3x3_multiply (double *dest, const double *src1, const double *src2)
{
dest[0] = src1[0] * src2[0] + src1[1] * src2[3] + src1[2] * src2[6];
dest[1] = src1[0] * src2[1] + src1[1] * src2[4] + src1[2] * src2[7];
dest[2] = src1[0] * src2[2] + src1[1] * src2[5] + src1[2] * src2[8];
dest[3] = src1[3] * src2[0] + src1[4] * src2[3] + src1[5] * src2[6];
dest[4] = src1[3] * src2[1] + src1[4] * src2[4] + src1[5] * src2[7];
dest[5] = src1[3] * src2[2] + src1[4] * src2[5] + src1[5] * src2[8];
dest[6] = src1[6] * src2[0] + src1[7] * src2[3] + src1[8] * src2[6];
dest[7] = src1[6] * src2[1] + src1[7] * src2[4] + src1[8] * src2[7];
dest[8] = src1[6] * src2[2] + src1[7] * src2[5] + src1[8] * src2[8];
}
static uint32_t
translate_atomisp_parameters (
const struct atomisp_parameters &atomisp_params,
XCam3aResultHead *results[], uint32_t max_count)
{
uint32_t result_count = 0;
double coefficient = 0.0;
/* Translation for white balance */
XCAM_ASSERT (result_count < max_count);
if (atomisp_params.wb_config) {
XCam3aResultWhiteBalance *wb = xcam_malloc0_type (XCam3aResultWhiteBalance);
XCAM_ASSERT (wb);
wb->head.type = XCAM_3A_RESULT_WHITE_BALANCE;
wb->head.process_type = XCAM_IMAGE_PROCESS_ALWAYS;
wb->head.version = xcam_version ();
coefficient = pow (2, (16 - atomisp_params.wb_config->integer_bits));
wb->r_gain = atomisp_params.wb_config->r / coefficient;
wb->gr_gain = atomisp_params.wb_config->gr / coefficient;
wb->gb_gain = atomisp_params.wb_config->gb / coefficient;
wb->b_gain = atomisp_params.wb_config->b / coefficient;
results[result_count++] = (XCam3aResultHead*)wb;
}
/* Translation for black level correction */
XCAM_ASSERT (result_count < max_count);
if (atomisp_params.ob_config) {
XCam3aResultBlackLevel *blc = xcam_malloc0_type (XCam3aResultBlackLevel);
XCAM_ASSERT (blc);
blc->head.type = XCAM_3A_RESULT_BLACK_LEVEL;
blc->head.process_type = XCAM_IMAGE_PROCESS_ALWAYS;
blc->head.version = xcam_version ();
if (atomisp_params.ob_config->mode == atomisp_ob_mode_fixed) {
blc->r_level = atomisp_params.ob_config->level_r / (double)65536;
blc->gr_level = atomisp_params.ob_config->level_gr / (double)65536;
blc->gb_level = atomisp_params.ob_config->level_gb / (double)65536;
blc->b_level = atomisp_params.ob_config->level_b / (double)65536;
}
results[result_count++] = (XCam3aResultHead*)blc;
}
/* Translation for color correction */
XCAM_ASSERT (result_count < max_count);
if (atomisp_params.yuv2rgb_cc_config) {
static const double rgb2yuv_matrix [XCAM_COLOR_MATRIX_SIZE] = {
0.299, 0.587, 0.114,
-0.14713, -0.28886, 0.436,
0.615, -0.51499, -0.10001
};
static const double r_ycgco_matrix [XCAM_COLOR_MATRIX_SIZE] = {
0.25, 0.5, 0.25,
-0.25, 0.5, -0.25,
0.5, 0, -0.5
};
double tmp_matrix [XCAM_COLOR_MATRIX_SIZE] = {0.0};
double cc_matrix [XCAM_COLOR_MATRIX_SIZE] = {0.0};
XCam3aResultColorMatrix *cm = xcam_malloc0_type (XCam3aResultColorMatrix);
XCAM_ASSERT (cm);
cm->head.type = XCAM_3A_RESULT_RGB2YUV_MATRIX;
cm->head.process_type = XCAM_IMAGE_PROCESS_ALWAYS;
cm->head.version = xcam_version ();
coefficient = pow (2, atomisp_params.yuv2rgb_cc_config->fraction_bits);
for (int i = 0; i < XCAM_COLOR_MATRIX_SIZE; i++) {
tmp_matrix [i] = atomisp_params.yuv2rgb_cc_config->matrix [i] / coefficient;
}
matrix_3x3_multiply (cc_matrix, tmp_matrix, r_ycgco_matrix);
matrix_3x3_multiply (cm->matrix, rgb2yuv_matrix, cc_matrix);
// cm->matrix = rgb2yuv_matrix * tmp_matrix * r_ycgco_matrix
results[result_count++] = (XCam3aResultHead*)cm;
}
/* Translation for gamma table */
XCAM_ASSERT (result_count < max_count);
if (atomisp_params.g_gamma_table) {
XCam3aResultGammaTable *gt = xcam_malloc0_type (XCam3aResultGammaTable);
XCAM_ASSERT (gt);
gt->head.type = XCAM_3A_RESULT_G_GAMMA;
gt->head.process_type = XCAM_IMAGE_PROCESS_ALWAYS;
gt->head.version = xcam_version ();
for (int i = 0; i < XCAM_GAMMA_TABLE_SIZE; i++) {
gt->table[i] = (double)atomisp_params.g_gamma_table->data.vamem_2[i] / 16;
}
results[result_count++] = (XCam3aResultHead*)gt;
}
/* Translation for macc matrix table */
XCAM_ASSERT (result_count < max_count);
if (atomisp_params.macc_config) {
XCam3aResultMaccMatrix *macc = xcam_malloc0_type (XCam3aResultMaccMatrix);
XCAM_ASSERT (macc);
macc->head.type = XCAM_3A_RESULT_MACC;
macc->head.process_type = XCAM_IMAGE_PROCESS_ALWAYS;
macc->head.version = xcam_version ();
coefficient = pow (2, (13 - atomisp_params.macc_config->color_effect));
for (int i = 0; i < XCAM_CHROMA_AXIS_SIZE * XCAM_CHROMA_MATRIX_SIZE; i++) {
macc->table[i] = (double)atomisp_params.macc_table->data[i] / coefficient;
}
results[result_count++] = (XCam3aResultHead*)macc;
}
/* Translation for defect pixel correction */
XCAM_ASSERT (result_count < max_count);
if (atomisp_params.dp_config) {
XCam3aResultDefectPixel *dpc = xcam_malloc0_type (XCam3aResultDefectPixel);
XCAM_ASSERT (dpc);
dpc->head.type = XCAM_3A_RESULT_DEFECT_PIXEL_CORRECTION;
dpc->head.process_type = XCAM_IMAGE_PROCESS_ALWAYS;
dpc->head.version = xcam_version ();
coefficient = pow (2, 16);
dpc->gr_threshold = atomisp_params.dp_config->threshold / coefficient;
dpc->r_threshold = atomisp_params.dp_config->threshold / coefficient;
dpc->b_threshold = atomisp_params.dp_config->threshold / coefficient;
dpc->gb_threshold = atomisp_params.dp_config->threshold / coefficient;
results[result_count++] = (XCam3aResultHead*)dpc;
}
/* OCL has defined BNR config, no need to translate ISP BNR config */
#if 0
/* Translation for bnr config */
XCAM_ASSERT (result_count < max_count);
if (atomisp_params.nr_config) {
XCam3aResultBayerNoiseReduction *bnr = xcam_malloc0_type (XCam3aResultBayerNoiseReduction);
XCAM_ASSERT (bnr);
bnr->head.type = XCAM_3A_RESULT_BAYER_NOISE_REDUCTION;
bnr->head.process_type = XCAM_IMAGE_PROCESS_ALWAYS;
bnr->head.version = xcam_version ();
bnr->bnr_gain = (double)atomisp_params.nr_config->bnr_gain / pow(2, 16);
bnr->direction = (double)atomisp_params.nr_config->direction / pow(2, 16);
results[result_count++] = (XCam3aResultHead*)bnr;
}
#endif
return result_count;
}
uint32_t
translate_3a_results_to_xcam (X3aResultList &list,
XCam3aResultHead *results[], uint32_t max_count)
{
uint32_t result_count = 0;
for (X3aResultList::iterator iter = list.begin (); iter != list.end (); ++iter) {
SmartPtr<X3aResult> &isp_result = *iter;
switch (isp_result->get_type()) {
case X3aIspConfig::IspExposureParameters: {
SmartPtr<X3aIspExposureResult> isp_exposure =
isp_result.dynamic_cast_ptr<X3aIspExposureResult> ();
XCAM_ASSERT (isp_exposure.ptr ());
const XCam3aResultExposure &exposure = isp_exposure->get_standard_result ();
XCam3aResultExposure *new_exposure = xcam_malloc0_type (XCam3aResultExposure);
XCAM_ASSERT (new_exposure);
*new_exposure = exposure;
new_exposure->head.type = XCAM_3A_RESULT_EXPOSURE;
new_exposure->head.process_type = XCAM_IMAGE_PROCESS_ALWAYS;
new_exposure->head.version = xcam_version ();
results[result_count++] = (XCam3aResultHead*)new_exposure;
break;
}
case X3aIspConfig::IspAllParameters: {
SmartPtr<X3aAtomIspParametersResult> isp_3a_all =
isp_result.dynamic_cast_ptr<X3aAtomIspParametersResult> ();
XCAM_ASSERT (isp_3a_all.ptr ());
const struct atomisp_parameters &atomisp_params = isp_3a_all->get_isp_config ();
result_count += translate_atomisp_parameters (atomisp_params, &results[result_count], max_count - result_count);
break;
}
case XCAM_3A_RESULT_BRIGHTNESS: {
SmartPtr<X3aBrightnessResult> xcam_brightness =
isp_result.dynamic_cast_ptr<X3aBrightnessResult>();
const XCam3aResultBrightness &brightness = xcam_brightness->get_standard_result();
XCam3aResultBrightness *new_brightness = xcam_malloc0_type(XCam3aResultBrightness);
XCAM_ASSERT (new_brightness);
*new_brightness = brightness;
results[result_count++] = (XCam3aResultHead*)new_brightness;
break;
}
case XCAM_3A_RESULT_3D_NOISE_REDUCTION:
case XCAM_3A_RESULT_TEMPORAL_NOISE_REDUCTION_YUV:
{
SmartPtr<X3aTemporalNoiseReduction> xcam_tnr =
isp_result.dynamic_cast_ptr<X3aTemporalNoiseReduction> ();
const XCam3aResultTemporalNoiseReduction &tnr = xcam_tnr->get_standard_result();
XCam3aResultTemporalNoiseReduction *new_tnr = xcam_malloc0_type(XCam3aResultTemporalNoiseReduction);
XCAM_ASSERT (new_tnr);
*new_tnr = tnr;
results[result_count++] = (XCam3aResultHead*)new_tnr;
break;
}
case XCAM_3A_RESULT_EDGE_ENHANCEMENT:
{
SmartPtr<X3aEdgeEnhancementResult> xcam_ee =
isp_result.dynamic_cast_ptr<X3aEdgeEnhancementResult> ();
const XCam3aResultEdgeEnhancement &ee = xcam_ee->get_standard_result();
XCam3aResultEdgeEnhancement *new_ee = xcam_malloc0_type(XCam3aResultEdgeEnhancement);
XCAM_ASSERT (new_ee);
*new_ee = ee;
results[result_count++] = (XCam3aResultHead*)new_ee;
break;
}
case XCAM_3A_RESULT_BAYER_NOISE_REDUCTION:
{
SmartPtr<X3aBayerNoiseReduction> xcam_bnr =
isp_result.dynamic_cast_ptr<X3aBayerNoiseReduction> ();
const XCam3aResultBayerNoiseReduction &bnr = xcam_bnr->get_standard_result();
XCam3aResultBayerNoiseReduction *new_bnr = xcam_malloc0_type(XCam3aResultBayerNoiseReduction);
XCAM_ASSERT (new_bnr);
*new_bnr = bnr;
results[result_count++] = (XCam3aResultHead*)new_bnr;
break;
}
case XCAM_3A_RESULT_WAVELET_NOISE_REDUCTION:
{
SmartPtr<X3aWaveletNoiseReduction> xcam_wavelet =
isp_result.dynamic_cast_ptr<X3aWaveletNoiseReduction> ();
const XCam3aResultWaveletNoiseReduction &wavelet = xcam_wavelet->get_standard_result();
XCam3aResultWaveletNoiseReduction *new_wavelet = xcam_malloc0_type(XCam3aResultWaveletNoiseReduction);
XCAM_ASSERT (new_wavelet);
*new_wavelet = wavelet;
results[result_count++] = (XCam3aResultHead*)new_wavelet;
break;
}
default: {
XCAM_LOG_WARNING ("unknown type(%d) in translation", isp_result->get_type());
break;
}
}
}
return result_count;
}
void
free_3a_result (XCam3aResultHead *result)
{
xcam_free (result);
}
}
| 6,829 |
334 | <gh_stars>100-1000
// Auto generated code, do not modify
package nxt.http.callers;
import nxt.http.APICall;
public class StopShufflerCall extends APICall.Builder<StopShufflerCall> {
private StopShufflerCall() {
super(ApiSpec.stopShuffler);
}
public static StopShufflerCall create() {
return new StopShufflerCall();
}
public StopShufflerCall secretPhrase(String secretPhrase) {
return param("secretPhrase", secretPhrase);
}
public StopShufflerCall account(String account) {
return param("account", account);
}
public StopShufflerCall account(long account) {
return unsignedLongParam("account", account);
}
public StopShufflerCall adminPassword(String adminPassword) {
return param("adminPassword", adminPassword);
}
public StopShufflerCall shufflingFullHash(String shufflingFullHash) {
return param("shufflingFullHash", shufflingFullHash);
}
public StopShufflerCall shufflingFullHash(byte[] shufflingFullHash) {
return param("shufflingFullHash", shufflingFullHash);
}
}
| 393 |
348 | <filename>docs/data/leg-t2/019/01901285.json
{"nom":"Vigeois","circ":"1ère circonscription","dpt":"Corrèze","inscrits":905,"abs":376,"votants":529,"blancs":41,"nuls":26,"exp":462,"res":[{"nuance":"REM","nom":"<NAME>","voix":270},{"nuance":"SOC","nom":"<NAME>","voix":192}]} | 110 |